From 4ac3d55709b190776c089d57f70a51bf1553f661 Mon Sep 17 00:00:00 2001 From: freemine Date: Wed, 1 Jan 2025 16:15:37 +0800 Subject: [PATCH 001/120] bypass some specific test case which believes to fail in community version of taos --- source/libs/parser/test/parAlterToBalanceTest.cpp | 14 +++++++++++++- source/libs/parser/test/parInitialCTest.cpp | 8 ++++++++ source/libs/tcs/test/tcsTest.cpp | 8 ++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp index d25435913f..7b5817d9f1 100644 --- a/source/libs/parser/test/parAlterToBalanceTest.cpp +++ b/source/libs/parser/test/parAlterToBalanceTest.cpp @@ -196,14 +196,24 @@ TEST_F(ParserInitialATest, alterDatabase) { setAlterDbFsync(200); setAlterDbWal(1); setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW); +#ifdef TD_ENTERPRISE setAlterDbSttTrigger(16); +#else + setAlterDbSttTrigger(1); +#endif setAlterDbBuffer(16); setAlterDbPages(128); setAlterDbReplica(3); setAlterDbWalRetentionPeriod(10); setAlterDbWalRetentionSize(20); run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " - "REPLICA 3 WAL_LEVEL 1 STT_TRIGGER 16 WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); + "REPLICA 3 WAL_LEVEL 1 " +#ifdef TD_ENTERPRISE + "STT_TRIGGER 16 " +#else + "STT_TRIGGER 1 " +#endif + "WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); clearAlterDbReq(); initAlterDb("test"); @@ -289,10 +299,12 @@ TEST_F(ParserInitialATest, alterDatabase) { initAlterDb("test"); setAlterDbSttTrigger(1); run("ALTER DATABASE test STT_TRIGGER 1"); +#ifdef TD_ENTERPRISE setAlterDbSttTrigger(4); run("ALTER DATABASE test STT_TRIGGER 4"); setAlterDbSttTrigger(16); run("ALTER DATABASE test STT_TRIGGER 16"); +#endif clearAlterDbReq(); initAlterDb("test"); diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 3422ebe028..6ded5696f9 100644 --- 
a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -292,7 +292,11 @@ TEST_F(ParserInitialCTest, createDatabase) { setDbWalRetentionSize(-1); setDbWalRollPeriod(10); setDbWalSegmentSize(20); +#ifdef TD_ENTERPRISE setDbSstTrigger(16); +#else + setDbSstTrigger(1); +#endif setDbHashPrefix(3); setDbHashSuffix(4); setDbTsdbPageSize(32); @@ -320,7 +324,11 @@ TEST_F(ParserInitialCTest, createDatabase) { "WAL_RETENTION_SIZE -1 " "WAL_ROLL_PERIOD 10 " "WAL_SEGMENT_SIZE 20 " +#ifdef TD_ENTERPRISE "STT_TRIGGER 16 " +#else + "STT_TRIGGER 1 " +#endif "TABLE_PREFIX 3 " "TABLE_SUFFIX 4 " "TSDB_PAGESIZE 32"); diff --git a/source/libs/tcs/test/tcsTest.cpp b/source/libs/tcs/test/tcsTest.cpp index 4b5afc5b85..5b5a1bd1d2 100644 --- a/source/libs/tcs/test/tcsTest.cpp +++ b/source/libs/tcs/test/tcsTest.cpp @@ -234,6 +234,13 @@ TEST(TcsTest, InterfaceTest) { // TEST(TcsTest, DISABLED_InterfaceNonBlobTest) { TEST(TcsTest, InterfaceNonBlobTest) { +#ifndef TD_ENTERPRISE + // NOTE: this test case will coredump for community edition of taos + // thus we bypass this test case for the moment + // code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock); + // tcsGetObjectBlock succeeded but pBlock is nullptr + // which results in nullptr-access-coredump shortly after +#else int code = 0; bool check = false; bool withcp = false; @@ -348,4 +355,5 @@ TEST(TcsTest, InterfaceNonBlobTest) { GTEST_ASSERT_EQ(code, 0); tcsUninit(); +#endif } From c4b522ff09e80356c3a522788c5fc83ec81607c5 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Fri, 3 Jan 2025 14:15:29 +0800 Subject: [PATCH 002/120] update README structure for support linux/win/mac platforms by charles --- README.md | 337 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 198 insertions(+), 139 deletions(-) diff --git a/README.md b/README.md index e4814cee67..dfd1be23c7 100644 --- a/README.md +++ b/README.md @@ -26,24 +26,51 @@ English | [简体中文](README-CN.md) | [TDengine 
Cloud](https://cloud.tdengine # Table of Contents -1. [What is TDengine?](#1-what-is-tdengine) -2. [Documentation](#2-documentation) -3. [Building](#3-building) - 1. [Install build tools](#31-install-build-tools) - 1. [Get the source codes](#32-get-the-source-codes) - 1. [Special Note](#33-special-note) - 1. [Build TDengine](#34-build-tdengine) -4. [Installing](#4-installing) - 1. [On Linux platform](#41-on-linux-platform) - 1. [On Windows platform](#42-on-windows-platform) - 1. [On macOS platform](#43-on-macos-platform) - 1. [Quick Run](#44-quick-run) -5. [Try TDengine](#5-try-tdengine) -6. [Developing with TDengine](#6-developing-with-tdengine) -7. [Contribute to TDengine](#7-contribute-to-tdengine) -8. [Join the TDengine Community](#8-join-the-tdengine-community) +- [Table of Contents](#table-of-contents) +- [1. Introduction](#1-introduction) +- [2. Documentation](#2-documentation) +- [3. Prerequisites](#3-prerequisites) + - [3.1 Install dependencies tools](#31-install-dependencies-tools) + - [3.1.1 Linux platform](#311-linux-platform) + - [Install the required package for linux](#install-the-required-package-for-linux) + - [Install Go for linux](#install-go-for-linux) + - [Install node for linux](#install-node-for-linux) + - [Install Python-connector for linux](#install-python-connector-for-linux) + - [3.1.2 Windows platform](#312-windows-platform) + - [Install the required package for windows](#install-the-required-package-for-windows) + - [3.1.3 MacOS platform](#313-macos-platform) + - [Install the required package for macOS](#install-the-required-package-for-macos) + - [3.2 Get the source codes](#32-get-the-source-codes) + - [3.3 Special Note](#33-special-note) +- [4. Building](#4-building) + - [4.1 Linux platform building](#41-linux-platform-building) + - [4.2 Windows platform building](#42-windows-platform-building) + - [4.3 MacOS platform building](#43-macos-platform-building) +- [5. 
Packaging](#5-packaging) + - [5.1 Linux platform packaging](#51-linux-platform-packaging) + - [5.2 Windows platform packaging](#52-windows-platform-packaging) + - [5.3 MacOS platform packaging](#53-macos-platform-packaging) +- [6. Installation](#6-installation) + - [6.1 Linux platform installation](#61-linux-platform-installation) + - [6.2 Windows platform installation](#62-windows-platform-installation) + - [6.3 MacOS platform installation](#63-macos-platform-installation) +- [7. Running](#7-running) + - [7.1 Linux platform](#71-linux-platform) + - [7.2 Windows platform](#72-windows-platform) + - [7.3 MacOS platform](#73-macos-platform) +- [8. Testing](#8-testing) + - [8.1 Linux platform testing](#81-linux-platform-testing) + - [8.2 Windows platform testing](#82-windows-platform-testing) + - [8.3 MacOS platform testing](#83-macos-platform-testing) +- [9. Releasing](#9-releasing) + - [9.1 Linux platform releasing](#91-linux-platform-releasing) + - [9.2 Windows platform realeasing](#92-windows-platform-realeasing) + - [9.3 MacOS platform releasing](#93-macos-platform-releasing) +- [10. CI/CD](#10-cicd) +- [11. Coverage](#11-coverage) +- [12. Contributing](#12-contributing) -# 1. What is TDengine? +# 1. Introduction TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages: @@ -65,109 +92,127 @@ For a full list of TDengine competitive advantages, please [check here](https:// For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)) -# 3. Building +# 3. 
Prerequisites -At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. +List the software and tools required to work on the project. -You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. +- go 1.20+ (for taosadapter) +- node 16.20.2 (for taos-explorer) +- python 3.10.12+ (for test) -TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. +Step-by-step instructions to set up the prerequisites software. -To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. 
+## 3.1 Install dependencies tools -## 3.1 Install build tools +### 3.1.1 Linux platform -### Ubuntu 18.04 and above or Debian +#### Install the required package for linux ```bash -sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev +apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping ``` -#### Install build dependencies for taosTools +#### Install Go for linux -To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. +Update the installation package to version 1.23.3. ```bash -sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config +cd /usr/local/ +wget https://golang.google.cn/dl/go1.23.3.linux-amd64.tar.gz +rm -rf /usr/local/go && tar -C /usr/local -xzf go1.23.3.linux-amd64.tar.gz ``` -### CentOS 7.9 +Set up environment variables, first add the following content to the end of the `~/.bashrc` file. ```bash -sudo yum install epel-release -sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel -sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake +export GO_HOME=/usr/local/go +export PATH=$GO_HOME/bin:$PATH +export CGO_ENABLED=1 ``` -### CentOS 8/Fedora/Rocky Linux +Then make the environment variables take effect. ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel +source ~/.bashrc ``` -#### Install build dependencies for taosTools on CentOS +Configure proxy to accelerate the download of Go dependencies. 
-#### CentOS 7.9 - -``` -sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel +```bash +go env -w GOPROXY=https://goproxy.cn,direct +go env -w GO111MODULE=on ``` -#### CentOS 8/Fedora/Rocky Linux +Check if the environment variables have taken effect and if the version is the installed version. -``` -sudo yum install -y epel-release -sudo yum install -y dnf-plugins-core -sudo yum config-manager --set-enabled powertools -sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel +```bash +go env +go version ``` -Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads a cmake prompt libsnappy not found. But snappy still works well. +#### Install node for linux -If the PowerTools installation fails, you can try to use: +Recommend install node using nvm. -``` -sudo yum config-manager --set-enabled powertools +```bash +curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh | bash ``` -#### For CentOS + devtoolset +Set up environment variables, add the following content to the end of the `~/.bashrc` file. -Besides above dependencies, please run following commands: - -``` -sudo yum install centos-release-scl -sudo yum install devtoolset-9 devtoolset-9-libatomic-devel -scl enable devtoolset-9 -- bash +```bash +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" ``` -### macOS +Then make the environment variables take effect. + +```bash +source ~/.bashrc +``` + +Finally, Install node and yarn. + +```bash +nvm install 16.20.2 +npm config set registry=https://registry.npmmirror.com +npm install -g yarn +``` + +#### Install Python-connector for linux + +Install Python3. + +```bash +apt install python3 +apt install python3-pip +``` + +Install the dependent Python components. 
+ +```bash +pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog +``` + +Install the Python connector for TDengine. + +```bash +pip3 install taospy taos-ws-py +``` + +### 3.1.2 Windows platform + +#### Install the required package for windows + + +### 3.1.3 MacOS platform + +#### Install the required package for macOS ``` brew install argp-standalone gflags pkgconfig ``` -### Setup golang environment - -TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup. - -Please use version 1.20+. For the user in China, we recommend using a proxy to accelerate package downloading. - -``` -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` - -The default will not build taosAdapter, but you can use the following command to build taosAdapter as the service for RESTful interface. - -``` -cmake .. -DBUILD_HTTP=false -``` - -### Setup rust environment - -TDengine includes a few components developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. - ## 3.2 Get the source codes First of all, you may clone the source codes from github: @@ -188,9 +233,17 @@ You can modify the file ~/.gitconfig to use ssh protocol instead of https for be [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. -## 3.4 Build TDengine +# 4. 
Building -### On Linux platform +At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. + +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. + +TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. + +To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. + +## 4.1 Linux platform building You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: @@ -223,7 +276,7 @@ aarch64: cmake .. -DCPUTYPE=aarch64 && cmake --build . ``` -### On Windows platform +## 4.2 Windows platform building If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. @@ -255,7 +308,7 @@ cmake .. -G "NMake Makefiles" nmake ``` -### On macOS platform +## 4.3 MacOS platform building Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. @@ -264,9 +317,17 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` -# 4. Installing +# 5. 
Packaging -## 4.1 On Linux platform +## 5.1 Linux platform packaging + +## 5.2 Windows platform packaging + +## 5.3 MacOS platform packaging + +# 6. Installation + +## 6.1 Linux platform installation After building successfully, TDengine can be installed by @@ -278,21 +339,7 @@ Users can find more information about directories installed on the system in the Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it. -To start the service after installation, in a terminal, use: - -```bash -sudo systemctl start taosd -``` - -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: - -```bash -taos -``` - -If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. - -## 4.2 On Windows platform +## 6.2 Windows platform installation After building successfully, TDengine can be installed by: @@ -300,7 +347,7 @@ After building successfully, TDengine can be installed by: nmake install ``` -## 4.3 On macOS platform +## 6.3 MacOS platform installation After building successfully, TDengine can be installed by: @@ -326,7 +373,23 @@ taos If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -## 4.4 Quick Run +# 7. Running + +## 7.1 Linux platform + +To start the service after installation, in a terminal, use: + +```bash +sudo systemctl start taosd +``` + +Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + +```bash +taos +``` + +If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. If you don't want to run TDengine as a service, you can run it in current shell. 
For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) @@ -342,48 +405,44 @@ In another terminal, use the TDengine CLI to connect the server: option "-c test/cfg" specifies the system configuration file directory. -# 5. Try TDengine +## 7.2 Windows platform -It is easy to run SQL commands from TDengine CLI which is the same as other SQL databases. +## 7.3 MacOS platform -```sql -CREATE DATABASE demo; -USE demo; -CREATE TABLE t (ts TIMESTAMP, speed INT); -INSERT INTO t VALUES('2019-07-15 00:00:00', 10); -INSERT INTO t VALUES('2019-07-15 01:00:00', 20); -SELECT * FROM t; - ts | speed | -=================================== - 19-07-15 00:00:00.000| 10| - 19-07-15 01:00:00.000| 20| -Query OK, 2 row(s) in set (0.001700s) +# 8. Testing + +## 8.1 Linux platform testing + +## 8.2 Windows platform testing + +## 8.3 MacOS platform testing + +# 9. Releasing + +## 9.1 Linux platform releasing + +## 9.2 Windows platform realeasing + +## 9.3 MacOS platform releasing + +# 10. CI/CD +We use jenkins for CI/CD workflow configuration. See http://ci.bl.taosdata.com:8080/job/NewTest/view/change-requests/ +We can also run ci script locally. + +```bash +cd /root/TDengine/tests +./run_all_ci_cases.sh ``` -# 6. Developing with TDengine +# 11. Coverage +We can see coverage result in https://coveralls.io/github/taosdata/TDengine +We can also run coverage script locally. -## Official Connectors +```bash +cd /root/TDengine/tests +./run_local_coverage.sh +``` -TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. 
- -- [Java](https://docs.tdengine.com/reference/connectors/java/) -- [C/C++](https://docs.tdengine.com/reference/connectors/cpp/) -- [Python](https://docs.tdengine.com/reference/connectors/python/) -- [Go](https://docs.tdengine.com/reference/connectors/go/) -- [Node.js](https://docs.tdengine.com/reference/connectors/node/) -- [Rust](https://docs.tdengine.com/reference/connectors/rust/) -- [C#](https://docs.tdengine.com/reference/connectors/csharp/) -- [RESTful API](https://docs.tdengine.com/reference/connectors/rest-api/) - -# 7. Contribute to TDengine +# 12. Contributing Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project. - -# 8. Join the TDengine Community - -For more information about TDengine, you can follow us on social media and join our Discord server: - -- [Discord](https://discord.com/invite/VZdSuUg4pS) -- [Twitter](https://twitter.com/TDengineDB) -- [LinkedIn](https://www.linkedin.com/company/tdengine/) -- [YouTube](https://www.youtube.com/@tdengine) From f53d458b61d9ce12989d952823e8b2523f59e28c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 3 Jan 2025 16:09:32 +0800 Subject: [PATCH 003/120] fix invalid lock --- source/libs/stream/src/streamMeta.c | 36 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 3955343fdb..0de256d86d 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -253,7 +253,7 @@ _EXIT: int32_t len = strlen(pMeta->path) + 32; char* state = taosMemoryCalloc(1, len); if (state != NULL) { - (void) snprintf(state, len, "%s%s%s", pMeta->path, TD_DIRSEP, "state"); + (void)snprintf(state, len, "%s%s%s", pMeta->path, TD_DIRSEP, "state"); taosRemoveDir(state); taosMemoryFree(state); } else { @@ -380,7 +380,7 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, char* tpath = taosMemoryCalloc(1, len); 
TSDB_CHECK_NULL(tpath, code, lino, _err, terrno); - (void) snprintf(tpath, len, "%s%s%s", path, TD_DIRSEP, "stream"); + (void)snprintf(tpath, len, "%s%s%s", path, TD_DIRSEP, "stream"); pMeta->path = tpath; code = streamMetaOpenTdb(pMeta); @@ -392,6 +392,22 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, TSDB_CHECK_CODE(code, lino, _err); } + // set the attribute when running on Linux OS + TdThreadRwlockAttr attr; + code = taosThreadRwlockAttrInit(&attr); + TSDB_CHECK_CODE(code, lino, _err); + +#ifdef LINUX + code = pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); + TSDB_CHECK_CODE(code, lino, _err); +#endif + + code = taosThreadRwlockInit(&pMeta->lock, &attr); + TSDB_CHECK_CODE(code, lino, _err); + + code = taosThreadRwlockAttrDestroy(&attr); + TSDB_CHECK_CODE(code, lino, _err); + if ((code = streamMetaBegin(pMeta) < 0)) { stError("vgId:%d begin trans for stream meta failed", pMeta->vgId); goto _err; @@ -431,22 +447,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, stInfo("vgId:%d open stream meta succ, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, stage); - // set the attribute when running on Linux OS - TdThreadRwlockAttr attr; - code = taosThreadRwlockAttrInit(&attr); - TSDB_CHECK_CODE(code, lino, _err); - -#ifdef LINUX - code = pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - TSDB_CHECK_CODE(code, lino, _err); -#endif - - code = taosThreadRwlockInit(&pMeta->lock, &attr); - TSDB_CHECK_CODE(code, lino, _err); - - code = taosThreadRwlockAttrDestroy(&attr); - TSDB_CHECK_CODE(code, lino, _err); - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); TSDB_CHECK_CODE(code, lino, _err); From 5822d7a6b0ce27b662deb4b8e7740753adba847e Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Mon, 6 Jan 2025 17:53:28 +0800 Subject: [PATCH 004/120] doc:add test cases --- README.md | 64 
++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index dfd1be23c7..363194f202 100644 --- a/README.md +++ b/README.md @@ -413,6 +413,64 @@ option "-c test/cfg" specifies the system configuration file directory. ## 8.1 Linux platform testing +### 8.1.1 Run the TSIM test script +Run the TSIM test script to simulate TDengine. +```bash +cd /root/TDengine/tests/script +./test.sh -f tsim/db/basic1.sim +``` + +### 8.1.2 Run the Python test script +Run the Python test script to simulate TDengine. +```bash +cd /root/TDengine/tests/system-test +python3 ./test.py -f 2-query/floor.py +``` + +### 8.1.3 Run unittest +Run the unit test script to simulate TDengine. +```bash +cd /root/TDengine/tests/unit-test/ +bash test.sh +``` + +### 8.1.4 Smoke Testing +Run the smoke test script to simulate TDengine. +```bash +cd /root/TDengine/packaging/smokeTest +./test_smoking_selfhost.sh +``` + +### 8.1.5 TSBS Test --replace +Run the TSBS test script to simulate TDengine. +1. Clone the code +```bash +cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp +``` +2. Modify IP and host of client and server in `test.ini` +```ini +clientIP="192.168.0.203" # client ip +clientHost="trd03" # client hostname +serverIP="192.168.0.204" # server ip +serverHost="trd04" # server hostname +``` +3. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: +```ini +serverPass="taosdata123" # server root password +``` +4. Run the following command to start the test: + ```bash +nohup bash tsdbComparison.sh > test.log & +``` +5. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. + +### 8.1.6 Crash_gen Test +Run the crash_gen script to simulate TDengine. 
+```bash +cd /root/TDengine/tests/pytest/ && ./crash_gen.sh +``` + + ## 8.2 Windows platform testing ## 8.3 MacOS platform testing @@ -425,16 +483,16 @@ option "-c test/cfg" specifies the system configuration file directory. ## 9.3 MacOS platform releasing -# 10. CI/CD +# 10. CI/CD We use jenkins for CI/CD workflow configuration. See http://ci.bl.taosdata.com:8080/job/NewTest/view/change-requests/ We can also run ci script locally. ```bash cd /root/TDengine/tests -./run_all_ci_cases.sh +./run_all_ci_cases.sh -b main ``` -# 11. Coverage +# 11. Coverage --replace We can see coverage result in https://coveralls.io/github/taosdata/TDengine We can also run coverage script locally. From 1e2e526f960ec13103e0909ce72a27f8bcd697e6 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 7 Jan 2025 14:56:40 +0800 Subject: [PATCH 005/120] update structure of readme to remove the second layer title by charles --- README.md | 188 ++++++++++++++++++++++++++---------------------------- 1 file changed, 89 insertions(+), 99 deletions(-) diff --git a/README.md b/README.md index 363194f202..62c3d6c5c9 100644 --- a/README.md +++ b/README.md @@ -31,43 +31,22 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine - [2. Documentation](#2-documentation) - [3. 
Prerequisites](#3-prerequisites) - [3.1 Install dependencies tools](#31-install-dependencies-tools) - - [3.1.1 Linux platform](#311-linux-platform) - - [Install the required package for linux](#install-the-required-package-for-linux) - - [Install Go for linux](#install-go-for-linux) - - [Install node for linux](#install-node-for-linux) - - [Install Python-connector for linux](#install-python-connector-for-linux) - - [3.1.2 Windows platform](#312-windows-platform) - - [Install the required package for windows](#install-the-required-package-for-windows) - - [3.1.3 MacOS platform](#313-macos-platform) - - [Install the required package for macOS](#install-the-required-package-for-macos) - [3.2 Get the source codes](#32-get-the-source-codes) - [3.3 Special Note](#33-special-note) - [4. Building](#4-building) - - [4.1 Linux platform building](#41-linux-platform-building) - - [4.2 Windows platform building](#42-windows-platform-building) - - [4.3 MacOS platform building](#43-macos-platform-building) - [5. Packaging](#5-packaging) - - [5.1 Linux platform packaging](#51-linux-platform-packaging) - - [5.2 Windows platform packaging](#52-windows-platform-packaging) - - [5.3 MacOS platform packaging](#53-macos-platform-packaging) - [6. Installation](#6-installation) - - [6.1 Linux platform installation](#61-linux-platform-installation) - - [6.2 Windows platform installation](#62-windows-platform-installation) - - [6.3 MacOS platform installation](#63-macos-platform-installation) - [7. Running](#7-running) - - [7.1 Linux platform](#71-linux-platform) - - [7.2 Windows platform](#72-windows-platform) - - [7.3 MacOS platform](#73-macos-platform) - [8. 
Testing](#8-testing) - - [8.1 Linux platform testing](#81-linux-platform-testing) - - [8.2 Windows platform testing](#82-windows-platform-testing) - - [8.3 MacOS platform testing](#83-macos-platform-testing) + - [8.1 Run the TSIM test script](#81-run-the-tsim-test-script) + - [8.2 Run the Python test script](#82-run-the-python-test-script) + - [8.3 Run unittest](#83-run-unittest) + - [8.4 Smoke Testing](#84-smoke-testing) + - [8.5 TSBS Test --replace](#85-tsbs-test---replace) + - [8.6 Crash\_gen Test](#86-crash_gen-test) - [9. Releasing](#9-releasing) - - [9.1 Linux platform releasing](#91-linux-platform-releasing) - - [9.2 Windows platform realeasing](#92-windows-platform-realeasing) - - [9.3 MacOS platform releasing](#93-macos-platform-releasing) - [10. CI/CD](#10-cicd) -- [11. Coverage](#11-coverage) +- [11. Coverage --replace](#11-coverage---replace) - [12. Contributing](#12-contributing) # 1. Introduction @@ -104,15 +83,13 @@ Step-by-step instructions to set up the prerequisites software. ## 3.1 Install dependencies tools -### 3.1.1 Linux platform - -#### Install the required package for linux +Install the required package for linux ```bash apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping ``` -#### Install Go for linux +Install Go for linux Update the installation package to version 1.23.3. @@ -150,7 +127,7 @@ go env go version ``` -#### Install node for linux +Install node for linux Recommend install node using nvm. @@ -179,7 +156,7 @@ npm config set registry=https://registry.npmmirror.com npm install -g yarn ``` -#### Install Python-connector for linux +Install Python-connector for linux Install Python3. 
@@ -200,14 +177,11 @@ Install the Python connector for TDengine. pip3 install taospy taos-ws-py ``` -### 3.1.2 Windows platform +Install the required package for windows -#### Install the required package for windows +to be updated... - -### 3.1.3 MacOS platform - -#### Install the required package for macOS +Install the required package for macOS ``` brew install argp-standalone gflags pkgconfig @@ -243,7 +217,7 @@ TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) a To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. -## 4.1 Linux platform building +Linux platform building You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: @@ -276,7 +250,7 @@ aarch64: cmake .. -DCPUTYPE=aarch64 && cmake --build . ``` -## 4.2 Windows platform building +4.2 Windows platform building If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. @@ -308,7 +282,7 @@ cmake .. -G "NMake Makefiles" nmake ``` -## 4.3 MacOS platform building +MacOS platform building Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. @@ -319,15 +293,21 @@ cmake .. && cmake --build . # 5. Packaging -## 5.1 Linux platform packaging +Linux platform packaging -## 5.2 Windows platform packaging +to be updated... -## 5.3 MacOS platform packaging +Windows platform packaging + +to be updated... + +MacOS platform packaging + +to be updated... # 6. Installation -## 6.1 Linux platform installation +Linux platform installation After building successfully, TDengine can be installed by @@ -337,9 +317,9 @@ sudo make install Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. 
-Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it. +Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -## 6.2 Windows platform installation +Windows platform installation After building successfully, TDengine can be installed by: @@ -347,7 +327,7 @@ After building successfully, TDengine can be installed by: nmake install ``` -## 6.3 MacOS platform installation +MacOS platform installation After building successfully, TDengine can be installed by: @@ -355,29 +335,9 @@ After building successfully, TDengine can be installed by: sudo make install ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. - -Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it. - -To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use: - -```bash -sudo launchctl start com.tdengine.taosd -``` - -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: - -```bash -taos -``` - -If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. - # 7. Running -## 7.1 Linux platform - -To start the service after installation, in a terminal, use: +To start the service after installation on linux, in a terminal, use: ```bash sudo systemctl start taosd @@ -405,87 +365,117 @@ In another terminal, use the TDengine CLI to connect the server: option "-c test/cfg" specifies the system configuration file directory. 
-## 7.2 Windows platform +To start the service after installation on windows, in a terminal, use: -## 7.3 MacOS platform +```bash +to be updated +``` +Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + +```bash +to be updated +``` + +To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: + +```bash +sudo launchctl start com.tdengine.taosd +``` + +Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + +```bash +taos +``` + +If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. # 8. Testing -## 8.1 Linux platform testing +## 8.1 Run the TSIM test script + +TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run +the script with below command: -### 8.1.1 Run the TSIM test script -Run the TSIM test script to simulate TDengine. ```bash cd /root/TDengine/tests/script ./test.sh -f tsim/db/basic1.sim ``` -### 8.1.2 Run the Python test script -Run the Python test script to simulate TDengine. +## 8.2 Run the Python test script +Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only +work for TDengine enterprise version, you can run the script with below command: + ```bash cd /root/TDengine/tests/system-test python3 ./test.py -f 2-query/floor.py ``` -### 8.1.3 Run unittest -Run the unit test script to simulate TDengine. +## 8.3 Run unittest +Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run +the script with below command: + ```bash cd /root/TDengine/tests/unit-test/ bash test.sh ``` -### 8.1.4 Smoke Testing -Run the smoke test script to simulate TDengine. 
+## 8.4 Smoke Testing +Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the +script with below command: + ```bash cd /root/TDengine/packaging/smokeTest ./test_smoking_selfhost.sh ``` -### 8.1.5 TSBS Test --replace -Run the TSBS test script to simulate TDengine. +## 8.5 TSBS Test --replace +The Time Series Benchmark Suite(TSBS) test script provides a standardized approach to evaluate the performance of various +time series databases, you can run the script with below command: + 1. Clone the code ```bash cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp ``` -2. Modify IP and host of client and server in `test.ini` +1. Modify IP and host of client and server in `test.ini` ```ini clientIP="192.168.0.203" # client ip clientHost="trd03" # client hostname serverIP="192.168.0.204" # server ip serverHost="trd04" # server hostname ``` -3. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: +1. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: ```ini serverPass="taosdata123" # server root password ``` -4. Run the following command to start the test: +1. Run the following command to start the test: ```bash nohup bash tsdbComparison.sh > test.log & ``` -5. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. +1. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. + +## 8.6 Crash_gen Test +Crash_gen is a chaotic testing tool developed by TDengine, it can evaluate the resilience of TDengine through simulating +normal operation、intentional fault injection and their combination, you can run the script with below command: -### 8.1.6 Crash_gen Test -Run the crash_gen script to simulate TDengine. 
```bash cd /root/TDengine/tests/pytest/ && ./crash_gen.sh ``` +Windows platform testing -## 8.2 Windows platform testing +to be updated... -## 8.3 MacOS platform testing +MacOS platform testing + +to be updated... # 9. Releasing -## 9.1 Linux platform releasing - -## 9.2 Windows platform realeasing - -## 9.3 MacOS platform releasing +You can access the [Release](https://github.com/taosdata/TDengine/releases) for TDengine release version list. # 10. CI/CD -We use jenkins for CI/CD workflow configuration. See http://ci.bl.taosdata.com:8080/job/NewTest/view/change-requests/ -We can also run ci script locally. +You can run ci script locally with below commands: ```bash cd /root/TDengine/tests From 16d15427120495d837e508adb18c10ebb60bf4b4 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 7 Jan 2025 16:44:39 +0800 Subject: [PATCH 006/120] udpate README to remove the second layer section for Building and Testing by charles --- README.md | 528 ++++++++++++++++++++++++++---------------------------- 1 file changed, 259 insertions(+), 269 deletions(-) diff --git a/README.md b/README.md index 62c3d6c5c9..d843f98ffa 100644 --- a/README.md +++ b/README.md @@ -30,20 +30,11 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine - [1. Introduction](#1-introduction) - [2. Documentation](#2-documentation) - [3. Prerequisites](#3-prerequisites) - - [3.1 Install dependencies tools](#31-install-dependencies-tools) - - [3.2 Get the source codes](#32-get-the-source-codes) - - [3.3 Special Note](#33-special-note) - [4. Building](#4-building) - [5. Packaging](#5-packaging) - [6. Installation](#6-installation) - [7. Running](#7-running) - [8. 
Testing](#8-testing) - - [8.1 Run the TSIM test script](#81-run-the-tsim-test-script) - - [8.2 Run the Python test script](#82-run-the-python-test-script) - - [8.3 Run unittest](#83-run-unittest) - - [8.4 Smoke Testing](#84-smoke-testing) - - [8.5 TSBS Test --replace](#85-tsbs-test---replace) - - [8.6 Crash\_gen Test](#86-crash_gen-test) - [9. Releasing](#9-releasing) - [10. CI/CD](#10-cicd) - [11. Coverage --replace](#11-coverage---replace) @@ -81,131 +72,130 @@ List the software and tools required to work on the project. Step-by-step instructions to set up the prerequisites software. -## 3.1 Install dependencies tools +- Install dependencies tools -Install the required package for linux + - The required package for linux -```bash -apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping -``` + ```bash + apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping + ``` -Install Go for linux + Install Go for linux -Update the installation package to version 1.23.3. + Update the installation package to version 1.23.3. 
-```bash -cd /usr/local/ -wget https://golang.google.cn/dl/go1.23.3.linux-amd64.tar.gz -rm -rf /usr/local/go && tar -C /usr/local -xzf go1.23.3.linux-amd64.tar.gz -``` + ```bash + cd /usr/local/ + wget https://golang.google.cn/dl/go1.23.3.linux-amd64.tar.gz + rm -rf /usr/local/go && tar -C /usr/local -xzf go1.23.3.linux-amd64.tar.gz + ``` -Set up environment variables, first add the following content to the end of the `~/.bashrc` file. + Set up environment variables, first add the following content to the end of the `~/.bashrc` file. -```bash -export GO_HOME=/usr/local/go -export PATH=$GO_HOME/bin:$PATH -export CGO_ENABLED=1 -``` + ```bash + export GO_HOME=/usr/local/go + export PATH=$GO_HOME/bin:$PATH + export CGO_ENABLED=1 + ``` -Then make the environment variables take effect. + Then make the environment variables take effect. -```bash -source ~/.bashrc -``` + ```bash + source ~/.bashrc + ``` -Configure proxy to accelerate the download of Go dependencies. + Configure proxy to accelerate the download of Go dependencies. -```bash -go env -w GOPROXY=https://goproxy.cn,direct -go env -w GO111MODULE=on -``` + ```bash + go env -w GOPROXY=https://goproxy.cn,direct + go env -w GO111MODULE=on + ``` -Check if the environment variables have taken effect and if the version is the installed version. + Check if the environment variables have taken effect and if the version is the installed version. -```bash -go env -go version -``` + ```bash + go env + go version + ``` -Install node for linux + Install node for linux -Recommend install node using nvm. + Recommend install node using nvm. -```bash -curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh | bash -``` + ```bash + curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh | bash + ``` -Set up environment variables, add the following content to the end of the `~/.bashrc` file. + Set up environment variables, add the following content to the end of the `~/.bashrc` file. 
-```bash -export NVM_DIR="$HOME/.nvm" -[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" -``` + ```bash + export NVM_DIR="$HOME/.nvm" + [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" + ``` -Then make the environment variables take effect. + Then make the environment variables take effect. -```bash -source ~/.bashrc -``` + ```bash + source ~/.bashrc + ``` -Finally, Install node and yarn. + Finally, Install node and yarn. -```bash -nvm install 16.20.2 -npm config set registry=https://registry.npmmirror.com -npm install -g yarn -``` + ```bash + nvm install 16.20.2 + npm config set registry=https://registry.npmmirror.com + npm install -g yarn + ``` -Install Python-connector for linux + Install Python-connector for linux -Install Python3. + Install Python3. -```bash -apt install python3 -apt install python3-pip -``` + ```bash + apt install python3 + apt install python3-pip + ``` -Install the dependent Python components. + Install the dependent Python components. -```bash -pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog -``` + ```bash + pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog + ``` -Install the Python connector for TDengine. + Install the Python connector for TDengine. -```bash -pip3 install taospy taos-ws-py -``` + ```bash + pip3 install taospy taos-ws-py + ``` -Install the required package for windows + - The required package for windows -to be updated... + to be updated... 
-Install the required package for macOS + - The required package for macOS -``` -brew install argp-standalone gflags pkgconfig -``` + ``` + brew install argp-standalone gflags pkgconfig + ``` -## 3.2 Get the source codes +- Get the source codes -First of all, you may clone the source codes from github: + First of all, you may clone the source codes from github: -```bash -git clone https://github.com/taosdata/TDengine.git -cd TDengine -``` + ```bash + git clone https://github.com/taosdata/TDengine.git + cd TDengine + ``` -You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. + You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. -``` -[url "git@github.com:"] - insteadOf = https://github.com/ -``` + ``` + [url "git@github.com:"] + insteadOf = https://github.com/ + ``` -## 3.3 Special Note - -[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. 
+- Special Note + [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. # 4. Building @@ -217,258 +207,258 @@ TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) a To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. -Linux platform building +- Linux platform building -You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: + You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: -```bash -./build.sh -``` + ```bash + ./build.sh + ``` -It equals to execute following commands: + It equals to execute following commands: -```bash -mkdir debug -cd debug -cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true -make -``` + ```bash + mkdir debug + cd debug + cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true + make + ``` -You can use Jemalloc as memory allocator instead of glibc: + You can use Jemalloc as memory allocator instead of glibc: -``` -apt install autoconf -cmake .. -DJEMALLOC_ENABLED=true -``` + ``` + apt install autoconf + cmake .. -DJEMALLOC_ENABLED=true + ``` -TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. -You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: + TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. 
+ You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: -aarch64: + aarch64: -```bash -cmake .. -DCPUTYPE=aarch64 && cmake --build . -``` + ```bash + cmake .. -DCPUTYPE=aarch64 && cmake --build . + ``` -4.2 Windows platform building +- Windows platform building -If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". -Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. + If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". + Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. -```cmd -mkdir debug && cd debug -"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 > -cmake .. -G "NMake Makefiles" -nmake -``` + ```cmd + mkdir debug && cd debug + "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 > + cmake .. -G "NMake Makefiles" + nmake + ``` -If you use the Visual Studio 2019 or 2017: + If you use the Visual Studio 2019 or 2017: -please open a command window by executing "cmd.exe". -Please specify "x64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. + please open a command window by executing "cmd.exe". + Please specify "x64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. -```cmd -mkdir debug && cd debug -"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 > -cmake .. -G "NMake Makefiles" -nmake -``` + ```cmd + mkdir debug && cd debug + "c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 > + cmake .. 
-G "NMake Makefiles" + nmake + ``` -Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depends what architecture your Windows is, then execute commands as follows: + Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depends what architecture your Windows is, then execute commands as follows: -```cmd -mkdir debug && cd debug -cmake .. -G "NMake Makefiles" -nmake -``` + ```cmd + mkdir debug && cd debug + cmake .. -G "NMake Makefiles" + nmake + ``` -MacOS platform building +- MacOS platform building -Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. + Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. -```shell -mkdir debug && cd debug -cmake .. && cmake --build . -``` + ```shell + mkdir debug && cd debug + cmake .. && cmake --build . + ``` # 5. Packaging -Linux platform packaging - -to be updated... - -Windows platform packaging - -to be updated... - -MacOS platform packaging - -to be updated... +To be updated... # 6. Installation -Linux platform installation +- Linux platform installation -After building successfully, TDengine can be installed by + After building successfully, TDengine can be installed by -```bash -sudo make install -``` + ```bash + sudo make install + ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. + Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. 
-Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. + Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -Windows platform installation +- Windows platform installation -After building successfully, TDengine can be installed by: + After building successfully, TDengine can be installed by: -```cmd -nmake install -``` + ```cmd + nmake install + ``` -MacOS platform installation +- MacOS platform installation -After building successfully, TDengine can be installed by: + After building successfully, TDengine can be installed by: -```bash -sudo make install -``` + ```bash + sudo make install + ``` # 7. Running -To start the service after installation on linux, in a terminal, use: +- To start the service after installation on linux, in a terminal, use: -```bash -sudo systemctl start taosd -``` + ```bash + sudo systemctl start taosd + ``` -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: -```bash -taos -``` + ```bash + taos + ``` -If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. + If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) + If you don't want to run TDengine as a service, you can run it in current shell. 
For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) -```bash -./build/bin/taosd -c test/cfg -``` + ```bash + ./build/bin/taosd -c test/cfg + ``` -In another terminal, use the TDengine CLI to connect the server: + In another terminal, use the TDengine CLI to connect the server: -```bash -./build/bin/taos -c test/cfg -``` + ```bash + ./build/bin/taos -c test/cfg + ``` -option "-c test/cfg" specifies the system configuration file directory. + option "-c test/cfg" specifies the system configuration file directory. -To start the service after installation on windows, in a terminal, use: +- To start the service after installation on windows, in a terminal, use: -```bash -to be updated -``` -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + ```bash + to be updated + ``` + Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: -```bash -to be updated -``` + ```bash + to be updated + ``` -To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: +- To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: -```bash -sudo launchctl start com.tdengine.taosd -``` + ```bash + sudo launchctl start com.tdengine.taosd + ``` -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: -```bash -taos -``` + ```bash + taos + ``` If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. # 8. 
Testing -## 8.1 Run the TSIM test script +- Run the TSIM test script -TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run -the script with below command: + TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run + the script with below command: -```bash -cd /root/TDengine/tests/script -./test.sh -f tsim/db/basic1.sim -``` + ```bash + cd /root/TDengine/tests/script + ./test.sh -f tsim/db/basic1.sim + ``` -## 8.2 Run the Python test script -Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only -work for TDengine enterprise version, you can run the script with below command: +- Run the Python test script + + Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only + work for TDengine enterprise version, you can run the script with below command: -```bash -cd /root/TDengine/tests/system-test -python3 ./test.py -f 2-query/floor.py -``` + ```bash + cd /root/TDengine/tests/system-test + python3 ./test.py -f 2-query/floor.py + ``` -## 8.3 Run unittest -Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run -the script with below command: +- Run unittest + + Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run + the script with below command: -```bash -cd /root/TDengine/tests/unit-test/ -bash test.sh -``` + ```bash + cd /root/TDengine/tests/unit-test/ + bash test.sh + ``` -## 8.4 Smoke Testing -Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the -script with below command: +- Smoke Testing + + Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the + 
script with below command: -```bash -cd /root/TDengine/packaging/smokeTest -./test_smoking_selfhost.sh -``` + ```bash + cd /root/TDengine/packaging/smokeTest + ./test_smoking_selfhost.sh + ``` -## 8.5 TSBS Test --replace -The Time Series Benchmark Suite(TSBS) test script provides a standardized approach to evaluate the performance of various -time series databases, you can run the script with below command: +- TSBS Test --replace -1. Clone the code -```bash -cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp -``` -1. Modify IP and host of client and server in `test.ini` -```ini -clientIP="192.168.0.203" # client ip -clientHost="trd03" # client hostname -serverIP="192.168.0.204" # server ip -serverHost="trd04" # server hostname -``` -1. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: -```ini -serverPass="taosdata123" # server root password -``` -1. Run the following command to start the test: - ```bash -nohup bash tsdbComparison.sh > test.log & -``` -1. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. 
+ The Time Series Benchmark Suite(TSBS) test script provides a standardized approach to evaluate the performance of various + time series databases, you can run the script with below command: -## 8.6 Crash_gen Test -Crash_gen is a chaotic testing tool developed by TDengine, it can evaluate the resilience of TDengine through simulating -normal operation、intentional fault injection and their combination, you can run the script with below command: + - Clone the code -```bash -cd /root/TDengine/tests/pytest/ && ./crash_gen.sh -``` + ```bash + cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp + ``` + + - Modify IP and host of client and server in `test.ini` + ```ini + clientIP="192.168.0.203" # client ip + clientHost="trd03" # client hostname + serverIP="192.168.0.204" # server ip + serverHost="trd04" # server hostname + ``` + + - Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: + + ```ini + serverPass="taosdata123" # server root password + ``` + + - Run the following command to start the test: + ```bash + nohup bash tsdbComparison.sh > test.log & + ``` + When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. + +- Crash_gen Test + + Crash_gen is a chaotic testing tool developed by TDengine, it can evaluate the resilience of TDengine through simulating + normal operation、intentional fault injection and their combination, you can run the script with below command: + + ```bash + cd /root/TDengine/tests/pytest/ && ./crash_gen.sh + ``` Windows platform testing -to be updated... + to be updated... MacOS platform testing -to be updated... + to be updated... # 9. 
Releasing From 7ac75a8dba6ffbe01c335a16ce8c8e0900166f15 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Tue, 7 Jan 2025 18:18:14 +0800 Subject: [PATCH 007/120] Update toc --- README.md | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index d843f98ffa..30d8f21d88 100644 --- a/README.md +++ b/README.md @@ -26,19 +26,18 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine # Table of Contents -- [Table of Contents](#table-of-contents) -- [1. Introduction](#1-introduction) -- [2. Documentation](#2-documentation) -- [3. Prerequisites](#3-prerequisites) -- [4. Building](#4-building) -- [5. Packaging](#5-packaging) -- [6. Installation](#6-installation) -- [7. Running](#7-running) -- [8. Testing](#8-testing) -- [9. Releasing](#9-releasing) -- [10. CI/CD](#10-cicd) -- [11. Coverage --replace](#11-coverage---replace) -- [12. Contributing](#12-contributing) +1. [Introduction](#1-introduction) +1. [Documentation](#2-documentation) +1. [Prerequisites](#3-prerequisites) +1. [Building](#4-building) +1. [Packaging](#5-packaging) +1. [Installation](#6-installation) +1. [Running](#7-running) +1. [Testing](#8-testing) +1. [Releasing](#9-releasing) +1. [CI/CD](#10-cicd) +1. [Coverage --replace](#11-coverage---replace) +1. [Contributing](#12-contributing) # 1. Introduction From c85f5833a0742fb74179db1fd98ad61dda602f3c Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Wed, 8 Jan 2025 08:18:39 +0800 Subject: [PATCH 008/120] adjust the contents style format by charles --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 30d8f21d88..fc938942d6 100644 --- a/README.md +++ b/README.md @@ -65,13 +65,13 @@ For user manual, system design and architecture, please refer to [TDengine Docum List the software and tools required to work on the project. 
-- go 1.20+ (for taosadapter) -- node 16.20.2 (for taos-explorer) -- python 3.10.12+ (for test) + - go 1.20+ (for taosadapter) + - node 16.20.2 (for taos-explorer) + - python 3.10.12+ (for test) Step-by-step instructions to set up the prerequisites software. -- Install dependencies tools +#### Install dependencies tools - The required package for linux @@ -177,7 +177,7 @@ Step-by-step instructions to set up the prerequisites software. brew install argp-standalone gflags pkgconfig ``` -- Get the source codes +#### Get the source codes First of all, you may clone the source codes from github: @@ -193,7 +193,7 @@ Step-by-step instructions to set up the prerequisites software. insteadOf = https://github.com/ ``` -- Special Note +#### Special Note [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. # 4. 
Building @@ -416,13 +416,13 @@ If TDengine CLI connects the server successfully, welcome messages and version i The Time Series Benchmark Suite(TSBS) test script provides a standardized approach to evaluate the performance of various time series databases, you can run the script with below command: - - Clone the code + Clone the code ```bash cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp ``` - - Modify IP and host of client and server in `test.ini` + Modify IP and host of client and server in `test.ini` ```ini clientIP="192.168.0.203" # client ip clientHost="trd03" # client hostname @@ -430,13 +430,13 @@ If TDengine CLI connects the server successfully, welcome messages and version i serverHost="trd04" # server hostname ``` - - Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: + Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: ```ini serverPass="taosdata123" # server root password ``` - - Run the following command to start the test: + Run the following command to start the test: ```bash nohup bash tsdbComparison.sh > test.log & ``` @@ -472,8 +472,8 @@ cd /root/TDengine/tests ``` # 11. Coverage --replace -We can see coverage result in https://coveralls.io/github/taosdata/TDengine -We can also run coverage script locally. +You can see coverage result in https://coveralls.io/github/taosdata/TDengine +You can also run coverage script locally. 
```bash cd /root/TDengine/tests From a0be3dcaec62f73088b09423dff8823530b0b1bc Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Wed, 8 Jan 2025 10:40:06 +0800 Subject: [PATCH 009/120] update README to add child section number and remove tsbs, crash_gen test from testing part --- README.md | 459 ++++++++++++++++++++---------------------------------- 1 file changed, 165 insertions(+), 294 deletions(-) diff --git a/README.md b/README.md index fc938942d6..8c5ec10df2 100644 --- a/README.md +++ b/README.md @@ -63,138 +63,42 @@ For user manual, system design and architecture, please refer to [TDengine Docum # 3. Prerequisites -List the software and tools required to work on the project. +## 3.1 Install dependencies tools - - go 1.20+ (for taosadapter) - - node 16.20.2 (for taos-explorer) - - python 3.10.12+ (for test) +### The required package for linux -Step-by-step instructions to set up the prerequisites software. +```bash +apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping +``` -#### Install dependencies tools +### The required package for windows - - The required package for linux +to be updated... 
- ```bash - apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping - ``` +### The required package for macOS - Install Go for linux +``` +brew install argp-standalone gflags pkgconfig +``` - Update the installation package to version 1.23.3. +## 3.2 Get the source codes - ```bash - cd /usr/local/ - wget https://golang.google.cn/dl/go1.23.3.linux-amd64.tar.gz - rm -rf /usr/local/go && tar -C /usr/local -xzf go1.23.3.linux-amd64.tar.gz - ``` +First of all, you may clone the source codes from github: - Set up environment variables, first add the following content to the end of the `~/.bashrc` file. +```bash +git clone https://github.com/taosdata/TDengine.git +cd TDengine +``` - ```bash - export GO_HOME=/usr/local/go - export PATH=$GO_HOME/bin:$PATH - export CGO_ENABLED=1 - ``` +You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. - Then make the environment variables take effect. +``` +[url "git@github.com:"] + insteadOf = https://github.com/ +``` - ```bash - source ~/.bashrc - ``` - - Configure proxy to accelerate the download of Go dependencies. - - ```bash - go env -w GOPROXY=https://goproxy.cn,direct - go env -w GO111MODULE=on - ``` - - Check if the environment variables have taken effect and if the version is the installed version. - - ```bash - go env - go version - ``` - - Install node for linux - - Recommend install node using nvm. 
- - ```bash - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh | bash - ``` - - Set up environment variables, add the following content to the end of the `~/.bashrc` file. - - ```bash - export NVM_DIR="$HOME/.nvm" - [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" - ``` - - Then make the environment variables take effect. - - ```bash - source ~/.bashrc - ``` - - Finally, Install node and yarn. - - ```bash - nvm install 16.20.2 - npm config set registry=https://registry.npmmirror.com - npm install -g yarn - ``` - - Install Python-connector for linux - - Install Python3. - - ```bash - apt install python3 - apt install python3-pip - ``` - - Install the dependent Python components. - - ```bash - pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog - ``` - - Install the Python connector for TDengine. - - ```bash - pip3 install taospy taos-ws-py - ``` - - - The required package for windows - - to be updated... - - - The required package for macOS - - ``` - brew install argp-standalone gflags pkgconfig - ``` - -#### Get the source codes - - First of all, you may clone the source codes from github: - - ```bash - git clone https://github.com/taosdata/TDengine.git - cd TDengine - ``` - - You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. 
- - ``` - [url "git@github.com:"] - insteadOf = https://github.com/ - ``` - -#### Special Note - [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. +## 3.3 Special Note +[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. # 4. Building @@ -206,79 +110,79 @@ TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) a To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. -- Linux platform building +## 4.1 Linux platform building - You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: +You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: - ```bash - ./build.sh - ``` +```bash +./build.sh +``` - It equals to execute following commands: +It equals to execute following commands: - ```bash - mkdir debug - cd debug - cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true - make - ``` +```bash +mkdir debug +cd debug +cmake .. 
-DBUILD_TOOLS=true -DBUILD_CONTRIB=true +make +``` - You can use Jemalloc as memory allocator instead of glibc: +You can use Jemalloc as memory allocator instead of glibc: - ``` - apt install autoconf - cmake .. -DJEMALLOC_ENABLED=true - ``` +``` +apt install autoconf +cmake .. -DJEMALLOC_ENABLED=true +``` - TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. - You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: +TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. +You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: - aarch64: +aarch64: - ```bash - cmake .. -DCPUTYPE=aarch64 && cmake --build . - ``` +```bash +cmake .. -DCPUTYPE=aarch64 && cmake --build . +``` -- Windows platform building +## 4.2 Windows platform building - If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". - Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. +If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". +Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. - ```cmd - mkdir debug && cd debug - "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 > - cmake .. -G "NMake Makefiles" - nmake - ``` +```cmd +mkdir debug && cd debug +"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 > +cmake .. -G "NMake Makefiles" +nmake +``` - If you use the Visual Studio 2019 or 2017: +If you use the Visual Studio 2019 or 2017: - please open a command window by executing "cmd.exe". - Please specify "x64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. +please open a command window by executing "cmd.exe". 
+Please specify "x64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. - ```cmd - mkdir debug && cd debug - "c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 > - cmake .. -G "NMake Makefiles" - nmake - ``` +```cmd +mkdir debug && cd debug +"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 > +cmake .. -G "NMake Makefiles" +nmake +``` - Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depends what architecture your Windows is, then execute commands as follows: +Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depends what architecture your Windows is, then execute commands as follows: - ```cmd - mkdir debug && cd debug - cmake .. -G "NMake Makefiles" - nmake - ``` +```cmd +mkdir debug && cd debug +cmake .. -G "NMake Makefiles" +nmake +``` -- MacOS platform building +## 4.3 MacOS platform building - Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. +Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. - ```shell - mkdir debug && cd debug - cmake .. && cmake --build . - ``` +```shell +mkdir debug && cd debug +cmake .. && cmake --build . +``` # 5. Packaging @@ -286,176 +190,143 @@ To be updated... # 6. 
Installation -- Linux platform installation +## 6.1 Linux platform installation - After building successfully, TDengine can be installed by +After building successfully, TDengine can be installed by - ```bash - sudo make install - ``` +```bash +sudo make install +``` - Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. +Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. - Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. +Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -- Windows platform installation +## 6.2 Windows platform installation - After building successfully, TDengine can be installed by: +After building successfully, TDengine can be installed by: - ```cmd - nmake install - ``` +```cmd +nmake install +``` -- MacOS platform installation +## 6.3 MacOS platform installation - After building successfully, TDengine can be installed by: +After building successfully, TDengine can be installed by: - ```bash - sudo make install - ``` +```bash +sudo make install +``` # 7. Running -- To start the service after installation on linux, in a terminal, use: +## 7.1 Run TDengine on linux - ```bash - sudo systemctl start taosd - ``` +To start the service after installation on linux, in a terminal, use: - Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: +```bash +sudo systemctl start taosd +``` - ```bash - taos - ``` +Then users can use the TDengine CLI to connect the TDengine server. 
In a terminal, use: - If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. +```bash +taos +``` - If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) +If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. - ```bash - ./build/bin/taosd -c test/cfg - ``` +If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) - In another terminal, use the TDengine CLI to connect the server: +```bash +./build/bin/taosd -c test/cfg +``` - ```bash - ./build/bin/taos -c test/cfg - ``` +In another terminal, use the TDengine CLI to connect the server: - option "-c test/cfg" specifies the system configuration file directory. +```bash +./build/bin/taos -c test/cfg +``` -- To start the service after installation on windows, in a terminal, use: +option "-c test/cfg" specifies the system configuration file directory. - ```bash - to be updated - ``` - Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: +## 7.2 Run TDengine on windows - ```bash - to be updated - ``` +To start the service after installation on windows, in a terminal, use: -- To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: +```bash +to be updated +``` - ```bash - sudo launchctl start com.tdengine.taosd - ``` +Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: - Then users can use the TDengine CLI to connect the TDengine server. 
In a terminal, use: +```bash +taos +``` - ```bash - taos - ``` +## 7.3 Run TDengine on MacOS + +To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: + +```bash +sudo launchctl start com.tdengine.taosd +``` + +Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: + +```bash +taos +``` If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. # 8. Testing -- Run the TSIM test script +## 8.1 Run the TSIM test script - TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run - the script with below command: +TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run +the script with below command: - ```bash - cd /root/TDengine/tests/script - ./test.sh -f tsim/db/basic1.sim - ``` +```bash +cd /root/TDengine/tests/script +./test.sh -f tsim/db/basic1.sim +``` -- Run the Python test script - - Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only - work for TDengine enterprise version, you can run the script with below command: +## 8.2 Run the Python test script - ```bash - cd /root/TDengine/tests/system-test - python3 ./test.py -f 2-query/floor.py - ``` +Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only +work for TDengine enterprise version, you can run the script with below command: -- Run unittest - - Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run - the script with below command: +```bash +cd /root/TDengine/tests/system-test +python3 ./test.py -f 2-query/floor.py +``` - ```bash - cd /root/TDengine/tests/unit-test/ - bash test.sh - ``` +## 8.3 
Run unittest -- Smoke Testing - - Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the - script with below command: +Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run +the script with below command: - ```bash - cd /root/TDengine/packaging/smokeTest - ./test_smoking_selfhost.sh - ``` +```bash +cd /root/TDengine/tests/unit-test/ +bash test.sh +``` -- TSBS Test --replace +## 8.4 Smoke Testing - The Time Series Benchmark Suite(TSBS) test script provides a standardized approach to evaluate the performance of various - time series databases, you can run the script with below command: +Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the +script with below command: - Clone the code +```bash +cd /root/TDengine/packaging/smokeTest +./test_smoking_selfhost.sh +``` - ```bash - cd /root && git clone https://github.com/taosdata/tsbs.git && cd tsbs/scripts/tsdbComp - ``` - - Modify IP and host of client and server in `test.ini` - ```ini - clientIP="192.168.0.203" # client ip - clientHost="trd03" # client hostname - serverIP="192.168.0.204" # server ip - serverHost="trd04" # server hostname - ``` - - Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: - - ```ini - serverPass="taosdata123" # server root password - ``` - - Run the following command to start the test: - ```bash - nohup bash tsdbComparison.sh > test.log & - ``` - When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. 
- -- Crash_gen Test - - Crash_gen is a chaotic testing tool developed by TDengine, it can evaluate the resilience of TDengine through simulating - normal operation、intentional fault injection and their combination, you can run the script with below command: - - ```bash - cd /root/TDengine/tests/pytest/ && ./crash_gen.sh - ``` - -Windows platform testing +## Windows platform testing to be updated... -MacOS platform testing +## MacOS platform testing to be updated... From abbc6416328b44ce8d1d2de33e34b513191be5b5 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Wed, 8 Jan 2025 18:49:24 +0800 Subject: [PATCH 010/120] docs: refactor readme --- README.md | 203 +++++++++++++++++++----------------------------- tests/README.md | 43 ++++++++++ 2 files changed, 124 insertions(+), 122 deletions(-) create mode 100644 tests/README.md diff --git a/README.md b/README.md index 8c5ec10df2..5fadbd22d8 100644 --- a/README.md +++ b/README.md @@ -30,14 +30,13 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine 1. [Documentation](#2-documentation) 1. [Prerequisites](#3-prerequisites) 1. [Building](#4-building) -1. [Packaging](#5-packaging) -1. [Installation](#6-installation) -1. [Running](#7-running) -1. [Testing](#8-testing) -1. [Releasing](#9-releasing) -1. [CI/CD](#10-cicd) -1. [Coverage --replace](#11-coverage---replace) -1. [Contributing](#12-contributing) +1. [Installation](#5-installation) +1. [Running](#6-running) +1. [Testing](#7-testing) +1. [Releasing](#8-releasing) +1. [CI/CD](#9-cicd) +1. [Coverage](#10-coverage) +1. [Contributing](#11-contributing) # 1. Introduction @@ -63,42 +62,49 @@ For user manual, system design and architecture, please refer to [TDengine Docum # 3. 
Prerequisites -## 3.1 Install dependencies tools +## 3.1 On Linux -### The required package for linux +### For Ubuntu 18.04 or Later ```bash -apt-get install -y llvm gcc make cmake libssl-dev pkg-config perl g++ lzma curl locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping -``` - -### The required package for windows - -to be updated... - -### The required package for macOS ``` + +### For CentOS 7 + +```bash + +``` + +### For Fedora or Rocky Linux + +```bash + +``` + +## 3.2 On macOS + +Please intall the dependencies with [brew](https://brew.sh/). + +```bash brew install argp-standalone gflags pkgconfig ``` -## 3.2 Get the source codes +## 3.3 On Windows -First of all, you may clone the source codes from github: +Work in Progress. + +## 3.4 Clone the repo + +Clone the repository to the target machine: ```bash git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. - -``` -[url "git@github.com:"] - insteadOf = https://github.com/ -``` - -## 3.3 Special Note -[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. 
+> [!NOTE] +> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). # 4. Building @@ -110,7 +116,7 @@ TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) a To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. -## 4.1 Linux platform building +## 4.1 Build on Linux You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: @@ -129,7 +135,7 @@ make You can use Jemalloc as memory allocator instead of glibc: -``` +```bash apt install autoconf cmake .. -DJEMALLOC_ENABLED=true ``` @@ -143,7 +149,16 @@ aarch64: cmake .. -DCPUTYPE=aarch64 && cmake --build . ``` -## 4.2 Windows platform building +## 4.2 Build on macOS + +Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. + +```shell +mkdir debug && cd debug +cmake .. && cmake --build . +``` + +## 4.3 Build on Windows If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. @@ -175,22 +190,9 @@ cmake .. -G "NMake Makefiles" nmake ``` -## 4.3 MacOS platform building +# 5. Installation -Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. - -```shell -mkdir debug && cd debug -cmake .. && cmake --build . -``` - -# 5. Packaging - -To be updated... - -# 6. 
Installation - -## 6.1 Linux platform installation +## 5.1 Install on Linux After building successfully, TDengine can be installed by @@ -202,15 +204,7 @@ Users can find more information about directories installed on the system in the Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -## 6.2 Windows platform installation - -After building successfully, TDengine can be installed by: - -```cmd -nmake install -``` - -## 6.3 MacOS platform installation +## 5.2 Install on macOS After building successfully, TDengine can be installed by: @@ -218,9 +212,17 @@ After building successfully, TDengine can be installed by: sudo make install ``` -# 7. Running +## 5.3 Install on Windows -## 7.1 Run TDengine on linux +After building successfully, TDengine can be installed by: + +```cmd +nmake install +``` + +# 6. Running + +## 6.1 Run TDengine on Linux To start the service after installation on linux, in a terminal, use: @@ -250,9 +252,9 @@ In another terminal, use the TDengine CLI to connect the server: option "-c test/cfg" specifies the system configuration file directory. -## 7.2 Run TDengine on windows +## 6.2 Run TDengine on Windows -To start the service after installation on windows, in a terminal, use: +To start the service after installation on Windows, in a terminal, use: ```bash to be updated @@ -264,7 +266,7 @@ Then users can use the TDengine CLI to connect the TDengine server. In a termina taos ``` -## 7.3 Run TDengine on MacOS +## 6.3 Run TDengine on macOS To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: @@ -280,77 +282,34 @@ taos If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -# 8. Testing +# 7. 
Testing -## 8.1 Run the TSIM test script +For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md). -TSIM test framework is developed by c++ in the start-up period of TDengine, the test scripts are basic cases, you can run -the script with below command: +# 8. Releasing + +For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases). + +# 9. CI/CD + +Now, Jenkins is mainly used to build CI/CD pipeline for TDengine. To run the tests in the CI/CD pipeline, please run following commands: ```bash -cd /root/TDengine/tests/script -./test.sh -f tsim/db/basic1.sim +cd tests +./run_all_ci_cases.sh -b main # on main branch ``` -## 8.2 Run the Python test script +TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). -Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only -work for TDengine enterprise version, you can run the script with below command: +# 10. Coverage + +Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine). To create the test coverage report (in HTML format) locally, please run following commands: ```bash -cd /root/TDengine/tests/system-test -python3 ./test.py -f 2-query/floor.py -``` - -## 8.3 Run unittest - -Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run -the script with below command: - -```bash -cd /root/TDengine/tests/unit-test/ -bash test.sh -``` - -## 8.4 Smoke Testing - -Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the -script with below command: - -```bash -cd /root/TDengine/packaging/smokeTest -./test_smoking_selfhost.sh -``` - -## Windows platform testing - - to be updated... 
- -## MacOS platform testing - - to be updated... - -# 9. Releasing - -You can access the [Release](https://github.com/taosdata/TDengine/releases) for TDengine release version list. - -# 10. CI/CD -You can run ci script locally with below commands: - -```bash -cd /root/TDengine/tests -./run_all_ci_cases.sh -b main -``` - -# 11. Coverage --replace -You can see coverage result in https://coveralls.io/github/taosdata/TDengine -You can also run coverage script locally. - -```bash -cd /root/TDengine/tests +cd tests ./run_local_coverage.sh ``` -# 12. Contributing +# 11. Contributing -Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project. +Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine. diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000..55e6688a8f --- /dev/null +++ b/tests/README.md @@ -0,0 +1,43 @@ +# Testing TDengine + +## Unit Test + +Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run +the script with below command: + +```bash +cd tests/unit-test/ +bash test.sh +``` + +## System Test + +Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only +work for TDengine enterprise version, you can run the script with below command: + +```bash +cd tests/system-test +python3 ./test.py -f 2-query/floor.py +``` + +## Smoke Test + +Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the +script with commands below: + +```bash +cd /root/TDengine/packaging/smokeTest +./test_smoking_selfhost.sh +``` + +## Legacy Test + +In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. 
To run the legacy test cases, please execute the following commands: + +```bash +cd tests/script +./test.sh -f tsim/db/basic1.sim +``` + + + From a100d96ef3395ab7b890394f3b9eac66f7303ace Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Wed, 8 Jan 2025 19:44:56 +0800 Subject: [PATCH 011/120] docs: collaps different platforms --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 5fadbd22d8..91bcd40f55 100644 --- a/README.md +++ b/README.md @@ -118,6 +118,10 @@ To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in ## 4.1 Build on Linux +
+ +Detailed steps to build on Linux + You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: ```bash @@ -149,8 +153,14 @@ aarch64: cmake .. -DCPUTYPE=aarch64 && cmake --build . ``` +
+ ## 4.2 Build on macOS +
+ +Detailed steps to build on macOS + Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. ```shell @@ -158,8 +168,14 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` +
+ ## 4.3 Build on Windows +
+ +Detailed steps to build on Windows + If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. @@ -189,6 +205,7 @@ mkdir debug && cd debug cmake .. -G "NMake Makefiles" nmake ``` +
# 5. Installation From 40a89c04bf088ababb8b1f48085c9104acb7111d Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Thu, 9 Jan 2025 14:23:00 +0800 Subject: [PATCH 012/120] add note for sql using in parameter binding insertion --- docs/en/07-develop/05-stmt.md | 10 ++++++++++ docs/zh/07-develop/05-stmt.md | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/docs/en/07-develop/05-stmt.md b/docs/en/07-develop/05-stmt.md index 11b055bcf9..c62918fad7 100644 --- a/docs/en/07-develop/05-stmt.md +++ b/docs/en/07-develop/05-stmt.md @@ -15,6 +15,16 @@ When inserting data using parameter binding, it can avoid the resource consumpti **Tips: It is recommended to use parameter binding for data insertion** + :::note + We only recommend using the following two forms of SQL for parameter binding data insertion: + + ```sql + 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + ``` + + ::: + Next, we continue to use smart meters as an example to demonstrate the efficient writing functionality of parameter binding with various language connectors: 1. Prepare a parameterized SQL insert statement for inserting data into the supertable `meters`. This statement allows dynamically specifying subtable names, tags, and column values. diff --git a/docs/zh/07-develop/05-stmt.md b/docs/zh/07-develop/05-stmt.md index 1917a86e74..f30969cb5b 100644 --- a/docs/zh/07-develop/05-stmt.md +++ b/docs/zh/07-develop/05-stmt.md @@ -15,6 +15,16 @@ import TabItem from "@theme/TabItem"; **Tips: 数据写入推荐使用参数绑定方式** + :::note + 我们只推荐使用下面两种形式的 SQL 进行参数绑定写入: + + ```sql + 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + ``` + + ::: + 下面我们继续以智能电表为例,展示各语言连接器使用参数绑定高效写入的功能: 1. 准备一个参数化的 SQL 插入语句,用于向超级表 `meters` 中插入数据。这个语句允许动态地指定子表名、标签和列值。 2. 
循环生成多个子表及其对应的数据行。对于每个子表: From 7c1363f34c22f8638118ad095764968a09faf0a4 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 9 Jan 2025 17:30:04 +0800 Subject: [PATCH 013/120] update README and test/README by charles --- README.md | 36 +++++++++++++++++++++++++----------- tests/README.md | 21 +++++++++++++++++++++ 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 91bcd40f55..953bb7c6b1 100644 --- a/README.md +++ b/README.md @@ -67,19 +67,32 @@ For user manual, system design and architecture, please refer to [TDengine Docum ### For Ubuntu 18.04 or Later ```bash - +sudo apt-get udpate +sudo apt-get install -y gcc cmake build-essential git libjansson-dev libsnappy-dev liblzma-dev zlib1g-dev pkg-config ``` -### For CentOS 7 +### For CentOS 8 ```bash - +sudo yum update +yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core +yum config-manager --set-enabled powertools +yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig ``` ### For Fedora or Rocky Linux ```bash +sudo dnf install -y gcc gcc-c++ make cmake git perl +sudo dnf install -y zlib-devel xz-devel snappy-devel jansson-devel pkgconfig +``` +To build the [taosTools](https://github.com/taosdata/taos-tools) on Fedora or Rocky, the following packages need to be installed. + +```bash +sudo dnf install -y dnf-plugins-core +sudo dnf config-manager --set-enabled powertools +sudo dnf install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static ``` ## 3.2 On macOS @@ -110,7 +123,7 @@ cd TDengine At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. 
-You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to install from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. @@ -140,7 +153,6 @@ make You can use Jemalloc as memory allocator instead of glibc: ```bash -apt install autoconf cmake .. -DJEMALLOC_ENABLED=true ``` @@ -271,18 +283,20 @@ option "-c test/cfg" specifies the system configuration file directory. ## 6.2 Run TDengine on Windows -To start the service after installation on Windows, in a terminal, use: +You can start TDengine server on windows platform with below commands: -```bash -to be updated +```cmd +.\build\bin\taosd.exe -c test\cfg ``` -Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: +In another terminal, use the TDengine CLI to connect the server: -```bash -taos +```cmd +.\build\bin\taos.exe -c test\cfg ``` +option "-c test/cfg" specifies the system configuration file directory. 
+ ## 6.3 Run TDengine on macOS To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: diff --git a/tests/README.md b/tests/README.md index 55e6688a8f..bd5696fc83 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,5 +1,26 @@ # Testing TDengine +## Install the required tools + +Install Python3. + +```bash +apt install python3 +apt install python3-pip +``` + +Install the dependent Python components. + +```bash +pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog +``` + +Install the Python connector for TDengine. + +```bash +pip3 install taospy taos-ws-py +``` + ## Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run From c7b436fb62e93d1ca3af09f0833e5543b9f047b9 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 9 Jan 2025 18:18:03 +0800 Subject: [PATCH 014/120] test:alter coverage describe --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 953bb7c6b1..00306aaf2b 100644 --- a/README.md +++ b/README.md @@ -338,8 +338,11 @@ Latest TDengine test coverage report can be found on [coveralls.io](https://cove ```bash cd tests -./run_local_coverage.sh +bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task +# on main branch and run cases in longtimeruning_cases.task +# for more infomation about options please refer to ./run_local_coverage.sh -h ``` +Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. # 11. 
Contributing From e9945c096940c6a51a445732f282e76dd9d4532e Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Fri, 10 Jan 2025 11:46:29 +0800 Subject: [PATCH 015/120] udpate testing section to add -e parameter for unit test and two packages for centos 8 building section by charles --- README.md | 5 +++-- tests/README.md | 2 +- tests/unit-test/test.sh | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 00306aaf2b..8824365fc6 100644 --- a/README.md +++ b/README.md @@ -77,7 +77,7 @@ sudo apt-get install -y gcc cmake build-essential git libjansson-dev libsnappy-d sudo yum update yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core yum config-manager --set-enabled powertools -yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig +yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static ``` ### For Fedora or Rocky Linux @@ -342,7 +342,8 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task # on main branch and run cases in longtimeruning_cases.task # for more infomation about options please refer to ./run_local_coverage.sh -h ``` -Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. +> [!NOTE] +> Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. # 11. 
Contributing diff --git a/tests/README.md b/tests/README.md index bd5696fc83..9d7f98b564 100644 --- a/tests/README.md +++ b/tests/README.md @@ -27,7 +27,7 @@ Unit test script is the smallest testable part and developed for some function, the script with below command: ```bash -cd tests/unit-test/ +cd tests/unit-test/ -e 0 bash test.sh ``` diff --git a/tests/unit-test/test.sh b/tests/unit-test/test.sh index 21461bc6a5..46fc0aedb3 100755 --- a/tests/unit-test/test.sh +++ b/tests/unit-test/test.sh @@ -7,10 +7,10 @@ function usage() { } ent=1 -while getopts "eh" opt; do +while getopts "e:h" opt; do case $opt in e) - ent=1 + ent="$OPTARG" ;; h) usage From 53b3ff8f6cc5fdede941ec5cfb88df43d9816516 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Fri, 10 Jan 2025 13:38:12 +0800 Subject: [PATCH 016/120] update readme --- README.md | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 8824365fc6..33874cf05d 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,8 @@ For user manual, system design and architecture, please refer to [TDengine Docum ```bash sudo apt-get udpate -sudo apt-get install -y gcc cmake build-essential git libjansson-dev libsnappy-dev liblzma-dev zlib1g-dev pkg-config +sudo apt-get install -y gcc cmake build-essential git libjansson-dev \ + libsnappy-dev liblzma-dev zlib1g-dev pkg-config ``` ### For CentOS 8 @@ -92,7 +93,8 @@ To build the [taosTools](https://github.com/taosdata/taos-tools) on Fedora or Ro ```bash sudo dnf install -y dnf-plugins-core sudo dnf config-manager --set-enabled powertools -sudo dnf install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static +sudo dnf install -y zlib-devel zlib-static xz-devel snappy-devel jansson \ + jansson-devel pkgconfig libatomic libatomic-static libstdc++-static ``` ## 3.2 On macOS @@ -144,8 +146,7 @@ You can run the bash script `build.sh` to build both TDengine and taosTools 
incl It equals to execute following commands: ```bash -mkdir debug -cd debug +mkdir debug && cd debug cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true make ``` @@ -156,10 +157,8 @@ You can use Jemalloc as memory allocator instead of glibc: cmake .. -DJEMALLOC_ENABLED=true ``` -TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. -You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: - -aarch64: +TDengine build script can auto-detect the host machine's architecture on x86, x86-64, arm64 platform. +You can also specify architecture manually by CPUTYPE option: ```bash cmake .. -DCPUTYPE=aarch64 && cmake --build . @@ -223,15 +222,13 @@ nmake ## 5.1 Install on Linux -After building successfully, TDengine can be installed by +After building successfully, TDengine can be installed by: ```bash sudo make install ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. - -Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. +Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. ## 5.2 Install on macOS @@ -279,11 +276,11 @@ In another terminal, use the TDengine CLI to connect the server: ./build/bin/taos -c test/cfg ``` -option "-c test/cfg" specifies the system configuration file directory. +Option `-c test/cfg` specifies the system configuration file directory. 
## 6.2 Run TDengine on Windows -You can start TDengine server on windows platform with below commands: +You can start TDengine server on Windows platform with below commands: ```cmd .\build\bin\taosd.exe -c test\cfg From 8505d9b7a9fe0481c76d55e45406a23221af2cb7 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Sat, 11 Jan 2025 15:49:41 +0800 Subject: [PATCH 017/120] feat: add trivy --- packaging/setup_env.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/packaging/setup_env.sh b/packaging/setup_env.sh index e1a7a26579..2cf1e9e952 100644 --- a/packaging/setup_env.sh +++ b/packaging/setup_env.sh @@ -174,6 +174,7 @@ help() { echo " config_qemu_guest_agent - Configure QEMU guest agent" echo " deploy_docker - Deploy Docker" echo " deploy_docker_compose - Deploy Docker Compose" + echo " install_trivy - Install Trivy" echo " clone_enterprise - Clone the enterprise repository" echo " clone_community - Clone the community repository" echo " clone_taosx - Clone TaosX repository" @@ -1731,6 +1732,32 @@ deploy_docker_compose() { fi } +# Instal trivy +install_trivy() { + # Install jq + install_package jq + # Get latest version + LATEST_VERSION=$(curl -s https://api.github.com/repos/aquasecurity/trivy/releases/latest | jq -r .tag_name) + # Download + if [ -f /etc/debian_version ]; then + wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb + # Install + dpkg -i trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb + + elif [ -f /etc/redhat-release ]; then + wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm + # Install + rpm -ivh trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm + else + echo "Unsupported Linux distribution." + exit 1 + fi + # Check + trivy --version + check_status "Failed to install Trivy" "Trivy installed successfully." $? 
+ rm -rf trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm +} + # Reconfigure cloud-init reconfig_cloud_init() { echo "Reconfiguring cloud-init..." @@ -2004,6 +2031,7 @@ deploy_dev() { install_nginx deploy_docker deploy_docker_compose + install_trivy check_status "Failed to deploy some tools" "Deploy all tools successfully" $? } @@ -2159,6 +2187,9 @@ main() { deploy_docker_compose) deploy_docker_compose ;; + install_trivy) + install_trivy + ;; clone_enterprise) clone_enterprise ;; From 2b10e8a9b78bf71fd439345fc61b248a7fd0f279 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Sat, 11 Jan 2025 16:06:49 +0800 Subject: [PATCH 018/120] feat: add trivy --- packaging/setup_env.sh | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/packaging/setup_env.sh b/packaging/setup_env.sh index 2cf1e9e952..9a624bfa9e 100644 --- a/packaging/setup_env.sh +++ b/packaging/setup_env.sh @@ -317,6 +317,17 @@ add_config_if_not_exist() { grep -qF -- "$config" "$file" || echo "$config" >> "$file" } +# Function to check if a tool is installed +check_installed() { + local command_name="$1" + if command -v "$command_name" >/dev/null 2>&1; then + echo "$command_name is already installed. Skipping installation." + return 0 + else + echo "$command_name is not installed." + return 1 + fi +} # General error handling function check_status() { local message_on_failure="$1" @@ -585,9 +596,12 @@ centos_skip_check() { # Deploy cmake deploy_cmake() { # Check if cmake is installed - if command -v cmake >/dev/null 2>&1; then - echo "Cmake is already installed. Skipping installation." - cmake --version + # if command -v cmake >/dev/null 2>&1; then + # echo "Cmake is already installed. Skipping installation." 
+ # cmake --version + # return + # fi + if check_installed "cmake"; then return fi install_package "cmake3" @@ -1059,11 +1073,13 @@ deploy_go() { GOPATH_DIR="/root/go" # Check if Go is installed - if command -v go >/dev/null 2>&1; then - echo "Go is already installed. Skipping installation." + # if command -v go >/dev/null 2>&1; then + # echo "Go is already installed. Skipping installation." + # return + # fi + if check_installed "gp"; then return fi - # Fetch the latest version number of Go GO_LATEST_DATA=$(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s https://golang.google.cn/VERSION?m=text) GO_LATEST_VERSION=$(echo "$GO_LATEST_DATA" | grep -oP 'go[0-9]+\.[0-9]+\.[0-9]+') @@ -1734,6 +1750,16 @@ deploy_docker_compose() { # Instal trivy install_trivy() { + echo -e "${YELLOW}Installing Trivy...${NO_COLOR}" + # Check if Trivy is already installed + # if command -v trivy >/dev/null 2>&1; then + # echo "Trivy is already installed. Skipping installation." + # trivy --version + # return + # fi + if check_installed "trivy"; then + return + fi # Install jq install_package jq # Get latest version From 1f656c0ef0624d6fdd18c16bf7fa65fd17dc4f81 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Sat, 11 Jan 2025 16:08:35 +0800 Subject: [PATCH 019/120] fix: typo --- packaging/setup_env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/setup_env.sh b/packaging/setup_env.sh index 9a624bfa9e..32451072ab 100644 --- a/packaging/setup_env.sh +++ b/packaging/setup_env.sh @@ -1077,7 +1077,7 @@ deploy_go() { # echo "Go is already installed. Skipping installation." 
# return # fi - if check_installed "gp"; then + if check_installed "go"; then return fi # Fetch the latest version number of Go From dc4feb8690496fede5c2b5efc4917035b70827ca Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Mon, 13 Jan 2025 15:01:58 +0800 Subject: [PATCH 020/120] improve stmt doc, fix jdbc demo resultset bug --- docs/en/07-develop/05-stmt.md | 7 +++++-- .../src/main/java/com/taosdata/example/JdbcBasicDemo.java | 2 -- .../src/main/java/com/taosdata/example/JdbcDemo.java | 5 +++-- docs/zh/07-develop/05-stmt.md | 7 +++++-- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/docs/en/07-develop/05-stmt.md b/docs/en/07-develop/05-stmt.md index c62918fad7..16fe156cc3 100644 --- a/docs/en/07-develop/05-stmt.md +++ b/docs/en/07-develop/05-stmt.md @@ -19,8 +19,11 @@ When inserting data using parameter binding, it can avoid the resource consumpti We only recommend using the following two forms of SQL for parameter binding data insertion: ```sql - 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) - 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + a. Subtables already exists: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?) + b. Automatic table creation on insert: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) 
``` ::: diff --git a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java index 0de386447c..0a63504b91 100644 --- a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java +++ b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java @@ -1,6 +1,4 @@ package com.taosdata.example; - -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.AbstractStatement; import java.sql.*; diff --git a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java index ec4adf8db9..7fba500c49 100644 --- a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java +++ b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java @@ -104,8 +104,9 @@ public class JdbcDemo { private void executeQuery(String sql) { long start = System.currentTimeMillis(); - try (Statement statement = connection.createStatement()) { - ResultSet resultSet = statement.executeQuery(sql); + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(sql)) { + long end = System.currentTimeMillis(); printSql(sql, true, (end - start)); Util.printResult(resultSet); diff --git a/docs/zh/07-develop/05-stmt.md b/docs/zh/07-develop/05-stmt.md index f30969cb5b..5f218689be 100644 --- a/docs/zh/07-develop/05-stmt.md +++ b/docs/zh/07-develop/05-stmt.md @@ -19,8 +19,11 @@ import TabItem from "@theme/TabItem"; 我们只推荐使用下面两种形式的 SQL 进行参数绑定写入: ```sql - 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) - 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + 一、确定子表存在: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?) + 二、自动建表: + 1. 
INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) ``` ::: From 847ce16506490acfc8dce326ea05bfbfe2e7c69c Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 13 Jan 2025 10:31:46 +0800 Subject: [PATCH 021/120] fix stmt/stmt2 unit test --- source/client/test/stmt2Test.cpp | 298 ++++++++++++++++++------------- source/client/test/stmtTest.cpp | 72 +++++--- 2 files changed, 213 insertions(+), 157 deletions(-) diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index 1ce844e98f..3e21721c47 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -112,6 +112,11 @@ void do_query(TAOS* taos, const char* sql) { TAOS_RES* result = taos_query(taos, sql); // printf("sql: %s\n", sql); int code = taos_errno(result); + while (code == TSDB_CODE_MND_DB_IN_CREATING || code == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + result = taos_query(taos, sql); + code = taos_errno(result); + } if (code != TSDB_CODE_SUCCESS) { printf("query failen sql : %s\n errstr : %s\n", sql, taos_errstr(result)); ASSERT_EQ(taos_errno(result), TSDB_CODE_SUCCESS); @@ -122,9 +127,9 @@ void do_query(TAOS* taos, const char* sql) { void do_stmt(TAOS* taos, TAOS_STMT2_OPTION* option, const char* sql, int CTB_NUMS, int ROW_NUMS, int CYC_NUMS, bool hastags, bool createTable) { printf("test sql : %s\n", sql); - do_query(taos, "drop database if exists testdb1"); - do_query(taos, "create database IF NOT EXISTS testdb1"); - do_query(taos, "create stable testdb1.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))"); + do_query(taos, "drop database if exists stmt2_testdb_1"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_1"); + do_query(taos, "create stable stmt2_testdb_1.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))"); TAOS_STMT2* stmt = taos_stmt2_init(taos, option); ASSERT_NE(stmt, 
nullptr); @@ -139,7 +144,7 @@ void do_stmt(TAOS* taos, TAOS_STMT2_OPTION* option, const char* sql, int CTB_NUM sprintf(tbs[i], "ctb_%d", i); if (createTable) { char* tmp = (char*)taosMemoryMalloc(sizeof(char) * 100); - sprintf(tmp, "create table testdb1.%s using testdb1.stb tags(0, 'after')", tbs[i]); + sprintf(tmp, "create table stmt2_testdb_1.%s using stmt2_testdb_1.stb tags(0, 'after')", tbs[i]); do_query(taos, tmp); } } @@ -235,14 +240,15 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb2"); - do_query(taos, "create database IF NOT EXISTS testdb2 PRECISION 'ns'"); + do_query(taos, "drop database if exists stmt2_testdb_2"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_2 PRECISION 'ns'"); do_query(taos, - "create stable testdb2.stb (ts timestamp, b binary(10)) tags(t1 " + "create stable stmt2_testdb_2.stb (ts timestamp, b binary(10)) tags(t1 " "int, t2 binary(10))"); do_query( taos, - "create stable if not exists testdb2.all_stb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, " + "create stable if not exists stmt2_testdb_2.all_stb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 " + "bigint, " "v6 tinyint unsigned, v7 smallint unsigned, v8 int unsigned, v9 bigint unsigned, v10 float, v11 double, v12 " "binary(20), v13 varbinary(20), v14 geometry(100), v15 nchar(20))tags(tts timestamp, tv1 bool, tv2 tinyint, tv3 " "smallint, tv4 int, tv5 bigint, tv6 tinyint unsigned, tv7 smallint unsigned, tv8 int unsigned, tv9 bigint " @@ -251,7 +257,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 1 : test super table { - const char* sql = "insert into testdb2.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, 
{"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, @@ -263,7 +269,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { { // case 2 : no tag - const char* sql = "insert into testdb2.stb(ts,b,tbname) values(?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(ts,b,tbname) values(?,?,?)"; TAOS_FIELD_ALL expectedFields[3] = {{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, {"b", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}, {"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}}; @@ -273,7 +279,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 3 : random order { - const char* sql = "insert into testdb2.stb(tbname,ts,t2,b,t1) values(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(tbname,ts,t2,b,t1) values(?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, @@ -285,7 +291,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 4 : random order 2 { - const char* sql = "insert into testdb2.stb(ts,tbname,b,t2,t1) values(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(ts,tbname,b,t2,t1) values(?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, {"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"b", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}, @@ -297,7 +303,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 5 : 'db'.'stb' { - const char* sql = "insert into 'testdb2'.'stb'(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + const char* sql = "insert into 'stmt2_testdb_2'.'stb'(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 
2, 0, 8, TAOS_FIELD_COL}, @@ -309,7 +315,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 6 : use db { - do_query(taos, "use testdb2"); + do_query(taos, "use stmt2_testdb_2"); const char* sql = "insert into stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, @@ -322,7 +328,7 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 7 : less param { - const char* sql = "insert into testdb2.stb(ts,tbname) values(?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(ts,tbname) values(?,?)"; TAOS_FIELD_ALL expectedFields[2] = {{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, {"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}}; printf("case 7 : %s\n", sql); @@ -378,67 +384,68 @@ TEST(stmt2Case, insert_stb_get_fields_Test) { // case 1 : add in main TD-33353 { - const char* sql = "insert into testdb2.stb(t1,t2,ts,b,tbname) values(1,?,?,'abc',?)"; + const char* sql = "insert into stmt2_testdb_2.stb(t1,t2,ts,b,tbname) values(1,?,?,'abc',?)"; printf("case 1dif : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); } // case 2 : no pk { - const char* sql = "insert into testdb2.stb(b,tbname) values(?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(b,tbname) values(?,?)"; printf("case 2 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION); } // case 3 : no tbname and tag(not support bind) { - const char* sql = "insert into testdb2.stb(ts,b) values(?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(ts,b) values(?,?)"; printf("case 3 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION); } // case 4 : no col and tag(not support bind) { - const char* sql = "insert into testdb2.stb(tbname) values(?)"; + const char* sql = "insert into stmt2_testdb_2.stb(tbname) values(?)"; printf("case 4 : %s\n", sql); getFieldsError(taos, sql, 
TSDB_CODE_TSC_INVALID_OPERATION); } // case 5 : no field name { - const char* sql = "insert into testdb2.stb(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(?,?,?,?,?)"; printf("case 5 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } // case 6 : test super table not exist { - const char* sql = "insert into testdb2.nstb(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.nstb(?,?,?,?,?)"; printf("case 6 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } // case 7 : no col { - const char* sql = "insert into testdb2.stb(t1,t2,tbname) values(?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(t1,t2,tbname) values(?,?,?)"; printf("case 7 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION); } // case 8 : wrong para nums { - const char* sql = "insert into testdb2.stb(ts,b,tbname) values(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_2.stb(ts,b,tbname) values(?,?,?,?,?)"; printf("case 8 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); } // case 9 : wrong simbol { - const char* sql = "insert into testdb2.stb(t1,t2,ts,b,tbname) values(*,*,*,*,*)"; + const char* sql = "insert into stmt2_testdb_2.stb(t1,t2,ts,b,tbname) values(*,*,*,*,*)"; printf("case 9 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); } + do_query(taos, "drop database if exists stmt2_testdb_2"); taos_close(taos); } @@ -446,24 +453,25 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb3"); - do_query(taos, "create database IF NOT EXISTS testdb3 PRECISION 'ns'"); + do_query(taos, "drop database if exists stmt2_testdb_3"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_3 PRECISION 'ns'"); do_query(taos, - "create stable testdb3.stb (ts timestamp, b binary(10)) tags(t1 " + "create stable 
stmt2_testdb_3.stb (ts timestamp, b binary(10)) tags(t1 " "int, t2 binary(10))"); do_query( taos, - "create stable if not exists testdb3.all_stb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, " + "create stable if not exists stmt2_testdb_3.all_stb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 " + "bigint, " "v6 tinyint unsigned, v7 smallint unsigned, v8 int unsigned, v9 bigint unsigned, v10 float, v11 double, v12 " "binary(20), v13 varbinary(20), v14 geometry(100), v15 nchar(20))tags(tts timestamp, tv1 bool, tv2 tinyint, tv3 " "smallint, tv4 int, tv5 bigint, tv6 tinyint unsigned, tv7 smallint unsigned, tv8 int unsigned, tv9 bigint " "unsigned, tv10 float, tv11 double, tv12 binary(20), tv13 varbinary(20), tv14 geometry(100), tv15 nchar(20));"); - do_query(taos, "CREATE TABLE testdb3.t0 USING testdb3.stb (t1,t2) TAGS (7,'Cali');"); + do_query(taos, "CREATE TABLE stmt2_testdb_3.t0 USING stmt2_testdb_3.stb (t1,t2) TAGS (7,'Cali');"); printf("support case \n"); // case 1 : test child table already exist { - const char* sql = "INSERT INTO testdb3.t0(ts,b)using testdb3.stb (t1,t2) TAGS(?,?) VALUES (?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.t0(ts,b)using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) VALUES (?,?)"; TAOS_FIELD_ALL expectedFields[4] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, @@ -474,7 +482,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 2 : insert clause { - const char* sql = "INSERT INTO testdb3.? using testdb3.stb (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.? using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) 
(ts,b)VALUES(?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, @@ -486,7 +494,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 3 : insert child table not exist { - const char* sql = "INSERT INTO testdb3.d1 using testdb3.stb (t1,t2)TAGS(?,?) (ts,b)VALUES(?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.d1 using stmt2_testdb_3.stb (t1,t2)TAGS(?,?) (ts,b)VALUES(?,?)"; TAOS_FIELD_ALL expectedFields[4] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}, @@ -497,7 +505,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 4 : random order { - const char* sql = "INSERT INTO testdb3.? using testdb3.stb (t2,t1)TAGS(?,?) (b,ts)VALUES(?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.? using stmt2_testdb_3.stb (t2,t1)TAGS(?,?) (b,ts)VALUES(?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, @@ -509,7 +517,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 5 : less para { - const char* sql = "insert into testdb3.? using testdb3.stb (t2)tags(?) (ts)values(?)"; + const char* sql = "insert into stmt2_testdb_3.? using stmt2_testdb_3.stb (t2)tags(?) (ts)values(?)"; TAOS_FIELD_ALL expectedFields[3] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}}; @@ -520,7 +528,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 6 : insert into db.? using db.stb tags(?, ?) values(?,?) // no field name { - const char* sql = "insert into testdb3.? 
using testdb3.stb tags(?, ?) values(?,?)"; + const char* sql = "insert into stmt2_testdb_3.? using stmt2_testdb_3.stb tags(?, ?) values(?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, @@ -533,7 +541,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 7 : insert into db.d0 (ts)values(?) // less para { - const char* sql = "insert into testdb3.t0 (ts)values(?)"; + const char* sql = "insert into stmt2_testdb_3.t0 (ts)values(?)"; TAOS_FIELD_ALL expectedFields[1] = {{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL}}; printf("case 7 : %s\n", sql); getFieldsSuccess(taos, sql, expectedFields, 1); @@ -541,7 +549,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 8 : 'db' 'stb' { - const char* sql = "INSERT INTO 'testdb3'.? using 'testdb3'.'stb' (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)"; + const char* sql = "INSERT INTO 'stmt2_testdb_3'.? using 'stmt2_testdb_3'.'stb' (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG}, @@ -553,7 +561,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 9 : use db { - do_query(taos, "use testdb3"); + do_query(taos, "use stmt2_testdb_3"); const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)"; TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME}, {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG}, @@ -608,38 +616,40 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) { // case 1 : test super table not exist { - const char* sql = "INSERT INTO testdb3.?(ts,b)using testdb3.nstb (t1,t2) TAGS(?,?) 
VALUES (?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.?(ts,b)using stmt2_testdb_3.nstb (t1,t2) TAGS(?,?) VALUES (?,?)"; printf("case 1 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } // case 2 : no pk { - const char* sql = "INSERT INTO testdb3.?(ts,b)using testdb3.nstb (t1,t2) TAGS(?,?) (n)VALUES (?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.?(ts,b)using stmt2_testdb_3.nstb (t1,t2) TAGS(?,?) (n)VALUES (?)"; printf("case 2 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } // case 3 : less param and no filed name { - const char* sql = "INSERT INTO testdb3.?(ts,b)using testdb3.stb TAGS(?)VALUES (?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.?(ts,b)using stmt2_testdb_3.stb TAGS(?)VALUES (?,?)"; printf("case 3 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } // case 4 : none para for ctbname { - const char* sql = "INSERT INTO testdb3.d0 using testdb3.stb values(?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_3.d0 using stmt2_testdb_3.stb values(?,?)"; printf("case 4 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR); } // case 5 : none para for ctbname { - const char* sql = "insert into ! using testdb3.stb tags(?, ?) values(?,?)"; + const char* sql = "insert into ! using stmt2_testdb_3.stb tags(?, ?) 
values(?,?)"; printf("case 5 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR); } + + do_query(taos, "drop database if exists stmt2_testdb_3"); taos_close(taos); } @@ -647,19 +657,20 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb4"); - do_query(taos, "create database IF NOT EXISTS testdb4 PRECISION 'ms'"); - do_query(taos, "CREATE TABLE testdb4.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); - do_query(taos, - "create table if not exists testdb4.all_ntb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 " - "bigint, v6 tinyint unsigned, v7 smallint unsigned, v8 int unsigned, v9 bigint unsigned, v10 float, v11 " - "double, v12 binary(20), v13 varbinary(20), v14 geometry(100), v15 nchar(20));"); + do_query(taos, "drop database if exists stmt2_testdb_4"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_4 PRECISION 'ms'"); + do_query(taos, "CREATE TABLE stmt2_testdb_4.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); + do_query( + taos, + "create table if not exists stmt2_testdb_4.all_ntb(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 " + "bigint, v6 tinyint unsigned, v7 smallint unsigned, v8 int unsigned, v9 bigint unsigned, v10 float, v11 " + "double, v12 binary(20), v13 varbinary(20), v14 geometry(100), v15 nchar(20));"); printf("support case \n"); // case 1 : test normal table no field name { - const char* sql = "INSERT INTO testdb4.ntb VALUES(?,?,?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_4.ntb VALUES(?,?,?,?)"; TAOS_FIELD_ALL expectedFields[4] = {{"nts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0, 8, TAOS_FIELD_COL}, {"nb", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}, {"nvc", TSDB_DATA_TYPE_BINARY, 0, 0, 18, TAOS_FIELD_COL}, @@ -670,7 +681,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { // case 2 : test random order { - const char* sql 
= "INSERT INTO testdb4.ntb (ni,nb,nvc,nts)VALUES(?,?,?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_4.ntb (ni,nb,nvc,nts)VALUES(?,?,?,?)"; TAOS_FIELD_ALL expectedFields[4] = {{"ni", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_COL}, {"nb", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}, {"nvc", TSDB_DATA_TYPE_BINARY, 0, 0, 18, TAOS_FIELD_COL}, @@ -681,7 +692,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { // case 3 : less param { - const char* sql = "INSERT INTO testdb4.ntb (nts)VALUES(?)"; + const char* sql = "INSERT INTO stmt2_testdb_4.ntb (nts)VALUES(?)"; TAOS_FIELD_ALL expectedFields[1] = {{"nts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0, 8, TAOS_FIELD_COL}}; printf("case 3 : %s\n", sql); getFieldsSuccess(taos, sql, expectedFields, 1); @@ -689,7 +700,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { // case 4 : test all types { - const char* sql = "insert into testdb4.all_ntb values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_4.all_ntb values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; TAOS_FIELD_ALL expectedFields[16] = {{"ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0, 8, TAOS_FIELD_COL}, {"v1", TSDB_DATA_TYPE_BOOL, 0, 0, 1, TAOS_FIELD_COL}, {"v2", TSDB_DATA_TYPE_TINYINT, 0, 0, 1, TAOS_FIELD_COL}, @@ -721,26 +732,29 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { // case 2 : normal table must have tbnam { - const char* sql = "insert into testdb4.? values(?,?)"; + const char* sql = "insert into stmt2_testdb_4.? 
values(?,?)"; printf("case 2 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_TABLE_NOT_EXIST); } // case 3 : wrong para nums { - const char* sql = "insert into testdb4.ntb(nts,ni) values(?,?,?,?,?)"; + const char* sql = "insert into stmt2_testdb_4.ntb(nts,ni) values(?,?,?,?,?)"; printf("case 3 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); } + + do_query(taos, "drop database if exists stmt2_testdb_4"); + taos_close(taos); } TEST(stmt2Case, select_get_fields_Test) { TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb5"); - do_query(taos, "create database IF NOT EXISTS testdb5 PRECISION 'ns'"); - do_query(taos, "use testdb5"); - do_query(taos, "CREATE TABLE testdb5.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); + do_query(taos, "drop database if exists stmt2_testdb_5"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_5 PRECISION 'ns'"); + do_query(taos, "use stmt2_testdb_5"); + do_query(taos, "CREATE TABLE stmt2_testdb_5.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); { // case 1 : const char* sql = "select * from ntb where ts = ?"; @@ -761,6 +775,8 @@ TEST(stmt2Case, select_get_fields_Test) { printf("case 3 : %s\n", sql); getFieldsError(taos, sql, TSDB_CODE_PAR_SYNTAX_ERROR); } + + do_query(taos, "drop database if exists stmt2_testdb_5"); taos_close(taos); } @@ -797,9 +813,9 @@ TEST(stmt2Case, stmt2_init_prepare_Test) { ASSERT_NE(stmt, nullptr); ASSERT_EQ(((STscStmt2*)stmt)->db, nullptr); - code = taos_stmt2_prepare(stmt, "insert into 'testdb5'.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)", 0); + code = taos_stmt2_prepare(stmt, "insert into 'stmt2_testdb_5'.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)", 0); ASSERT_NE(stmt, nullptr); - ASSERT_STREQ(((STscStmt2*)stmt)->db, "testdb5"); // add in main TD-33332 + ASSERT_STREQ(((STscStmt2*)stmt)->db, "stmt2_testdb_5"); // add in main TD-33332 
taos_stmt2_close(stmt); } @@ -824,22 +840,28 @@ TEST(stmt2Case, stmt2_stb_insert) { ASSERT_NE(taos, nullptr); // normal TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL}; - { do_stmt(taos, &option, "insert into `testdb1`.`stb` (tbname,ts,b,t1,t2) values(?,?,?,?,?)", 3, 3, 3, true, true); } { - do_stmt(taos, &option, "insert into `testdb1`.? using `testdb1`.`stb` tags(?,?) values(?,?)", 3, 3, 3, true, true); + do_stmt(taos, &option, "insert into `stmt2_testdb_1`.`stb` (tbname,ts,b,t1,t2) values(?,?,?,?,?)", 3, 3, 3, true, + true); + } + { + do_stmt(taos, &option, "insert into `stmt2_testdb_1`.? using `stmt2_testdb_1`.`stb` tags(?,?) values(?,?)", 3, 3, 3, + true, true); } // async option = {0, true, true, stmtAsyncQueryCb, NULL}; - { do_stmt(taos, &option, "insert into testdb1.stb (ts,b,tbname,t1,t2) values(?,?,?,?,?)", 3, 3, 3, true, true); } { - do_stmt(taos, &option, "insert into testdb1.? using testdb1.stb (t1,t2)tags(?,?) (ts,b)values(?,?)", 3, 3, 3, true, - true); + do_stmt(taos, &option, "insert into stmt2_testdb_1.stb (ts,b,tbname,t1,t2) values(?,?,?,?,?)", 3, 3, 3, true, true); + } + { + do_stmt(taos, &option, "insert into stmt2_testdb_1.? using stmt2_testdb_1.stb (t1,t2)tags(?,?) (ts,b)values(?,?)", + 3, 3, 3, true, true); } // { do_stmt(taos, &option, "insert into db.? values(?,?)", 3, 3, 3, false, true); } // interlace = 0 & use db] - do_query(taos, "use testdb1"); + do_query(taos, "use stmt2_testdb_1"); option = {0, false, false, NULL, NULL}; { do_stmt(taos, &option, "insert into stb (tbname,ts,b) values(?,?,?)", 3, 3, 3, false, true); } { do_stmt(taos, &option, "insert into ? using stb (t1,t2)tags(?,?) (ts,b)values(?,?)", 3, 3, 3, true, true); } @@ -851,6 +873,7 @@ TEST(stmt2Case, stmt2_stb_insert) { option = {0, true, true, NULL, NULL}; { do_stmt(taos, &option, "insert into ? 
values(?,?)", 3, 3, 3, false, true); } + do_query(taos, "drop database if exists stmt2_testdb_1"); taos_close(taos); } @@ -858,10 +881,10 @@ TEST(stmt2Case, stmt2_stb_insert) { TEST(stmt2Case, stmt2_insert_non_statndard) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists example_all_type_stmt1"); - do_query(taos, "create database IF NOT EXISTS example_all_type_stmt1"); + do_query(taos, "drop database if exists stmt2_testdb_6"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_6"); do_query(taos, - "create stable example_all_type_stmt1.stb1 (ts timestamp, int_col int,long_col bigint,double_col " + "create stable stmt2_testdb_6.stb1 (ts timestamp, int_col int,long_col bigint,double_col " "double,bool_col bool,binary_col binary(20),nchar_col nchar(20),varbinary_col varbinary(20),geometry_col " "geometry(200)) tags(int_tag int,long_tag bigint,double_tag double,bool_tag bool,binary_tag " "binary(20),nchar_tag nchar(20),varbinary_tag varbinary(20),geometry_tag geometry(200));"); @@ -872,7 +895,7 @@ TEST(stmt2Case, stmt2_insert_non_statndard) { { TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); - const char* sql = "INSERT INTO example_all_type_stmt1.stb1 (ts,int_tag,tbname) VALUES (?,?,?)"; + const char* sql = "INSERT INTO stmt2_testdb_6.stb1 (ts,int_tag,tbname) VALUES (?,?,?)"; int code = taos_stmt2_prepare(stmt, sql, 0); checkError(stmt, code); int total_affect_rows = 0; @@ -912,9 +935,8 @@ TEST(stmt2Case, stmt2_insert_non_statndard) { { TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); - const char* sql = - "INSERT INTO example_all_type_stmt1.stb1 (binary_tag,int_col,tbname,ts,int_tag) VALUES (?,?,?,?,?)"; - int code = taos_stmt2_prepare(stmt, sql, 0); + const char* sql = "INSERT INTO stmt2_testdb_6.stb1 (binary_tag,int_col,tbname,ts,int_tag) VALUES (?,?,?,?,?)"; + int code = taos_stmt2_prepare(stmt, sql, 0); 
checkError(stmt, code); int tag_i = 0; @@ -954,6 +976,7 @@ TEST(stmt2Case, stmt2_insert_non_statndard) { taos_stmt2_close(stmt); } + do_query(taos, "drop database if exists stmt2_testdb_6"); taos_close(taos); } @@ -961,10 +984,10 @@ TEST(stmt2Case, stmt2_insert_non_statndard) { TEST(stmt2Case, stmt2_insert_db) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists example_all_type_stmt1"); - do_query(taos, "create database IF NOT EXISTS example_all_type_stmt1"); + do_query(taos, "drop database if exists stmt2_testdb_12"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_12"); do_query(taos, - "create stable `example_all_type_stmt1`.`stb1` (ts timestamp, int_col int,long_col bigint,double_col " + "create stable `stmt2_testdb_12`.`stb1` (ts timestamp, int_col int,long_col bigint,double_col " "double,bool_col bool,binary_col binary(20),nchar_col nchar(20),varbinary_col varbinary(20),geometry_col " "geometry(200)) tags(int_tag int,long_tag bigint,double_tag double,bool_tag bool,binary_tag " "binary(20),nchar_tag nchar(20),varbinary_tag varbinary(20),geometry_tag geometry(200));"); @@ -973,7 +996,7 @@ TEST(stmt2Case, stmt2_insert_db) { TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); - const char* sql = "INSERT INTO `example_all_type_stmt1`.`stb1` (ts,int_tag,tbname) VALUES (?,?,?)"; + const char* sql = "INSERT INTO `stmt2_testdb_12`.`stb1` (ts,int_tag,tbname) VALUES (?,?,?)"; int code = taos_stmt2_prepare(stmt, sql, 0); checkError(stmt, code); @@ -1006,38 +1029,38 @@ TEST(stmt2Case, stmt2_insert_db) { ASSERT_EQ(total_affect_rows, 12); taos_stmt2_close(stmt); + do_query(taos, "drop database if exists stmt2_testdb_12"); taos_close(taos); } TEST(stmt2Case, stmt2_query) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb7"); - do_query(taos, "create database IF 
NOT EXISTS testdb7"); - do_query(taos, "create stable testdb7.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))"); + do_query(taos, "drop database if exists stmt2_testdb_7"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_7"); + do_query(taos, "create stable stmt2_testdb_7.stb (ts timestamp, b binary(10)) tags(t1 int, t2 binary(10))"); do_query(taos, - "insert into testdb7.tb1 using testdb7.stb tags(1,'abc') values(1591060628000, " + "insert into stmt2_testdb_7.tb1 using stmt2_testdb_7.stb tags(1,'abc') values(1591060628000, " "'abc'),(1591060628001,'def'),(1591060628002, 'hij')"); do_query(taos, - "insert into testdb7.tb2 using testdb7.stb tags(2,'xyz') values(1591060628000, " - "'abc'),(1591060628001,'def'),(1591060628002, 'hij')"); - do_query(taos, "use testdb7"); + "insert into stmt2_testdb_7.tb2 using stmt2_testdb_7.stb tags(2,'xyz') values(1591060628000, " + "'abc'),(1591060628001,'def'),(1591060628004, 'hij')"); + do_query(taos, "use stmt2_testdb_7"); TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL}; TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); - const char* sql = "select * from testdb7.stb where ts = ? 
and tbname = ?"; + const char* sql = "select * from stmt2_testdb_7.stb where ts = ?"; int code = taos_stmt2_prepare(stmt, sql, 0); checkError(stmt, code); int t64_len[1] = {sizeof(int64_t)}; int b_len[1] = {3}; int64_t ts = 1591060628000; - TAOS_STMT2_BIND params[2] = {{TSDB_DATA_TYPE_TIMESTAMP, &ts, t64_len, NULL, 1}, - {TSDB_DATA_TYPE_BINARY, (void*)"tb1", b_len, NULL, 1}}; - TAOS_STMT2_BIND* paramv = ¶ms[0]; + TAOS_STMT2_BIND params = {TSDB_DATA_TYPE_TIMESTAMP, &ts, t64_len, NULL, 1}; + TAOS_STMT2_BIND* paramv = ¶ms; TAOS_STMT2_BINDV bindv = {1, NULL, NULL, ¶mv}; code = taos_stmt2_bind_param(stmt, &bindv, -1); checkError(stmt, code); @@ -1048,15 +1071,31 @@ TEST(stmt2Case, stmt2_query) { TAOS_RES* pRes = taos_stmt2_result(stmt); ASSERT_NE(pRes, nullptr); - int getRecordCounts = 0; - TAOS_ROW row; - while ((row = taos_fetch_row(pRes))) { + int getRecordCounts = 0; + while ((taos_fetch_row(pRes))) { + getRecordCounts++; + } + ASSERT_EQ(getRecordCounts, 2); + // test 1 result + ts = 1591060628004; + params = {TSDB_DATA_TYPE_TIMESTAMP, &ts, t64_len, NULL, 1}; + code = taos_stmt2_bind_param(stmt, &bindv, -1); + checkError(stmt, code); + + taos_stmt2_exec(stmt, NULL); + checkError(stmt, code); + + pRes = taos_stmt2_result(stmt); + ASSERT_NE(pRes, nullptr); + + getRecordCounts = 0; + while ((taos_fetch_row(pRes))) { getRecordCounts++; } ASSERT_EQ(getRecordCounts, 1); // taos_free_result(pRes); - taos_stmt2_close(stmt); + do_query(taos, "drop database if exists stmt2_testdb_7"); taos_close(taos); } @@ -1064,16 +1103,16 @@ TEST(stmt2Case, stmt2_ntb_insert) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL}; - do_query(taos, "drop database if exists testdb8"); - do_query(taos, "create database IF NOT EXISTS testdb8"); - do_query(taos, "create table testdb8.ntb(ts timestamp, b binary(10))"); - do_query(taos, "use testdb8"); + do_query(taos, "drop database if exists 
stmt2_testdb_8"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_8"); + do_query(taos, "create table stmt2_testdb_8.ntb(ts timestamp, b binary(10))"); + do_query(taos, "use stmt2_testdb_8"); TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); int total_affected_rows = 0; - const char* sql = "insert into testdb8.ntb values(?,?)"; + const char* sql = "insert into stmt2_testdb_8.ntb values(?,?)"; int code = taos_stmt2_prepare(stmt, sql, 0); checkError(stmt, code); for (int i = 0; i < 3; i++) { @@ -1101,6 +1140,7 @@ TEST(stmt2Case, stmt2_ntb_insert) { ASSERT_EQ(total_affected_rows, 9); taos_stmt2_close(stmt); + do_query(taos, "drop database if exists stmt2_testdb_8"); taos_close(taos); } @@ -1125,7 +1165,7 @@ TEST(stmt2Case, stmt2_status_Test) { ASSERT_EQ(code, TSDB_CODE_TSC_STMT_API_ERROR); ASSERT_STREQ(taos_stmt2_error(stmt), "Stmt API usage error"); - const char* sql = "insert into testdb9.ntb values(?,?)"; + const char* sql = "insert into stmt2_testdb_9.ntb values(?,?)"; code = taos_stmt2_prepare(stmt, sql, 0); ASSERT_EQ(code, TSDB_CODE_TSC_STMT_API_ERROR); ASSERT_STREQ(taos_stmt2_error(stmt), "Stmt API usage error"); @@ -1136,9 +1176,9 @@ TEST(stmt2Case, stmt2_status_Test) { TEST(stmt2Case, stmt2_nchar) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); - do_query(taos, "drop database if exists testdb10;"); - do_query(taos, "create database IF NOT EXISTS testdb10;"); - do_query(taos, "use testdb10;"); + do_query(taos, "drop database if exists stmt2_testdb_10;"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_10;"); + do_query(taos, "use stmt2_testdb_10;"); do_query(taos, "create table m1 (ts timestamp, blob2 nchar(10), blob nchar(10),blob3 nchar(10),blob4 nchar(10),blob5 " "nchar(10))"); @@ -1244,6 +1284,7 @@ TEST(stmt2Case, stmt2_nchar) { ASSERT_EQ(affected_rows, 10); taos_stmt2_close(stmt); + do_query(taos, "drop database if exists stmt2_testdb_10;"); taos_close(taos); 
taosMemoryFree(blob_len); taosMemoryFree(blob_len2); @@ -1256,11 +1297,12 @@ TEST(stmt2Case, all_type) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); - do_query(taos, "drop database if exists testdb11"); - do_query(taos, "create database IF NOT EXISTS testdb11"); + do_query(taos, "drop database if exists stmt2_testdb_11"); + do_query(taos, "create database IF NOT EXISTS stmt2_testdb_11"); do_query( taos, - "create stable testdb11.stb(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 " + "create stable stmt2_testdb_11.stb(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 " + "smallint, c7 " "tinyint, c8 bool, c9 nchar(8), c10 geometry(256))TAGS(tts timestamp, t1 int, t2 bigint, t3 float, t4 double, t5 " "binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8), t10 geometry(256))"); @@ -1370,7 +1412,7 @@ TEST(stmt2Case, all_type) { params[10].is_null = NULL; params[10].num = 1; - char* stmt_sql = "insert into testdb11.? using stb tags(?,?,?,?,?,?,?,?,?,?,?)values (?,?,?,?,?,?,?,?,?,?,?)"; + char* stmt_sql = "insert into stmt2_testdb_11.? 
using stb tags(?,?,?,?,?,?,?,?,?,?,?)values (?,?,?,?,?,?,?,?,?,?,?)"; code = taos_stmt2_prepare(stmt, stmt_sql, 0); checkError(stmt, code); @@ -1388,6 +1430,7 @@ TEST(stmt2Case, all_type) { geosFreeBuffer(outputGeom1); taos_stmt2_close(stmt); + do_query(taos, "drop database if exists stmt2_testdb_11"); taos_close(taos); } @@ -1395,31 +1438,29 @@ TEST(stmt2Case, geometry) { TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "DROP DATABASE IF EXISTS testdb15"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS testdb15"); - do_query(taos, "CREATE TABLE testdb15.tb1(ts timestamp,c1 geometry(256))"); + do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt2_testdb_13"); + do_query(taos, "CREATE TABLE stmt2_testdb_13.tb1(ts timestamp,c1 geometry(256))"); TAOS_STMT2_OPTION option = {0}; TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); - unsigned char wkb1[] = { // 1 - 0x01, // 字节顺序:小端字节序 - 0x01, 0x00, 0x00, 0x00, // 几何类型:Point (1) - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F, // p1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, // p2 - // 2 - 0x01, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, - 0x3f, - // 3 - 0x01, - 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, - 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x40}; + unsigned char wkb1[] = { + // 1 + 0x01, // 字节顺序:小端字节序 + 0x01, 0x00, 0x00, 0x00, // 几何类型:Point (1) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F, // p1 + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, // p2 + // 2 + 0x01, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xf0, 0x3f, + // 3 + 0x01, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x40}; // unsigned char* wkb_all[3]{&wkb1[0], &wkb2[0], &wkb3[0]}; int32_t wkb_len[3] = {21, 61, 41}; @@ -1440,7 +1481,7 @@ TEST(stmt2Case, geometry) { params[1].is_null = NULL; params[1].num = 3; - char* stmt_sql = "insert into testdb15.tb1 (ts,c1)values(?,?)"; + char* stmt_sql = "insert into stmt2_testdb_13.tb1 (ts,c1)values(?,?)"; int code = taos_stmt2_prepare(stmt, stmt_sql, 0); checkError(stmt, code); @@ -1455,6 +1496,7 @@ TEST(stmt2Case, geometry) { ASSERT_EQ(affected_rows, 3); taos_stmt2_close(stmt); + do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13"); taos_close(taos); } #pragma GCC diagnostic pop diff --git a/source/client/test/stmtTest.cpp b/source/client/test/stmtTest.cpp index 510322ca87..77130e41db 100644 --- a/source/client/test/stmtTest.cpp +++ b/source/client/test/stmtTest.cpp @@ -52,6 +52,11 @@ void do_query(TAOS *taos, const char *sql) { TAOS_RES *result = taos_query(taos, sql); // printf("sql: %s\n", sql); int code = taos_errno(result); + while (code == TSDB_CODE_MND_DB_IN_CREATING || code == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + result = taos_query(taos, sql); + code = taos_errno(result); + } if (code != TSDB_CODE_SUCCESS) { printf("query failen sql : %s\n errstr : %s\n", sql, taos_errstr(result)); ASSERT_EQ(taos_errno(result), TSDB_CODE_SUCCESS); 
@@ -69,12 +74,13 @@ typedef struct { void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_NUMS, int ROW_NUMS, int CYC_NUMS, bool isCreateTable) { // create database and table - do_query(taos, "DROP DATABASE IF EXISTS testdb2"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS testdb2"); - do_query(taos, - "CREATE STABLE IF NOT EXISTS testdb2.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " - "(groupId INT, location BINARY(24))"); - do_query(taos, "USE testdb2"); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_2"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_2"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_2.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + do_query(taos, "USE stmt_testdb_2"); // init TAOS_STMT *stmt; @@ -173,7 +179,7 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_ for (int j = 0; j < ROW_NUMS; j++) { struct timeval tv; (&tv, NULL); - int64_t ts = 1591060628000 + j + k * 100; + int64_t ts = 1591060628000 + j + k * 100000; float current = (float)0.0001f * j; int voltage = j; float phase = (float)0.0001f * j; @@ -207,12 +213,13 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_E *expectedTagFields, int expectedTagFieldNum, TAOS_FIELD_E *expectedColFields, int expectedColFieldNum) { // create database and table - do_query(taos, "DROP DATABASE IF EXISTS testdb3"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS testdb3"); - do_query(taos, "USE testdb3"); - do_query(taos, - "CREATE STABLE IF NOT EXISTS testdb3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " - "(groupId INT, location BINARY(24))"); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3"); + do_query(taos, 
"USE stmt_testdb_3"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); TAOS_STMT *stmt = taos_stmt_init(taos); ASSERT_NE(stmt, nullptr); @@ -271,7 +278,7 @@ TEST(stmtCase, stb_insert) { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); // interlace = 0 - { insertData(taos, nullptr, "INSERT INTO testdb2.? USING meters TAGS(?,?) VALUES (?,?,?,?)", 1, 1, 1, false); } + { insertData(taos, nullptr, "INSERT INTO stmt_testdb_2.? USING meters TAGS(?,?) VALUES (?,?,?,?)", 1, 1, 1, false); } { insertData(taos, nullptr, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 3, 3, 3, false); } @@ -283,6 +290,7 @@ TEST(stmtCase, stb_insert) { insertData(taos, &options, "INSERT INTO ? VALUES (?,?,?,?)", 3, 3, 3, true); } + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_2"); taos_close(taos); } @@ -299,18 +307,20 @@ TEST(stmtCase, get_fields) { {"phase", TSDB_DATA_TYPE_FLOAT, 0, 0, sizeof(float)}}; getFields(taos, "INSERT INTO ? USING meters TAGS(?,?) 
VALUES (?,?,?,?)", 7, &tagFields[0], 2, &colFields[0], 4); } + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); taos_close(taos); } -/* + TEST(stmtCase, all_type) { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "DROP DATABASE IF EXISTS testdb1"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS testdb1"); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_1"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_1"); do_query( taos, - "CREATE STABLE testdb1.stb(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 " + "CREATE STABLE stmt_testdb_1.stb1(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 " + "smallint, c7 " "tinyint, c8 bool, c9 nchar(8), c10 geometry(100))TAGS(tts timestamp, t1 int, t2 bigint, t3 float, t4 double, t5 " "binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8), t10 geometry(100))"); @@ -418,18 +428,22 @@ TEST(stmtCase, all_type) { params[9].is_null = NULL; params[9].num = 1; + size_t size; + int code = initCtxGeomFromText(); + checkError(stmt, code); + unsigned char *outputGeom1; - size_t size1; - initCtxMakePoint(); - int code = doMakePoint(1.000, 2.000, &outputGeom1, &size1); + const char *wkt = "LINESTRING(1.0 1.0, 2.0 2.0)"; + code = doGeomFromText(wkt, &outputGeom1, &size); checkError(stmt, code); params[10].buffer_type = TSDB_DATA_TYPE_GEOMETRY; params[10].buffer = outputGeom1; - params[10].length = (int32_t *)&size1; + params[9].buffer_length = size; + params[10].length = (int32_t *)&size; params[10].is_null = NULL; params[10].num = 1; - char *stmt_sql = "insert into testdb1.? using stb tags(?,?,?,?,?,?,?,?,?,?,?)values (?,?,?,?,?,?,?,?,?,?,?)"; + char *stmt_sql = "insert into stmt_testdb_1.? 
using stb1 tags(?,?,?,?,?,?,?,?,?,?,?)values (?,?,?,?,?,?,?,?,?,?,?)"; code = taos_stmt_prepare(stmt, stmt_sql, 0); checkError(stmt, code); @@ -449,17 +463,17 @@ TEST(stmtCase, all_type) { checkError(stmt, code); taos_stmt_close(stmt); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_1"); taos_close(taos); } -*/ TEST(stmtCase, geometry) { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); - do_query(taos, "DROP DATABASE IF EXISTS testdb5"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS testdb5"); - do_query(taos, "CREATE TABLE testdb5.tb1(ts timestamp,c1 geometry(256))"); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_5"); + do_query(taos, "CREATE TABLE stmt_testdb_5.tb1(ts timestamp,c1 geometry(256))"); TAOS_STMT *stmt = taos_stmt_init(taos); ASSERT_NE(stmt, nullptr); @@ -468,7 +482,6 @@ TEST(stmtCase, geometry) { 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, }, - // {0x01, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, @@ -503,7 +516,7 @@ TEST(stmtCase, geometry) { params[1].is_null = NULL; params[1].num = 3; - char *stmt_sql = "insert into testdb5.tb1 (ts,c1)values(?,?)"; + char *stmt_sql = "insert into stmt_testdb_5.tb1 (ts,c1)values(?,?)"; int code = taos_stmt_prepare(stmt, stmt_sql, 0); checkError(stmt, code); @@ -522,6 +535,7 @@ TEST(stmtCase, geometry) { taosMemoryFree(t64_len); taosMemoryFree(wkb_len); taos_stmt_close(stmt); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5"); taos_close(taos); } From 9729b8b9dce16867cfaa1c6c803a6739a25e6e69 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 13 Jan 2025 
15:51:15 +0800 Subject: [PATCH 022/120] fix other async create database case --- source/client/test/clientTests.cpp | 12 ++++++++++++ source/client/test/connectOptionsTest.cpp | 8 +++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 60f0a72e39..54c0e59817 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -532,6 +532,10 @@ TEST(clientCase, create_stable_Test) { taos_free_result(pRes); pRes = taos_query(pConn, "use abc1"); + while (taos_errno(pRes) == TSDB_CODE_MND_DB_IN_CREATING || taos_errno(pRes) == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + pRes = taos_query(pConn, "use abc1"); + } taos_free_result(pRes); pRes = taos_query(pConn, "create table if not exists abc1.st1(ts timestamp, k int) tags(a int)"); @@ -664,6 +668,10 @@ TEST(clientCase, create_multiple_tables) { taos_free_result(pRes); pRes = taos_query(pConn, "use abc1"); + while (taos_errno(pRes) == TSDB_CODE_MND_DB_IN_CREATING || taos_errno(pRes) == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + pRes = taos_query(pConn, "use abc1"); + } if (taos_errno(pRes) != 0) { (void)printf("failed to use db, reason:%s\n", taos_errstr(pRes)); taos_free_result(pRes); @@ -1524,6 +1532,10 @@ TEST(clientCase, timezone_Test) { taos_free_result(pRes); pRes = taos_query(pConn, "create table db1.t1 (ts timestamp, v int)"); + while (taos_errno(pRes) == TSDB_CODE_MND_DB_IN_CREATING || taos_errno(pRes) == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + pRes = taos_query(pConn, "create table db1.t1 (ts timestamp, v int)"); + } ASSERT_EQ(taos_errno(pRes), TSDB_CODE_SUCCESS); taos_free_result(pRes); diff --git a/source/client/test/connectOptionsTest.cpp b/source/client/test/connectOptionsTest.cpp index 95596e9ed3..4f0dbb579b 100644 --- a/source/client/test/connectOptionsTest.cpp +++ b/source/client/test/connectOptionsTest.cpp @@ -55,7 +55,13 @@ TAOS* getConnWithOption(const 
char *tz){ void execQuery(TAOS* pConn, const char *sql){ TAOS_RES* pRes = taos_query(pConn, sql); - ASSERT(taos_errno(pRes) == TSDB_CODE_SUCCESS); + int code = taos_errno(pRes); + while (code == TSDB_CODE_MND_DB_IN_CREATING || code == TSDB_CODE_MND_DB_IN_DROPPING) { + taosMsleep(2000); + TAOS_RES* pRes = taos_query(pConn, sql); + code = taos_errno(pRes); + } + ASSERT(code == TSDB_CODE_SUCCESS); taos_free_result(pRes); } From f0fb2fb55dd4052ef7130a1ae04d1716f6138bf7 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 13 Jan 2025 18:27:59 +0800 Subject: [PATCH 023/120] fix:[TD-33504]add test case --- tests/parallel_test/cases.task | 1 + tests/system-test/7-tmq/tmq_td33504.py | 84 ++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 tests/system-test/7-tmq/tmq_td33504.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index ea92f0bef7..15e073e8e6 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -328,6 +328,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5466.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_td33504.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts-5473.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-32187.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-33225.py diff --git a/tests/system-test/7-tmq/tmq_td33504.py b/tests/system-test/7-tmq/tmq_td33504.py new file mode 100644 index 0000000000..085b245dd5 --- /dev/null +++ b/tests/system-test/7-tmq/tmq_td33504.py @@ -0,0 +1,84 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from taos.tmq import * +from taos import * + 
+sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def test(self): + tdSql.execute(f'create database if not exists db') + tdSql.execute(f'use db') + tdSql.execute(f'CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)') + tdSql.execute("INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + tdSql.execute("INSERT INTO d1002 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + tdSql.execute("INSERT INTO d1003 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + tdSql.execute("INSERT INTO d1004 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + + tdSql.execute(f'create topic t0 as select * from meters') + tdSql.execute(f'create topic t1 as select * from meters') + + consumer_dict = { + "group.id": "g1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + } + consumer = Consumer(consumer_dict) + + try: + consumer.subscribe(["t0"]) + except TmqError: + tdLog.exit(f"subscribe error") + + try: + res = consumer.poll(1) + print(res) + + consumer.unsubscribe() + + try: + consumer.subscribe(["t1"]) + except TmqError: + tdLog.exit(f"subscribe error") + + + res = consumer.poll(1) + print(res) + if res == None and taos_errno(None) != 0: + tdLog.exit(f"poll error %d" % taos_errno(None)) + + except TmqError: + tdLog.exit(f"poll error") + finally: + consumer.close() + + + def run(self): + self.test() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + 
+tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 982790b93a5e0ae1949e2e75075926ac36fd29d0 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 14 Jan 2025 11:32:25 +0800 Subject: [PATCH 024/120] Update build step notice and commands by charles --- tests/README.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/README.md b/tests/README.md index 9d7f98b564..6ac47ff7e0 100644 --- a/tests/README.md +++ b/tests/README.md @@ -21,14 +21,23 @@ Install the Python connector for TDengine. pip3 install taospy taos-ws-py ``` +>[!NTOE] +>Please make sure building operation with option '-DBUILD_TEST=true' has been finished, execute the below commands if not: + +```bash +cd debug +cmake .. -DBUILD_TEST=true -DBUILD_CONTRIB=true +make && make install +``` + ## Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run the script with below command: ```bash -cd tests/unit-test/ -e 0 -bash test.sh +cd tests/unit-test/ +bash test.sh -e 0 ``` ## System Test From b3b4623fab3b52e06009db1573dc1bd4d8f7e1d1 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 14 Jan 2025 11:36:11 +0800 Subject: [PATCH 025/120] fix spelling issue --- tests/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/README.md b/tests/README.md index 6ac47ff7e0..35acb3f019 100644 --- a/tests/README.md +++ b/tests/README.md @@ -21,8 +21,8 @@ Install the Python connector for TDengine. 
pip3 install taospy taos-ws-py ``` ->[!NTOE] ->Please make sure building operation with option '-DBUILD_TEST=true' has been finished, execute the below commands if not: +> [!NOTE] +> Please make sure building operation with option '-DBUILD_TEST=true' has been finished, execute the below commands if not: ```bash cd debug From ae28c958d573a78dba02932ddd8025fc97f24510 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 14 Jan 2025 11:44:06 +0800 Subject: [PATCH 026/120] fix: use ps -C to find process pid --- packaging/tools/remove.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 5bbfd2a0de..77d46627a7 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -90,7 +90,7 @@ fi kill_service_of() { _service=$1 - pid=$(ps -C $_service | grep -v $uninstallScript | awk '{print $2}') + pid=$(ps -C $_service |grep -w $_service | grep -v $uninstallScript | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi @@ -140,9 +140,8 @@ clean_service_of() { clean_service_on_systemd_of $_service elif ((${service_mod} == 1)); then clean_service_on_sysvinit_of $_service - else - kill_service_of $_service fi + kill_service_of $_service } remove_service_of() { From 6e71f41e8c2c0e86f0acac1a989e45a67e04b142 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 14 Jan 2025 12:23:59 +0800 Subject: [PATCH 027/120] fix: use ps -C to find process pid --- packaging/tools/remove_client.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 7798bbf16a..c883fa0af2 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -40,7 +40,7 @@ if command -v sudo > /dev/null; then fi function kill_client() { - pid=$(ps -C ${clientName2} | grep -v $uninstallScript2 | awk '{print $2}') + pid=$(ps -C ${clientName2} | gerep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print 
$1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi From 184162545ba5d7a0ea1186f78315f0b79ccf8fca Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 14 Jan 2025 13:49:27 +0800 Subject: [PATCH 028/120] Update packaging/tools/remove.sh Co-authored-by: WANG Xu --- packaging/tools/remove.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 77d46627a7..6e309f1f67 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -90,7 +90,7 @@ fi kill_service_of() { _service=$1 - pid=$(ps -C $_service |grep -w $_service | grep -v $uninstallScript | awk '{print $1}') + pid=$(ps -C $_service | grep -w $_service | grep -v $uninstallScript | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi From 379be9da02bc8604a2175ad53dd80414db6e4aca Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 14 Jan 2025 13:49:33 +0800 Subject: [PATCH 029/120] Update packaging/tools/remove_client.sh Co-authored-by: WANG Xu --- packaging/tools/remove_client.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index c883fa0af2..3414992ad9 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -40,7 +40,7 @@ if command -v sudo > /dev/null; then fi function kill_client() { - pid=$(ps -C ${clientName2} | gerep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}') + pid=$(ps -C ${clientName2} | grep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi From 318a82f30f7a3f1bf88840255319c2379a3eadec Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Tue, 14 Jan 2025 13:55:23 +0800 Subject: [PATCH 030/120] remove space --- packaging/tools/remove.sh | 2 +- packaging/tools/remove_client.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh 
index 6e309f1f67..43c2de4ba4 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -90,7 +90,7 @@ fi kill_service_of() { _service=$1 - pid=$(ps -C $_service | grep -w $_service | grep -v $uninstallScript | awk '{print $1}') + pid=$(ps -C $_service | grep -w $_service | grep -v $uninstallScript | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 3414992ad9..1d2965f66b 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -40,7 +40,7 @@ if command -v sudo > /dev/null; then fi function kill_client() { - pid=$(ps -C ${clientName2} | grep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}') + pid=$(ps -C ${clientName2} | grep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi From 5b64c3429c72134b58f4c1a8dc903ec670a875b6 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Tue, 14 Jan 2025 14:56:17 +0800 Subject: [PATCH 031/120] fix async error in unit test --- source/client/test/stmt2Test.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index 3e21721c47..52c89e97ab 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -197,7 +197,7 @@ void do_stmt(TAOS* taos, TAOS_STMT2_OPTION* option, const char* sql, int CTB_NUM checkError(stmt, code); // exec - int affected; + int affected = 0; code = taos_stmt2_exec(stmt, &affected); total_affected += affected; checkError(stmt, code); @@ -219,8 +219,9 @@ void do_stmt(TAOS* taos, TAOS_STMT2_OPTION* option, const char* sql, int CTB_NUM taosMemoryFree(tags); } } - - ASSERT_EQ(total_affected, CYC_NUMS * ROW_NUMS * CTB_NUMS); + if (option->asyncExecFn == NULL) { + ASSERT_EQ(total_affected, CYC_NUMS * ROW_NUMS * CTB_NUMS); + } for (int i = 0; i < CTB_NUMS; i++) { 
taosMemoryFree(tbs[i]); } From cae21da2d40593dd43f69e7e56691cd13a35c7fc Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 14 Jan 2025 15:35:43 +0800 Subject: [PATCH 032/120] fix:[TS-5906]clear meta cache for subscription if tag is changed --- source/libs/executor/src/executor.c | 7 +++ tests/parallel_test/cases.task | 1 + tests/system-test/7-tmq/tmq_ts5906.py | 90 +++++++++++++++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 tests/system-test/7-tmq/tmq_ts5906.py diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index dffab1b163..1386b0b82f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -469,6 +469,13 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI } SStreamScanInfo* pScanInfo = pInfo->info; + if (pInfo->pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { // clear meta cache for subscription if tag is changed + for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) { + int64_t* uid = (int64_t*)taosArrayGet(tableIdList, i); + STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info; + taosLRUCacheErase(pTableScanInfo->base.metaCache.pTableMetaEntryCache, uid, LONG_BYTES); + } + } if (isAdd) { // add new table id SArray* qa = NULL; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index ea92f0bef7..97509e453a 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -329,6 +329,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5466.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts-5473.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5906.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-32187.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-33225.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts4563.py diff --git 
a/tests/system-test/7-tmq/tmq_ts5906.py b/tests/system-test/7-tmq/tmq_ts5906.py new file mode 100644 index 0000000000..13e756baa3 --- /dev/null +++ b/tests/system-test/7-tmq/tmq_ts5906.py @@ -0,0 +1,90 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from taos.tmq import * +from taos import * + +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143, 'asynclog': 0} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def test(self): + tdSql.execute(f'create database if not exists db vgroups 1') + tdSql.execute(f'use db') + tdSql.execute(f'CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)') + tdSql.execute("INSERT INTO d1001 USING meters TAGS('California.SanFrancisco1', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + + + tdSql.execute(f'create topic t0 as select * from meters') + + consumer_dict = { + "group.id": "g1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + } + consumer = Consumer(consumer_dict) + + try: + consumer.subscribe(["t0"]) + except TmqError: + tdLog.exit(f"subscribe error") + + index = 0; + try: + while True: + if index == 2: + break + res = consumer.poll(5) + print(res) + if not res: + print("res null") + break + val = res.value() + if val is None: + continue + for block in val: + data = block.fetchall() + for element in data: + print(f"data len: {len(data)}") + print(element) + if index == 0 and data[0][-1] != 2: + tdLog.exit(f"error: {data[0][-1]}") + if index == 1 and data[0][-1] != 100: + tdLog.exit(f"error: {data[0][-1]}") + + 
tdSql.execute("alter table d1001 set tag groupId = 100") + tdSql.execute("INSERT INTO d1001 VALUES('2018-10-05 14:38:06.000',10.30000,219,0.31000)") + index += 1 + finally: + consumer.close() + + + def run(self): + self.test() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 4ceef6ddf1cf6384d3ad366dbd881718d3a4228c Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 14 Jan 2025 15:36:33 +0800 Subject: [PATCH 033/120] docs: fix configuration error in doc --- docs/en/14-reference/01-components/01-taosd.md | 5 ----- docs/en/14-reference/03-taos-sql/10-function.md | 2 +- docs/zh/14-reference/01-components/01-taosd.md | 5 ----- docs/zh/14-reference/03-taos-sql/10-function.md | 2 +- 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/docs/en/14-reference/01-components/01-taosd.md b/docs/en/14-reference/01-components/01-taosd.md index 55db20bef0..7456593ddb 100644 --- a/docs/en/14-reference/01-components/01-taosd.md +++ b/docs/en/14-reference/01-components/01-taosd.md @@ -77,12 +77,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s |minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000| |singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000| |filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0| -|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs| -|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the 
query plan| -|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan| -|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan| |queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds| -|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages| |pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting| ### Region Related diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index ab5c48bce2..8397c59177 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -2171,7 +2171,7 @@ ignore_negative: { **Usage Instructions**: -- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE() from. +- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE(col1, 1s, 1) from tb1. 
### DIFF diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index ff1d7617d8..4c3350df7c 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -73,12 +73,7 @@ taosd 命令行参数如下 |minReservedMemorySize | |不支持动态修改 |最小预留的系统可用内存数量,除预留外的内存都可以被用于查询,单位:MB,默认预留大小为系统物理内存的 20%,取值范围 1024 - 1000000000| |singleQueryMaxMemorySize| |不支持动态修改 |单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误,单位:MB,默认值:0(无上限),取值范围 0 - 1000000000| |filterScalarMode | |不支持动态修改 |强制使用标量过滤模式,0:关闭;1:开启,默认值 0| -|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志| -|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小| -|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法| -|queryMaxConcurrentTables| |不支持动态修改 |内部参数,查询计划的并发数目| |queryRsmaTolerance | |不支持动态修改 |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒| -|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息| |pqSortMemThreshold | |不支持动态修改 |内部参数,排序使用的内存阈值| ### 区域相关 diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index eb3a4bb0ed..c0e80e80df 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -2099,7 +2099,7 @@ ignore_negative: { **使用说明**: -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE(col1, 1s, 1) from tb1。 ### DIFF From a1a6312db71f17b63300661cda0e5cad40bbc59d Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 14 Jan 2025 16:01:26 +0800 Subject: [PATCH 034/120] udpate test README to add build options by charles --- tests/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/README.md b/tests/README.md index 35acb3f019..2a25740dc3 100644 --- a/tests/README.md +++ b/tests/README.md @@ -22,11 +22,11 @@ pip3 install taospy taos-ws-py ``` > [!NOTE] -> Please make sure building operation with option '-DBUILD_TEST=true' has been finished, execute the below 
commands if not: +> Please make sure building operation with option '-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true' has been finished, execute the below commands if not: ```bash cd debug -cmake .. -DBUILD_TEST=true -DBUILD_CONTRIB=true +cmake .. -DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true make && make install ``` From bb8ef7272573644681df3bd7e5aa32ec889dae1a Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 14 Jan 2025 16:46:39 +0800 Subject: [PATCH 035/120] Update test_smoking_selfhost.sh --- packaging/smokeTest/test_smoking_selfhost.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packaging/smokeTest/test_smoking_selfhost.sh b/packaging/smokeTest/test_smoking_selfhost.sh index a25c5a6d90..6ed0b9c715 100755 --- a/packaging/smokeTest/test_smoking_selfhost.sh +++ b/packaging/smokeTest/test_smoking_selfhost.sh @@ -6,12 +6,6 @@ SUCCESS_FILE="success.txt" FAILED_FILE="failed.txt" REPORT_FILE="report.txt" -# Initialize/clear result files -> "$SUCCESS_FILE" -> "$FAILED_FILE" -> "$LOG_FILE" -> "$REPORT_FILE" - # Switch to the target directory TARGET_DIR="../../tests/system-test/" @@ -24,6 +18,12 @@ else exit 1 fi +# Initialize/clear result files +> "$SUCCESS_FILE" +> "$FAILED_FILE" +> "$LOG_FILE" +> "$REPORT_FILE" + # Define the Python commands to execute commands=( "python3 ./test.py -f 2-query/join.py" @@ -102,4 +102,4 @@ fi echo "Detailed logs can be found in: $(realpath "$LOG_FILE")" echo "Successful commands can be found in: $(realpath "$SUCCESS_FILE")" echo "Failed commands can be found in: $(realpath "$FAILED_FILE")" -echo "Test report can be found in: $(realpath "$REPORT_FILE")" \ No newline at end of file +echo "Test report can be found in: $(realpath "$REPORT_FILE")" From 11d34f728a8c9defe8c8aae42ff0a7ca998bd069 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 14 Jan 2025 17:04:07 +0800 Subject: [PATCH 036/120] fix: data load required --- source/libs/function/src/builtinsimpl.c 
| 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 707018ac65..0f6db39cc8 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -771,7 +771,34 @@ bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } +static bool funcNotSupportStringSma(SFunctionNode* pFunc) { + SNode* pParam; + switch (pFunc->funcType) { + case FUNCTION_TYPE_MAX: + case FUNCTION_TYPE_MIN: + case FUNCTION_TYPE_SUM: + case FUNCTION_TYPE_AVG: + case FUNCTION_TYPE_AVG_PARTIAL: + case FUNCTION_TYPE_PERCENTILE: + case FUNCTION_TYPE_SPREAD: + case FUNCTION_TYPE_SPREAD_PARTIAL: + case FUNCTION_TYPE_SPREAD_MERGE: + case FUNCTION_TYPE_TWA: + pParam = nodesListGetNode(pFunc->pParameterList, 0); + if (pParam && nodesIsExprNode(pParam) && (IS_VAR_DATA_TYPE(((SExprNode*)pParam)->resType.type))) { + return true; + } + break; + default: + break; + } + return false; +} + EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) { + if(funcNotSupportStringSma(pFunc)) { + return FUNC_DATA_REQUIRED_DATA_LOAD; + } return FUNC_DATA_REQUIRED_SMA_LOAD; } From ad50cde25e637342d1a07b6e88a6e6f8859d7bea Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 14 Jan 2025 17:31:42 +0800 Subject: [PATCH 037/120] add TSBS and TestNG test description and fold up for installation, running section by charles --- README.md | 81 +++++++++++++++++++++++++++++++++++++------------ tests/README.md | 12 +++++++- 2 files changed, 72 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 33874cf05d..a39ddc0f78 100644 --- a/README.md +++ b/README.md @@ -30,13 +30,14 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine 1. [Documentation](#2-documentation) 1. [Prerequisites](#3-prerequisites) 1. [Building](#4-building) -1. [Installation](#5-installation) -1. [Running](#6-running) -1. 
[Testing](#7-testing) -1. [Releasing](#8-releasing) -1. [CI/CD](#9-cicd) -1. [Coverage](#10-coverage) -1. [Contributing](#11-contributing) +1. [Packaging](#5-packaging) +1. [Installation](#6-installation) +1. [Running](#7-running) +1. [Testing](#8-testing) +1. [Releasing](#9-releasing) +1. [CI/CD](#10-cicd) +1. [Coverage](#11-coverage) +1. [Contributing](#12-contributing) # 1. Introduction @@ -218,9 +219,17 @@ nmake ``` -# 5. Installation +# 5. Packaging -## 5.1 Install on Linux +TDengine packaging scripts depends on some private repositries currently, you can refer the link for detail steps. [Packaging](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#5-packaging) + +# 6. Installation + +## 6.1 Install on Linux + +
+ +Detailed steps to install on Linux After building successfully, TDengine can be installed by: @@ -230,7 +239,13 @@ sudo make install Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -## 5.2 Install on macOS +
+ +## 6.2 Install on macOS + +
+ +Detailed steps to install on macOS After building successfully, TDengine can be installed by: @@ -238,7 +253,13 @@ After building successfully, TDengine can be installed by: sudo make install ``` -## 5.3 Install on Windows +
+ +## 6.3 Install on Windows + +
+ +Detailed steps to install on windows After building successfully, TDengine can be installed by: @@ -246,9 +267,15 @@ After building successfully, TDengine can be installed by: nmake install ``` -# 6. Running +
-## 6.1 Run TDengine on Linux +# 7. Running + +## 7.1 Run TDengine on Linux + +
+ +Detailed steps to run on Linux To start the service after installation on linux, in a terminal, use: @@ -278,7 +305,13 @@ In another terminal, use the TDengine CLI to connect the server: Option `-c test/cfg` specifies the system configuration file directory. -## 6.2 Run TDengine on Windows +
+ +## 7.2 Run TDengine on Windows + +
+ +Detailed steps to run on windows You can start TDengine server on Windows platform with below commands: @@ -294,7 +327,13 @@ In another terminal, use the TDengine CLI to connect the server: option "-c test/cfg" specifies the system configuration file directory. -## 6.3 Run TDengine on macOS +
+ +## 7.3 Run TDengine on macOS + +
+ +Detailed steps to run on macOS To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: @@ -310,15 +349,17 @@ taos If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -# 7. Testing +
+ +# 8. Testing For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md). -# 8. Releasing +# 9. Releasing For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases). -# 9. CI/CD +# 10. CI/CD Now, Jenkins is mainly used to build CI/CD pipeline for TDengine. To run the tests in the CI/CD pipeline, please run following commands: @@ -329,7 +370,7 @@ cd tests TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). -# 10. Coverage +# 11. Coverage Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine). To create the test coverage report (in HTML format) locally, please run following commands: @@ -342,6 +383,6 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task > [!NOTE] > Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. -# 11. Contributing +# 12. Contributing Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine. diff --git a/tests/README.md b/tests/README.md index 2a25740dc3..843b9379da 100644 --- a/tests/README.md +++ b/tests/README.md @@ -43,7 +43,7 @@ bash test.sh -e 0 ## System Test Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only -work for TDengine enterprise version, you can run the script with below command: +work for TDengine Enterprise Edition, you can run the script with below command: ```bash cd tests/system-test @@ -69,5 +69,15 @@ cd tests/script ./test.sh -f tsim/db/basic1.sim ``` +## TSBS Test +Time Series Benchmark Suite (TSBS) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. 
It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. +TSBS Test is based on the TDengine Enterprise Edition and need private repositry privilege, you can refer the link for detail steps. [TSBS Test](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#85-tsbs-test) + +## TestNG Test + +TestNG Test is another test framwork which developed by python, functionally speaking, it's a supplement for system test, and +also run longer time than system test for stability testing purposes. + +TestNG Test is based on the TDengine Enterprise Edition and need private repositry privilege, you can refer the link for detail steps. [TestNG Test](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#87-testng-test) From c947101b8f09263868450a5fbd58b200eaeb042a Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Tue, 14 Jan 2025 17:55:17 +0800 Subject: [PATCH 038/120] test:add how to add test cases --- tests/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/README.md b/tests/README.md index 843b9379da..83e77ac20b 100644 --- a/tests/README.md +++ b/tests/README.md @@ -69,6 +69,10 @@ cd tests/script ./test.sh -f tsim/db/basic1.sim ``` +## How TO Add Test Cases + +You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. + ## TSBS Test Time Series Benchmark Suite (TSBS) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. 
From d9cbe335a7d96f525ede6caef736500004f2885a Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Tue, 14 Jan 2025 17:56:58 +0800 Subject: [PATCH 039/120] test:add how to add test cases --- tests/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 83e77ac20b..d269a43f90 100644 --- a/tests/README.md +++ b/tests/README.md @@ -47,7 +47,7 @@ work for TDengine Enterprise Edition, you can run the script with below command: ```bash cd tests/system-test -python3 ./test.py -f 2-query/floor.py +python3 ./test.py -f 2-query/avg.py ``` ## Smoke Test From f9a7dc0ea353614fd4ff4345d4943b877ba68999 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Wed, 15 Jan 2025 14:35:33 +0800 Subject: [PATCH 040/120] test case --- tests/system-test/2-query/smaTest.py | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py index 355ac04707..c0a81720ae 100644 --- a/tests/system-test/2-query/smaTest.py +++ b/tests/system-test/2-query/smaTest.py @@ -75,6 +75,7 @@ class TDTestCase: tdLog.debug(" LIMIT test_case2 ............ [OK]") self.test_TD_33336() + self.ts5900() # stop def stop(self): @@ -137,6 +138,47 @@ class TDTestCase: tdLog.debug("INSERT TABLE DATA ............ 
[OK]") return + + def ts5900query(self): + sql = "select max(c0) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '99.0') + sql = "select min(c0) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '1.0') + + def ts5900(self): + tdSql.execute("drop database if exists ts5900;") + tdSql.execute("create database ts5900;") + + tdSql.execute("create table ts5900.meters (ts timestamp, c0 varchar(64)) tags(t0 varchar(64));") + + sql = "CREATE TABLE ts5900.`tt1` USING ts5900.`meters` TAGS ('t11')" + tdSql.execute(sql) + for i in range(155): + tdSql.query(f"insert into ts5900.tt1 values(now+{i*10}s, '{i+1}.0')") + tdSql.query("insert into ts5900.tt1 values(now, '1.2')") + tdSql.query("insert into ts5900.tt1 values(now+1s, '2.0')") + tdSql.query("insert into ts5900.tt1 values(now+2s, '3.0')") + tdSql.query("insert into ts5900.tt1 values(now+3s, '105.0')") + tdSql.query("insert into ts5900.tt1 values(now+4s, '4.0')") + + sql = "select count(*) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '160') + + for i in range(10): + tdSql.execute("flush database ts5900") + time.sleep(1) + self.ts5900query() + tdSql.query(f"insert into ts5900.tt1 values(now, '23.0')") + self.ts5900query() + tdLog.info(f"ts5900 test {i} ............ 
[OK]") + time.sleep(1) + # test case1 base # def test_case1(self): From 904c7c79d3e5f74201bf5f48abf7bf4e70eb0e22 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:18:28 +0800 Subject: [PATCH 041/120] optimize the description in Update 02-database.md --- docs/zh/14-reference/03-taos-sql/02-database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 32df6c60c1..33d327b620 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -209,7 +209,7 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3 BALANCE VGROUP LEADER ``` -触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。 +触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。(企业版功能) ## 查看数据库工作状态 From 9fa1ac534dfbd7665172240058b7d8cc9ce4065f Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Wed, 15 Jan 2025 17:54:39 +0800 Subject: [PATCH 042/120] enh: elapsed sma --- source/libs/function/src/builtinsimpl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0f6db39cc8..efe16ce662 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -784,6 +784,7 @@ static bool funcNotSupportStringSma(SFunctionNode* pFunc) { case FUNCTION_TYPE_SPREAD_PARTIAL: case FUNCTION_TYPE_SPREAD_MERGE: case FUNCTION_TYPE_TWA: + case FUNCTION_TYPE_ELAPSED: pParam = nodesListGetNode(pFunc->pParameterList, 0); if (pParam && nodesIsExprNode(pParam) && (IS_VAR_DATA_TYPE(((SExprNode*)pParam)->resType.type))) { return true; From d15189291b1bbf927f4f2abbfdca1f4405c2caa6 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 09:57:29 +0800 Subject: [PATCH 043/120] update testing README --- tests/README.md | 16 ++++++++++++---- 1 file changed, 12 
insertions(+), 4 deletions(-) diff --git a/tests/README.md b/tests/README.md index d269a43f90..b47129e872 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,6 +1,13 @@ -# Testing TDengine +# Table of Contents +1. [Introduction](#1-introduction) +1. [Prerequisites](#2-prerequisites) +2. [Testing Guide](#3-testing-guide) -## Install the required tools +# 1. Introduction + +This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. + +# 2. Prerequisites Install Python3. @@ -30,6 +37,8 @@ cmake .. -DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true make && make install ``` +# 3. Testing Guide + ## Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run @@ -47,7 +56,7 @@ work for TDengine Enterprise Edition, you can run the script with below command: ```bash cd tests/system-test -python3 ./test.py -f 2-query/avg.py +python3 ./test.py -f 2-query/floor.py ``` ## Smoke Test @@ -68,7 +77,6 @@ In the early stage of TDengine development, test cases are run by an internal te cd tests/script ./test.sh -f tsim/db/basic1.sim ``` - ## How TO Add Test Cases You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. 
From 5c052c1325e908c7e7bd4d2981206d59cb33f42f Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 11:55:57 +0800 Subject: [PATCH 044/120] docs: update the structure for testing readme --- tests/README.md | 102 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 30 deletions(-) diff --git a/tests/README.md b/tests/README.md index b47129e872..07d576ab4c 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,35 +1,44 @@ # Table of Contents + 1. [Introduction](#1-introduction) 1. [Prerequisites](#2-prerequisites) -2. [Testing Guide](#3-testing-guide) +1. [Testing Guide](#3-testing-guide) + 1. [Unit Test](#31-unit-test) + 1. [System Test](#32-system-test) + 1. [Smoke Test](#33-smoke-test) + 1. [Legacy Test](#34-legacy-test) + 1. [Chaos Test](#35-chaos-test) + 1. [TSBS Test](#36-tsbs-test) # 1. Introduction -This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. +This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. -# 2. Prerequisites +# 2. Prerequisites -Install Python3. +- Install Python3 ```bash apt install python3 apt install python3-pip ``` -Install the dependent Python components. +- Install Python dependencies ```bash -pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog +pip3 install pandas psutil fabric2 requests faker simplejson \ + toml pexpect tzlocal distro decorator loguru hyperloglog ``` -Install the Python connector for TDengine. 
+- Install Python connector for TDengine ```bash pip3 install taospy taos-ws-py ``` -> [!NOTE] -> Please make sure building operation with option '-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true' has been finished, execute the below commands if not: +- Building + +Please make sure building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been finished, otherwise execute commands below: ```bash cd debug @@ -39,57 +48,90 @@ make && make install # 3. Testing Guide -## Unit Test +In `tests` directory, there are different types of tests for TDengine. Below is a brief introduction about how to run them and how to add new cases. -Unit test script is the smallest testable part and developed for some function, method or class of TDengine, you can run -the script with below command: +## 3.1. Unit Test + +Unit test script is the smallest testable part and developed for some function, method or class of TDengine. + +### How to run tests? ```bash cd tests/unit-test/ bash test.sh -e 0 ``` -## System Test +### How to add new cases? + +Copy from the old version, need updates: +You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. + +## 3.2. System Test Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only -work for TDengine Enterprise Edition, you can run the script with below command: +work for TDengine Enterprise Edition. + +### How to run tests? ```bash cd tests/system-test python3 ./test.py -f 2-query/floor.py ``` -## Smoke Test +### How to add new cases? -Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine, you can run the -script with commands below: +[Placeholder] + +## 3.3. 
Smoke Test + +Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine. + +### How to run tests? ```bash cd /root/TDengine/packaging/smokeTest ./test_smoking_selfhost.sh ``` -## Legacy Test +### How to add new cases? -In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. To run the legacy test cases, please execute the following commands: +[Placeholder] + +## 3.4. Legacy Test + +In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. + +### How to run tests? + +To run the legacy test cases, please execute the following commands: ```bash cd tests/script ./test.sh -f tsim/db/basic1.sim ``` -## How TO Add Test Cases -You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. +### How to add new cases? -## TSBS Test +[Placeholder] -Time Series Benchmark Suite (TSBS) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. +## 3.5. Chaos Test + +[Desciprtion] + +### How to run tests? + +[Placeholder] + +### How to add new cases? + +[Placeholder] + +## 3.6. TSBS Test + +[Time Series Benchmark Suite (TSBS)](https://github.com/timescale/tsbs) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. + +### How to run tests? 
+ +Need updates: must be run from public repo! TSBS Test is based on the TDengine Enterprise Edition and need private repositry privilege, you can refer the link for detail steps. [TSBS Test](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#85-tsbs-test) - -## TestNG Test - -TestNG Test is another test framwork which developed by python, functionally speaking, it's a supplement for system test, and -also run longer time than system test for stability testing purposes. - -TestNG Test is based on the TDengine Enterprise Edition and need private repositry privilege, you can refer the link for detail steps. [TestNG Test](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#87-testng-test) From 674a3818be425829a57cdaef6f03a11a7e59ee5d Mon Sep 17 00:00:00 2001 From: haoranchen Date: Thu, 16 Jan 2025 12:07:43 +0800 Subject: [PATCH 045/120] Update README.md --- tests/README.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/README.md b/tests/README.md index 07d576ab4c..318024e8ff 100644 --- a/tests/README.md +++ b/tests/README.md @@ -130,6 +130,31 @@ cd tests/script [Time Series Benchmark Suite (TSBS)](https://github.com/timescale/tsbs) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. +1. Clone the code and run the tests locally on your machine. Ensure that your virtual machine supports the AVX instruction set: +```bash + cd /usr/local/src && git clone https://github.com/taosdata/tsbs-internal.git tsbs && \ + cd tsbs && git checkout enh/chr-td-33357 && \ + cd scripts/tsdbComp && ./testTsbs.sh +``` +2. When testing the client and server on separate machines, you should set up your environment as outlined in the steps below: + + 2.1. 
Modify IP and host of client and server in `test.ini` + ```ini + clientIP="192.168.0.203" # client ip + clientHost="trd03" # client hostname + serverIP="192.168.0.204" # server ip + serverHost="trd04" # server hostname + ``` + 2.2. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: + ```ini + serverPass="taosdata123" # server root password + ``` + 2.3. Run the following command to start the test: + ```bash + ./testTsbs.sh + ``` +3. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. + ### How to run tests? Need updates: must be run from public repo! From 78479871339b39e3e3f89c63c9bd1a222c4db1e1 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Thu, 16 Jan 2025 12:09:18 +0800 Subject: [PATCH 046/120] add tsbs for README.md --- tests/README.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/README.md b/tests/README.md index 318024e8ff..0e870b1fea 100644 --- a/tests/README.md +++ b/tests/README.md @@ -130,6 +130,8 @@ cd tests/script [Time Series Benchmark Suite (TSBS)](https://github.com/timescale/tsbs) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. +### How to run tests? + 1. Clone the code and run the tests locally on your machine. Ensure that your virtual machine supports the AVX instruction set: ```bash cd /usr/local/src && git clone https://github.com/taosdata/tsbs-internal.git tsbs && \ @@ -154,9 +156,3 @@ cd tests/script ./testTsbs.sh ``` 3. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. - -### How to run tests? - -Need updates: must be run from public repo! 
- -TSBS Test is based on the TDengine Enterprise Edition and need private repositry privilege, you can refer the link for detail steps. [TSBS Test](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#85-tsbs-test) From d4aec7a3d2b0ffa7a5c609ee857db9e76b7a37e1 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Thu, 16 Jan 2025 12:22:03 +0800 Subject: [PATCH 047/120] docs: add note for tsbs test --- tests/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/README.md b/tests/README.md index 0e870b1fea..1466285096 100644 --- a/tests/README.md +++ b/tests/README.md @@ -138,6 +138,10 @@ cd tests/script cd tsbs && git checkout enh/chr-td-33357 && \ cd scripts/tsdbComp && ./testTsbs.sh ``` +> [!NOTE] +> If you want to quickly verify the test environment, please set the `caseType` in `test.ini` file to `cputest`. The current default test is the CPU scenario. After running, the test will take a very long time. + + 2. When testing the client and server on separate machines, you should set up your environment as outlined in the steps below: 2.1. Modify IP and host of client and server in `test.ini` From 28af1cc214b8108b045b3c992774ff7590be8a51 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Thu, 16 Jan 2025 12:29:34 +0800 Subject: [PATCH 048/120] Update README.md --- tests/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 1466285096..da70c9078d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -139,7 +139,9 @@ cd tests/script cd scripts/tsdbComp && ./testTsbs.sh ``` > [!NOTE] -> If you want to quickly verify the test environment, please set the `caseType` in `test.ini` file to `cputest`. The current default test is the CPU scenario. After running, the test will take a very long time. +> The default case type, CPU test, takes a long time to run. +> +> If you want to run quick test to verify the test environment, please set `caseType=cputest` in test.ini . 2. 
When testing the client and server on separate machines, you should set up your environment as outlined in the steps below: From 64c6562be3c38d901ec443f5927aff3470db4c00 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 15:02:32 +0800 Subject: [PATCH 049/120] update README to adjust the structure --- README.md | 127 +++++++++++++++++++++++++++--------------------- tests/README.md | 41 +++++----------- 2 files changed, 84 insertions(+), 84 deletions(-) diff --git a/README.md b/README.md index a39ddc0f78..495c28c2c3 100644 --- a/README.md +++ b/README.md @@ -29,15 +29,28 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine 1. [Introduction](#1-introduction) 1. [Documentation](#2-documentation) 1. [Prerequisites](#3-prerequisites) -1. [Building](#4-building) -1. [Packaging](#5-packaging) -1. [Installation](#6-installation) -1. [Running](#7-running) -1. [Testing](#8-testing) -1. [Releasing](#9-releasing) -1. [CI/CD](#10-cicd) -1. [Coverage](#11-coverage) -1. [Contributing](#12-contributing) + - [3.1 Prerequisites On Linux](#31-on-linux) + - [3.2 Prerequisites On macOS](#32-on-macos) + - [3.3 Prerequisites On Windows](#33-on-windows) + - [3.4 Clone the repo](#34-clone-the-repo) +2. [Building](#4-building) + - [4.1 Build on Linux](#41-build-on-linux) + - [4.2 Build on macOS](#42-build-on-macos) + - [4.3 Build On Windows](#43-build-on-windows) +3. [Packaging](#5-packaging) +4. [Installation](#6-installation) + - [6.1 Install on Linux](#61-install-on-linux) + - [6.2 Install on macOS](#62-install-on-macos) + - [6.3 Install on Windows](#63-install-on-windows) +5. [Running](#7-running) + - [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux) + - [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos) + - [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows) +6. [Testing](#8-testing) +7. [Releasing](#9-releasing) +8. [CI/CD](#10-cicd) +9. [Coverage](#11-coverage) +10. [Contributing](#12-contributing) # 1. 
Introduction @@ -65,7 +78,11 @@ For user manual, system design and architecture, please refer to [TDengine Docum ## 3.1 On Linux -### For Ubuntu 18.04 or Later +
+ +Install required tools on Linux + +### For Ubuntu 18.04、20.04、22.04 ```bash sudo apt-get udpate @@ -82,36 +99,38 @@ yum config-manager --set-enabled powertools yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static ``` -### For Fedora or Rocky Linux - -```bash -sudo dnf install -y gcc gcc-c++ make cmake git perl -sudo dnf install -y zlib-devel xz-devel snappy-devel jansson-devel pkgconfig -``` - -To build the [taosTools](https://github.com/taosdata/taos-tools) on Fedora or Rocky, the following packages need to be installed. - -```bash -sudo dnf install -y dnf-plugins-core -sudo dnf config-manager --set-enabled powertools -sudo dnf install -y zlib-devel zlib-static xz-devel snappy-devel jansson \ - jansson-devel pkgconfig libatomic libatomic-static libstdc++-static -``` +
## 3.2 On macOS +
+ +Install required tools on macOS + Please intall the dependencies with [brew](https://brew.sh/). ```bash brew install argp-standalone gflags pkgconfig ``` +
+ ## 3.3 On Windows +
+ +Install required tools on Windows + Work in Progress. +
+ ## 3.4 Clone the repo +
+ +Clone the repo + Clone the repository to the target machine: ```bash @@ -122,6 +141,8 @@ cd TDengine > [!NOTE] > TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). +
+ # 4. Building At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. @@ -307,29 +328,7 @@ Option `-c test/cfg` specifies the system configuration file directory. -## 7.2 Run TDengine on Windows - -
- -Detailed steps to run on windows - -You can start TDengine server on Windows platform with below commands: - -```cmd -.\build\bin\taosd.exe -c test\cfg -``` - -In another terminal, use the TDengine CLI to connect the server: - -```cmd -.\build\bin\taos.exe -c test\cfg -``` - -option "-c test/cfg" specifies the system configuration file directory. - -
- -## 7.3 Run TDengine on macOS +## 7.2 Run TDengine on macOS
@@ -351,6 +350,29 @@ If TDengine CLI connects the server successfully, welcome messages and version i
+ +## 7.3 Run TDengine on Windows + +
+
+<summary>Detailed steps to run on Windows</summary>
+
+You can start TDengine server on Windows platform with the commands below:
+
+```cmd
+.\build\bin\taosd.exe -c test\cfg
+```
+
+In another terminal, use the TDengine CLI to connect the server:
+
+```cmd
+.\build\bin\taos.exe -c test\cfg
+```
+
+Option `-c test/cfg` specifies the system configuration file directory.
+
+</details>
+ # 8. Testing For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md). @@ -359,14 +381,9 @@ For how to run different types of tests on TDengine, please see [Testing TDengin For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases). -# 10. CI/CD +# 10. Workflow -Now, Jenkins is mainly used to build CI/CD pipeline for TDengine. To run the tests in the CI/CD pipeline, please run following commands: - -```bash -cd tests -./run_all_ci_cases.sh -b main # on main branch -``` +Now, Jenkins is mainly used to build CI/CD pipeline for TDengine. TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). diff --git a/tests/README.md b/tests/README.md index da70c9078d..3333ae2489 100644 --- a/tests/README.md +++ b/tests/README.md @@ -8,12 +8,15 @@ 1. [Smoke Test](#33-smoke-test) 1. [Legacy Test](#34-legacy-test) 1. [Chaos Test](#35-chaos-test) - 1. [TSBS Test](#36-tsbs-test) + 1. [CI Test](#36-ci-test) # 1. Introduction This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. +> [!NOTICE] +> The below commands and test scripts are verified on linux (Ubuntu 18.04、20.04、22.04) locally. + # 2. Prerequisites - Install Python3 @@ -126,39 +129,19 @@ cd tests/script [Placeholder] -## 3.6. TSBS Test +## 3.6 CI Test -[Time Series Benchmark Suite (TSBS)](https://github.com/timescale/tsbs) is an open-source performance benchmarking platform specifically designed for time-series data processing systems, such as databases. It provides a standardized approach to evaluating the performance of various databases by simulating typical use cases such as IoT and DevOps. +[Desciprtion] ### How to run tests? -1. 
Clone the code and run the tests locally on your machine. Ensure that your virtual machine supports the AVX instruction set: +To run the CI tests, please run following commands: + ```bash - cd /usr/local/src && git clone https://github.com/taosdata/tsbs-internal.git tsbs && \ - cd tsbs && git checkout enh/chr-td-33357 && \ - cd scripts/tsdbComp && ./testTsbs.sh +cd tests +./run_all_ci_cases.sh -b main # on main branch ``` -> [!NOTE] -> The default case type, CPU test, takes a long time to run. -> -> If you want to run quick test to verify the test environment, please set `caseType=cputest` in test.ini . +### How to add new cases? -2. When testing the client and server on separate machines, you should set up your environment as outlined in the steps below: - - 2.1. Modify IP and host of client and server in `test.ini` - ```ini - clientIP="192.168.0.203" # client ip - clientHost="trd03" # client hostname - serverIP="192.168.0.204" # server ip - serverHost="trd04" # server hostname - ``` - 2.2. Set up passwordless login between the client and server; otherwise, you'll need to configure the server password: - ```ini - serverPass="taosdata123" # server root password - ``` - 2.3. Run the following command to start the test: - ```bash - ./testTsbs.sh - ``` -3. When the test is done, the result can be found in `/data2/` directory, which can also be configured in `test.ini`. +[Placeholder] From 250f410bbbc5d3ec340fc98114007a369dc9b1aa Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 16 Jan 2025 15:08:26 +0800 Subject: [PATCH 050/120] enh: remove stt_trigger limitation of community version. 
--- docs/en/14-reference/03-taos-sql/02-database.md | 4 ---- docs/zh/14-reference/03-taos-sql/02-database.md | 4 ---- include/util/tdef.h | 10 +++++----- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/docs/en/14-reference/03-taos-sql/02-database.md b/docs/en/14-reference/03-taos-sql/02-database.md index 54548fe297..6a46620418 100644 --- a/docs/en/14-reference/03-taos-sql/02-database.md +++ b/docs/en/14-reference/03-taos-sql/02-database.md @@ -144,10 +144,6 @@ You can view cacheload through show \.vgroups; If cacheload is very close to cachesize, then cachesize may be too small. If cacheload is significantly less than cachesize, then cachesize is sufficient. You can decide whether to modify cachesize based on this principle. The specific modification value can be determined based on the available system memory, whether to double it or increase it several times. -4. stt_trigger - -Please stop database writing before modifying the stt_trigger parameter. - :::note Other parameters are not supported for modification in version 3.0.0.0 diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 32df6c60c1..4e64c1257b 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -146,10 +146,6 @@ alter_database_option: { 如果 cacheload 非常接近 cachesize,则 cachesize 可能过小。 如果 cacheload 明显小于 cachesize 则 cachesize 是够用的。可以根据这个原则判断是否需要修改 cachesize 。具体修改值可以根据系统可用内存情况来决定是加倍或者是提高几倍。 -4. 
stt_trigger - -在修改 stt_trigger 参数之前请先停止数据库写入。 - :::note 其它参数在 3.0.0.0 中暂不支持修改 diff --git a/include/util/tdef.h b/include/util/tdef.h index e7f40f1092..0cfc7ab591 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -456,13 +456,13 @@ typedef enum ELogicConditionType { #define TSDB_DB_SCHEMALESS_OFF 0 #define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF #define TSDB_MIN_STT_TRIGGER 1 -#ifdef TD_ENTERPRISE +// #ifdef TD_ENTERPRISE #define TSDB_MAX_STT_TRIGGER 16 #define TSDB_DEFAULT_SST_TRIGGER 2 -#else -#define TSDB_MAX_STT_TRIGGER 1 -#define TSDB_DEFAULT_SST_TRIGGER 1 -#endif +// #else +// #define TSDB_MAX_STT_TRIGGER 1 +// #define TSDB_DEFAULT_SST_TRIGGER 1 +// #endif #define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY #define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN) #define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2) From c7337fe4a34d5ac1d7237b0e47162562bfe35fe2 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 15:10:47 +0800 Subject: [PATCH 051/120] test:add test type --- tests/run_all_ci_cases.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index 41040f3c43..43127f75e2 100755 --- a/tests/run_all_ci_cases.sh +++ b/tests/run_all_ci_cases.sh @@ -23,16 +23,20 @@ function printHelp() { echo " -b [Build test branch] Build test branch (default: null)" echo " Options: " echo " e.g., -b main (pull main branch, build and install)" + echo " -t [Run test cases] Run test cases type(default: all)" + echo " Options: " + echo " e.g., -t all/python/legacy" echo " -s [Save cases log] Save cases log(default: notsave)" echo " Options:" - echo " e.g., -c notsave : do not save the log " - echo " -c save : default save ci case log in Project dir/tests/ci_bak" + echo " e.g., -s notsave : do not save the log " + echo " -s save : default save ci case log in Project dir/tests/ci_bak" exit 0 } # 
Initialization parameter PROJECT_DIR="" BRANCH="" +TEST_TYPE="all" SAVE_LOG="notsave" # Parse command line parameters @@ -44,6 +48,9 @@ while getopts "hb:d:s:" arg; do b) BRANCH=$OPTARG ;; + t) + TEST_TYPE=$OPTARG + ;; s) SAVE_LOG=$OPTARG ;; @@ -315,9 +322,9 @@ function runTest() { [ -d sim ] && rm -rf sim [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT - runUnitTest runSimCases runPythonCases + runUnitTest stopTaosd cd $TDENGINE_DIR/tests/script @@ -361,7 +368,13 @@ print_color "$GREEN" "Run all ci test cases" | tee -a $WORK_DIR/date.log stopTaosd -runTest +if [ -z "$TEST_TYPE" ]; then + runTest +elif [ "$TEST_TYPE" = "python" -o "$TEST_TYPE" = "PYTHON"]; then + runPythonCases +elif [ "$TEST_TYPE" = "legacy" -o "$TEST_TYPE" = "LEGACY"]; then + runSimCases +fi date >> $WORK_DIR/date.log print_color "$GREEN" "End of ci test cases" | tee -a $WORK_DIR/date.log \ No newline at end of file From 130e8a3e068d3037d3f38b885e898e1584556a14 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 15:14:29 +0800 Subject: [PATCH 052/120] test:add test type --- tests/run_all_ci_cases.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index 43127f75e2..1edc279e57 100755 --- a/tests/run_all_ci_cases.sh +++ b/tests/run_all_ci_cases.sh @@ -40,7 +40,7 @@ TEST_TYPE="all" SAVE_LOG="notsave" # Parse command line parameters -while getopts "hb:d:s:" arg; do +while getopts "hb:d:t:s:" arg; do case $arg in d) PROJECT_DIR=$OPTARG From a5d327df412987b5f56f724b0da229d1f451e279 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 15:17:03 +0800 Subject: [PATCH 053/120] remove CICD cotents of TDengine README and update testing sructure of testing guide --- README.md | 2 -- tests/README.md | 96 +++++++++++++++++++++++++------------------------ 2 files changed, 50 insertions(+), 48 deletions(-) diff --git a/README.md b/README.md index 495c28c2c3..baf6097c30 100644 --- a/README.md +++ b/README.md 
@@ -383,8 +383,6 @@ For the complete list of TDengine Releases, please see [Releases](https://github # 10. Workflow -Now, Jenkins is mainly used to build CI/CD pipeline for TDengine. - TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). # 11. Coverage diff --git a/tests/README.md b/tests/README.md index 3333ae2489..dcec10eb7a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -3,18 +3,19 @@ 1. [Introduction](#1-introduction) 1. [Prerequisites](#2-prerequisites) 1. [Testing Guide](#3-testing-guide) - 1. [Unit Test](#31-unit-test) - 1. [System Test](#32-system-test) - 1. [Smoke Test](#33-smoke-test) - 1. [Legacy Test](#34-legacy-test) - 1. [Chaos Test](#35-chaos-test) - 1. [CI Test](#36-ci-test) + - [3.1 CI Test](#31-ci-test) + - [3.1.1 Unit Test](#311-unit-test) + - [3.1.2 System Test](#312-system-test) + - [3.1.3 Legacy Test](#313-legacy-test) + - [3.2 Smoke Test](#32-smoke-test) + - [3.3 Chaos Test](#33-chaos-test) + # 1. Introduction This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. -> [!NOTICE] +> [!NOTE] > The below commands and test scripts are verified on linux (Ubuntu 18.04、20.04、22.04) locally. # 2. Prerequisites @@ -53,25 +54,44 @@ make && make install In `tests` directory, there are different types of tests for TDengine. Below is a brief introduction about how to run them and how to add new cases. -## 3.1. Unit Test + +## 3.1 CI Test + +[Desciprtion] + +### How to run tests? + +To run the CI tests, please run following commands: + +```bash +cd tests +./run_all_ci_cases.sh -b main # on main branch +``` + +### How to add new cases? + +[Placeholder] + + +### 3.1.1 Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine. 
-### How to run tests? +#### How to run tests? ```bash cd tests/unit-test/ bash test.sh -e 0 ``` -### How to add new cases? +#### How to add new cases? Copy from the old version, need updates: You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. -## 3.2. System Test +## 3.1.2 System Test -Python test script includes almost all of the functions of TDengine, so some test case maybe fail cause the function only +Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only work for TDengine Enterprise Edition. ### How to run tests? @@ -85,22 +105,7 @@ python3 ./test.py -f 2-query/floor.py [Placeholder] -## 3.3. Smoke Test - -Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine. - -### How to run tests? - -```bash -cd /root/TDengine/packaging/smokeTest -./test_smoking_selfhost.sh -``` - -### How to add new cases? - -[Placeholder] - -## 3.4. Legacy Test +## 3.1.3 Legacy Test In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. @@ -117,31 +122,30 @@ cd tests/script [Placeholder] -## 3.5. Chaos Test -[Desciprtion] +## 3.2 Smoke Test + +Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine. ### How to run tests? -[Placeholder] - -### How to add new cases? - -[Placeholder] - -## 3.6 CI Test - -[Desciprtion] - -### How to run tests? - -To run the CI tests, please run following commands: - ```bash -cd tests -./run_all_ci_cases.sh -b main # on main branch +cd /root/TDengine/packaging/smokeTest +./test_smoking_selfhost.sh ``` ### How to add new cases? [Placeholder] + +## 3.3 Chaos Test + +[Desciprtion] + +### How to run tests? 
+ +[Placeholder] + +### How to add new cases? + +[Placeholder] From 11d6768ba5af60bd736a356ee0f6be3e4f6f61c2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 16 Jan 2025 15:19:47 +0800 Subject: [PATCH 054/120] Update 01-taosd.md --- docs/zh/14-reference/01-components/01-taosd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index 4c3350df7c..0b7189897c 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -189,7 +189,7 @@ charset 的有效值是 UTF-8。 |numOfQnodeQueryThreads | |支持动态修改 重启生效 |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| |numOfSnodeSharedThreads | |支持动态修改 重启生效 |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| |numOfSnodeUniqueThreads | |支持动态修改 重启生效 |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| -|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4| +|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 0.5| |ttlUnit | |不支持动态修改 |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400| |ttlPushInterval | |支持动态修改 立即生效 |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10| |ttlChangeOnWrite | |支持动态修改 立即生效 |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0| From ac10ed5af5c317f4538516c74d14c9a53ee73819 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 15:25:06 +0800 Subject: [PATCH 055/120] test:add test type --- tests/run_all_ci_cases.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index 1edc279e57..b8e68b5907 100755 --- a/tests/run_all_ci_cases.sh +++ b/tests/run_all_ci_cases.sh @@ -370,9 +370,9 @@ stopTaosd if [ -z "$TEST_TYPE" ]; then runTest -elif [ "$TEST_TYPE" = "python" -o "$TEST_TYPE" = "PYTHON"]; then +elif [ "$TEST_TYPE" = "python" -o "$TEST_TYPE" = "PYTHON" ]; then runPythonCases -elif [ "$TEST_TYPE" = "legacy" -o "$TEST_TYPE" = "LEGACY"]; then 
+elif [ "$TEST_TYPE" = "legacy" -o "$TEST_TYPE" = "LEGACY" ]; then runSimCases fi From 297f7bb5d5b54bcab14614a6ef4bec6f432c78ba Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Thu, 16 Jan 2025 15:33:08 +0800 Subject: [PATCH 056/120] fix(query)[TS-5907]: skip some decompressTest sub-cases on machines without AVX2 support - Check CPU instruction set before running AVX2 tests - Skip tests if AVX2 is not supported to avoid errors --- source/util/test/CMakeLists.txt | 4 ---- source/util/test/decompressTest.cpp | 18 ++++++------------ 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index ec05a4e415..768e465fea 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -142,10 +142,6 @@ target_include_directories( PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -IF(COMPILER_SUPPORT_AVX2) - MESSAGE(STATUS "AVX2 instructions is ACTIVATED") - set_source_files_properties(decompressTest.cpp PROPERTIES COMPILE_FLAGS -mavx2) -ENDIF() add_executable(decompressTest "decompressTest.cpp") target_link_libraries(decompressTest os util common gtest_main) add_test( diff --git a/source/util/test/decompressTest.cpp b/source/util/test/decompressTest.cpp index e508c489df..b1f7f7e85c 100644 --- a/source/util/test/decompressTest.cpp +++ b/source/util/test/decompressTest.cpp @@ -524,23 +524,20 @@ static void decompressBasicTest(size_t dataSize, const CompF& compress, const De decltype(origData) decompData(origData.size()); // test simple implementation without SIMD instructions - tsSIMDEnable = 0; + tsAVX2Supported = 0; cnt = decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), ONE_STAGE_COMP, nullptr, 0); ASSERT_EQ(cnt, compData.size() - 1); EXPECT_EQ(origData, decompData); -#ifdef __AVX2__ - if (DataTypeSupportAvx::value) { + taosGetSystemInfo(); + if (DataTypeSupportAvx::value && tsAVX2Supported) { // test AVX2 implementation - 
tsSIMDEnable = 1; - tsAVX2Supported = 1; cnt = decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), ONE_STAGE_COMP, nullptr, 0); ASSERT_EQ(cnt, compData.size() - 1); EXPECT_EQ(origData, decompData); } -#endif } template @@ -557,7 +554,7 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const << "; Compression ratio: " << 1.0 * (compData.size() - 1) / cnt << "\n"; decltype(origData) decompData(origData.size()); - tsSIMDEnable = 0; + tsAVX2Supported = 0; auto ms = measureRunTime( [&]() { decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), @@ -567,10 +564,8 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const std::cout << "Decompression of " << NROUND * DATA_SIZE << " " << typname << " without SIMD costs " << ms << " ms, avg speed: " << NROUND * DATA_SIZE * 1000 / ms << " tuples/s\n"; -#ifdef __AVX2__ - if (DataTypeSupportAvx::value) { - tsSIMDEnable = 1; - tsAVX2Supported = 1; + taosGetSystemInfo(); + if (DataTypeSupportAvx::value && tsAVX2Supported) { ms = measureRunTime( [&]() { decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), @@ -580,7 +575,6 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const std::cout << "Decompression of " << NROUND * DATA_SIZE << " " << typname << " using AVX2 costs " << ms << " ms, avg speed: " << NROUND * DATA_SIZE * 1000 / ms << " tuples/s\n"; } -#endif } #define RUN_PERF_TEST(typname, comp, decomp, min, max) \ From b2ac2d51c59e5bbbad8bbce040236231b4ced5de Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 15:37:57 +0800 Subject: [PATCH 057/120] test:add test type --- tests/run_all_ci_cases.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index b8e68b5907..1374fc1431 100755 --- a/tests/run_all_ci_cases.sh +++ 
b/tests/run_all_ci_cases.sh @@ -36,7 +36,7 @@ function printHelp() { # Initialization parameter PROJECT_DIR="" BRANCH="" -TEST_TYPE="all" +TEST_TYPE="" SAVE_LOG="notsave" # Parse command line parameters @@ -368,7 +368,7 @@ print_color "$GREEN" "Run all ci test cases" | tee -a $WORK_DIR/date.log stopTaosd -if [ -z "$TEST_TYPE" ]; then +if [ -z "$TEST_TYPE" -o "$TEST_TYPE" = "all" -o "$TEST_TYPE" = "ALL" ]; then runTest elif [ "$TEST_TYPE" = "python" -o "$TEST_TYPE" = "PYTHON" ]; then runPythonCases From d92870e6818749c3df90f30cbd8c5331334ccaa7 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 16:01:17 +0800 Subject: [PATCH 058/120] test:add test type --- tests/README.md | 45 +++++++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/tests/README.md b/tests/README.md index dcec10eb7a..225bd2797d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -61,7 +61,7 @@ In `tests` directory, there are different types of tests for TDengine. Below is ### How to run tests? -To run the CI tests, please run following commands: +If this is the first time to run all the CI tests, it is recommended to add the test branch, please run like following commands: ```bash cd tests @@ -70,14 +70,21 @@ cd tests ### How to add new cases? -[Placeholder] +[You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI.] ### 3.1.1 Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine. -#### How to run tests? +### How to run single test case? + +```bash +cd debug/build/bin +./osTimeTests +``` + +### How to run all unit test cases? ```bash cd tests/unit-test/ @@ -87,21 +94,28 @@ bash test.sh -e 0 #### How to add new cases? 
Copy from the old version, need updates: -You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI. + ## 3.1.2 System Test Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only work for TDengine Enterprise Edition. -### How to run tests? +### How to run single test case? ```bash cd tests/system-test python3 ./test.py -f 2-query/floor.py ``` -### How to add new cases? +### How to run all system test cases? + +```bash +cd tests +./run_all_ci_cases.sh -t python # all python cases +``` + +### How to add new case? [Placeholder] @@ -109,7 +123,7 @@ python3 ./test.py -f 2-query/floor.py In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. -### How to run tests? +### How to run single test case? To run the legacy test cases, please execute the following commands: @@ -118,7 +132,14 @@ cd tests/script ./test.sh -f tsim/db/basic1.sim ``` -### How to add new cases? +### How to run all legacy test cases? + +```bash +cd tests +./run_all_ci_cases.sh -t legacy # all legacy cases +``` + +### How to add new case? [Placeholder] @@ -127,14 +148,14 @@ cd tests/script Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine. -### How to run tests? +### How to run test? ```bash cd /root/TDengine/packaging/smokeTest ./test_smoking_selfhost.sh ``` -### How to add new cases? +### How to add new case? [Placeholder] @@ -142,10 +163,10 @@ cd /root/TDengine/packaging/smokeTest [Desciprtion] -### How to run tests? +### How to run test? [Placeholder] -### How to add new cases? +### How to add new case? 
[Placeholder] From 4c852e29b44ba5b8f92e607a0312126f27a7ddfa Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Jan 2025 16:05:01 +0800 Subject: [PATCH 059/120] gitignore: ignore contrib/xml2-cmake directory --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 07003bda4c..242433adb1 100644 --- a/.gitignore +++ b/.gitignore @@ -123,6 +123,7 @@ contrib/* !contrib/CMakeLists.txt !contrib/test !contrib/azure-cmake +!contrib/xml2-cmake sql debug*/ .env From 9cd1113c93d06ce22d7d00c718bb7ef1db60dc71 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 16:11:49 +0800 Subject: [PATCH 060/120] update TDengine README workflow link --- README.md | 18 +++++++++--------- tests/README.md | 2 ++ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index baf6097c30..91232136c5 100644 --- a/README.md +++ b/README.md @@ -33,24 +33,24 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine - [3.2 Prerequisites On macOS](#32-on-macos) - [3.3 Prerequisites On Windows](#33-on-windows) - [3.4 Clone the repo](#34-clone-the-repo) -2. [Building](#4-building) +1. [Building](#4-building) - [4.1 Build on Linux](#41-build-on-linux) - [4.2 Build on macOS](#42-build-on-macos) - [4.3 Build On Windows](#43-build-on-windows) -3. [Packaging](#5-packaging) -4. [Installation](#6-installation) +1. [Packaging](#5-packaging) +1. [Installation](#6-installation) - [6.1 Install on Linux](#61-install-on-linux) - [6.2 Install on macOS](#62-install-on-macos) - [6.3 Install on Windows](#63-install-on-windows) -5. [Running](#7-running) +1. [Running](#7-running) - [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux) - [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos) - [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows) -6. [Testing](#8-testing) -7. [Releasing](#9-releasing) -8. [CI/CD](#10-cicd) -9. [Coverage](#11-coverage) -10. [Contributing](#12-contributing) +1. [Testing](#8-testing) +1. 
[Releasing](#9-releasing) +1. [Workflow](#10-workflow) +1. [Coverage](#11-coverage) +1. [Contributing](#12-contributing) # 1. Introduction diff --git a/tests/README.md b/tests/README.md index 225bd2797d..e51a6a0e49 100644 --- a/tests/README.md +++ b/tests/README.md @@ -143,6 +143,8 @@ cd tests [Placeholder] +> [!NOTE] +> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test. ## 3.2 Smoke Test From 625f338d77ac1c3036f043fc50b5c002b5158adc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Jan 2025 16:13:36 +0800 Subject: [PATCH 061/120] gitignore: fix ignore patterns --- contrib/xml2-cmake/CMakeLists.txt | 59 +++ .../xml2-cmake/linux_x86_64/include/config.h | 285 ++++++++++ .../linux_x86_64/include/libxml/xmlversion.h | 501 ++++++++++++++++++ 3 files changed, 845 insertions(+) create mode 100644 contrib/xml2-cmake/CMakeLists.txt create mode 100644 contrib/xml2-cmake/linux_x86_64/include/config.h create mode 100644 contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h diff --git a/contrib/xml2-cmake/CMakeLists.txt b/contrib/xml2-cmake/CMakeLists.txt new file mode 100644 index 0000000000..30b00160ed --- /dev/null +++ b/contrib/xml2-cmake/CMakeLists.txt @@ -0,0 +1,59 @@ +#set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2") +set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/xml2") + +set(SRCS + "${LIBXML2_SOURCE_DIR}/SAX.c" + "${LIBXML2_SOURCE_DIR}/entities.c" + "${LIBXML2_SOURCE_DIR}/encoding.c" + "${LIBXML2_SOURCE_DIR}/error.c" + "${LIBXML2_SOURCE_DIR}/parserInternals.c" + "${LIBXML2_SOURCE_DIR}/parser.c" + "${LIBXML2_SOURCE_DIR}/tree.c" + "${LIBXML2_SOURCE_DIR}/hash.c" + "${LIBXML2_SOURCE_DIR}/list.c" + "${LIBXML2_SOURCE_DIR}/xmlIO.c" + "${LIBXML2_SOURCE_DIR}/xmlmemory.c" + "${LIBXML2_SOURCE_DIR}/uri.c" + "${LIBXML2_SOURCE_DIR}/valid.c" + "${LIBXML2_SOURCE_DIR}/xlink.c" + "${LIBXML2_SOURCE_DIR}/HTMLparser.c" + "${LIBXML2_SOURCE_DIR}/HTMLtree.c" + "${LIBXML2_SOURCE_DIR}/debugXML.c" + "${LIBXML2_SOURCE_DIR}/xpath.c" 
+ "${LIBXML2_SOURCE_DIR}/xpointer.c" + "${LIBXML2_SOURCE_DIR}/xinclude.c" + "${LIBXML2_SOURCE_DIR}/nanohttp.c" + "${LIBXML2_SOURCE_DIR}/nanoftp.c" + "${LIBXML2_SOURCE_DIR}/catalog.c" + "${LIBXML2_SOURCE_DIR}/globals.c" + "${LIBXML2_SOURCE_DIR}/threads.c" + "${LIBXML2_SOURCE_DIR}/c14n.c" + "${LIBXML2_SOURCE_DIR}/xmlstring.c" + "${LIBXML2_SOURCE_DIR}/buf.c" + "${LIBXML2_SOURCE_DIR}/xmlregexp.c" + "${LIBXML2_SOURCE_DIR}/xmlschemas.c" + "${LIBXML2_SOURCE_DIR}/xmlschemastypes.c" + "${LIBXML2_SOURCE_DIR}/xmlunicode.c" + "${LIBXML2_SOURCE_DIR}/triostr.c" + "${LIBXML2_SOURCE_DIR}/xmlreader.c" + "${LIBXML2_SOURCE_DIR}/relaxng.c" + "${LIBXML2_SOURCE_DIR}/dict.c" + "${LIBXML2_SOURCE_DIR}/SAX2.c" + "${LIBXML2_SOURCE_DIR}/xmlwriter.c" + "${LIBXML2_SOURCE_DIR}/legacy.c" + "${LIBXML2_SOURCE_DIR}/chvalid.c" + "${LIBXML2_SOURCE_DIR}/pattern.c" + "${LIBXML2_SOURCE_DIR}/xmlsave.c" + "${LIBXML2_SOURCE_DIR}/xmlmodule.c" + "${LIBXML2_SOURCE_DIR}/schematron.c" + "${LIBXML2_SOURCE_DIR}/xzlib.c" +) +add_library(_libxml2 ${SRCS}) + +#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib) +target_link_libraries(_libxml2 PRIVATE zlib) + +target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include") +target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include") + +add_library(td_contrib::libxml2 ALIAS _libxml2) diff --git a/contrib/xml2-cmake/linux_x86_64/include/config.h b/contrib/xml2-cmake/linux_x86_64/include/config.h new file mode 100644 index 0000000000..7969b377dc --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/config.h @@ -0,0 +1,285 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Type cast for the gethostbyname() argument */ +#define GETHOSTBYNAME_ARG_CAST /**/ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_ARPA_NAMESER_H 1 + +/* Whether struct sockaddr::__ss_family exists */ +/* #undef HAVE_BROKEN_SS_FAMILY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_CTYPE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Have dlopen based dso */ +#define HAVE_DLOPEN /**/ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FLOAT_H 1 + +/* Define to 1 if you have the `fprintf' function. */ +#define HAVE_FPRINTF 1 + +/* Define to 1 if you have the `ftime' function. */ +#define HAVE_FTIME 1 + +/* Define if getaddrinfo is there */ +#define HAVE_GETADDRINFO /**/ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `isascii' function. */ +#define HAVE_ISASCII 1 + +/* Define if isinf is there */ +#define HAVE_ISINF /**/ + +/* Define if isnan is there */ +#define HAVE_ISNAN /**/ + +/* Define if history library is there (-lhistory) */ +/* #undef HAVE_LIBHISTORY */ + +/* Define if pthread library is there (-lpthread) */ +#define HAVE_LIBPTHREAD /**/ + +/* Define if readline library is there (-lreadline) */ +/* #undef HAVE_LIBREADLINE */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the `localtime' function. */ +#define HAVE_LOCALTIME 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LZMA_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MATH_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define to 1 if you have the `munmap' function. */ +#define HAVE_MUNMAP 1 + +/* mmap() is no good without munmap() */ +#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP) +# undef /**/ HAVE_MMAP +#endif + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETDB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the `printf' function. */ +#define HAVE_PRINTF 1 + +/* Define if is there */ +#define HAVE_PTHREAD_H /**/ + +/* Define to 1 if you have the `putenv' function. */ +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + +/* Define to 1 if you have the `rand_r' function. */ +#define HAVE_RAND_R 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_RESOLV_H 1 + +/* Have shl_load based dso */ +/* #undef HAVE_SHLLOAD */ + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SIGNAL_H 1 + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* Define to 1 if you have the `sprintf' function. */ +#define HAVE_SPRINTF 1 + +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if you have the `sscanf' function. */ +#define HAVE_SSCANF 1 + +/* Define to 1 if you have the `stat' function. */ +#define HAVE_STAT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strftime' function. 
*/ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIMEB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the `time' function. */ +#define HAVE_TIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Whether va_copy() is available */ +#define HAVE_VA_COPY 1 + +/* Define to 1 if you have the `vfprintf' function. */ +#define HAVE_VFPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `vsprintf' function. */ +#define HAVE_VSPRINTF 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_ZLIB_H */ + +/* Whether __va_copy() is available */ +/* #undef HAVE___VA_COPY */ + +/* Define as const if the declaration of iconv() needs const. */ +#define ICONV_CONST + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libxml2" + +/* Define to the address where bug reports for this package should be sent. 
*/ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* Type cast for the send() function 2nd arg */ +#define SEND_ARG2_CAST /**/ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Support for IPv6 */ +#define SUPPORT_IP6 /**/ + +/* Define if va_list is an array type */ +#define VA_LIST_IS_ARRAY 1 + +/* Version number of package */ +#define VERSION "2.9.8" + +/* Determine what socket length (socklen_t) data type is */ +#define XML_SOCKLEN_T socklen_t + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT32_T */ + +/* ss_family is not defined here, use __ss_family instead */ +/* #undef ss_family */ + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint32_t */ diff --git a/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h new file mode 100644 index 0000000000..c2faeb47cb --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h @@ -0,0 +1,501 @@ +/* + * Summary: compile-time version information + * Description: compile-time version information for the XML library + * + * Copy: See Copyright for the status of this software. 
+ * + * Author: Daniel Veillard + */ + +#ifndef __XML_VERSION_H__ +#define __XML_VERSION_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * use those to be sure nothing nasty will happen if + * your library and includes mismatch + */ +#ifndef LIBXML2_COMPILING_MSCCDEF +XMLPUBFUN void XMLCALL xmlCheckVersion(int version); +#endif /* LIBXML2_COMPILING_MSCCDEF */ + +/** + * LIBXML_DOTTED_VERSION: + * + * the version string like "1.2.3" + */ +#define LIBXML_DOTTED_VERSION "2.10.3" + +/** + * LIBXML_VERSION: + * + * the version number: 1.2.3 value is 10203 + */ +#define LIBXML_VERSION 21003 + +/** + * LIBXML_VERSION_STRING: + * + * the version number string, 1.2.3 value is "10203" + */ +#define LIBXML_VERSION_STRING "21003" + +/** + * LIBXML_VERSION_EXTRA: + * + * extra version information, used to show a git commit description + */ +#define LIBXML_VERSION_EXTRA "" + +/** + * LIBXML_TEST_VERSION: + * + * Macro to check that the libxml version in use is compatible with + * the version the software has been compiled against + */ +#define LIBXML_TEST_VERSION xmlCheckVersion(21003); + +#ifndef VMS +#if 0 +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO +#else +/** + * WITHOUT_TRIO: + * + * defined if the trio support should not be configured in + */ +#define WITHOUT_TRIO +#endif +#else /* VMS */ +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO 1 +#endif /* VMS */ + +/** + * LIBXML_THREAD_ENABLED: + * + * Whether the thread support is configured in + */ +#if 1 +#define LIBXML_THREAD_ENABLED +#endif + +/** + * LIBXML_THREAD_ALLOC_ENABLED: + * + * Whether the allocation hooks are per-thread + */ +#if 0 +#define LIBXML_THREAD_ALLOC_ENABLED +#endif + +/** + * LIBXML_TREE_ENABLED: + * + * Whether the DOM like tree manipulation API support is configured in + */ +#if 1 +#define LIBXML_TREE_ENABLED +#endif + +/** + * LIBXML_OUTPUT_ENABLED: + * + * 
Whether the serialization/saving support is configured in + */ +#if 1 +#define LIBXML_OUTPUT_ENABLED +#endif + +/** + * LIBXML_PUSH_ENABLED: + * + * Whether the push parsing interfaces are configured in + */ +#if 1 +#define LIBXML_PUSH_ENABLED +#endif + +/** + * LIBXML_READER_ENABLED: + * + * Whether the xmlReader parsing interface is configured in + */ +#if 1 +#define LIBXML_READER_ENABLED +#endif + +/** + * LIBXML_PATTERN_ENABLED: + * + * Whether the xmlPattern node selection interface is configured in + */ +#if 1 +#define LIBXML_PATTERN_ENABLED +#endif + +/** + * LIBXML_WRITER_ENABLED: + * + * Whether the xmlWriter saving interface is configured in + */ +#if 1 +#define LIBXML_WRITER_ENABLED +#endif + +/** + * LIBXML_SAX1_ENABLED: + * + * Whether the older SAX1 interface is configured in + */ +#if 1 +#define LIBXML_SAX1_ENABLED +#endif + +/** + * LIBXML_FTP_ENABLED: + * + * Whether the FTP support is configured in + */ +#if 0 +#define LIBXML_FTP_ENABLED +#endif + +/** + * LIBXML_HTTP_ENABLED: + * + * Whether the HTTP support is configured in + */ +#if 1 +#define LIBXML_HTTP_ENABLED +#endif + +/** + * LIBXML_VALID_ENABLED: + * + * Whether the DTD validation support is configured in + */ +#if 1 +#define LIBXML_VALID_ENABLED +#endif + +/** + * LIBXML_HTML_ENABLED: + * + * Whether the HTML support is configured in + */ +#if 1 +#define LIBXML_HTML_ENABLED +#endif + +/** + * LIBXML_LEGACY_ENABLED: + * + * Whether the deprecated APIs are compiled in for compatibility + */ +#if 0 +#define LIBXML_LEGACY_ENABLED +#endif + +/** + * LIBXML_C14N_ENABLED: + * + * Whether the Canonicalization support is configured in + */ +#if 1 +#define LIBXML_C14N_ENABLED +#endif + +/** + * LIBXML_CATALOG_ENABLED: + * + * Whether the Catalog support is configured in + */ +#if 1 +#define LIBXML_CATALOG_ENABLED +#endif + +/** + * LIBXML_XPATH_ENABLED: + * + * Whether XPath is configured in + */ +#if 1 +#define LIBXML_XPATH_ENABLED +#endif + +/** + * LIBXML_XPTR_ENABLED: + * + * Whether XPointer 
is configured in + */ +#if 1 +#define LIBXML_XPTR_ENABLED +#endif + +/** + * LIBXML_XPTR_LOCS_ENABLED: + * + * Whether support for XPointer locations is configured in + */ +#if 0 +#define LIBXML_XPTR_LOCS_ENABLED +#endif + +/** + * LIBXML_XINCLUDE_ENABLED: + * + * Whether XInclude is configured in + */ +#if 1 +#define LIBXML_XINCLUDE_ENABLED +#endif + +/** + * LIBXML_ICONV_ENABLED: + * + * Whether iconv support is available + */ +#if 0 +#define LIBXML_ICONV_ENABLED +#endif + +/** + * LIBXML_ICU_ENABLED: + * + * Whether icu support is available + */ +#if 0 +#define LIBXML_ICU_ENABLED +#endif + +/** + * LIBXML_ISO8859X_ENABLED: + * + * Whether ISO-8859-* support is made available in case iconv is not + */ +#if 1 +#define LIBXML_ISO8859X_ENABLED +#endif + +/** + * LIBXML_DEBUG_ENABLED: + * + * Whether Debugging module is configured in + */ +#if 1 +#define LIBXML_DEBUG_ENABLED +#endif + +/** + * DEBUG_MEMORY_LOCATION: + * + * Whether the memory debugging is configured in + */ +#if 0 +#define DEBUG_MEMORY_LOCATION +#endif + +/** + * LIBXML_DEBUG_RUNTIME: + * + * Whether the runtime debugging is configured in + */ +#if 0 +#define LIBXML_DEBUG_RUNTIME +#endif + +/** + * LIBXML_UNICODE_ENABLED: + * + * Whether the Unicode related interfaces are compiled in + */ +#if 1 +#define LIBXML_UNICODE_ENABLED +#endif + +/** + * LIBXML_REGEXP_ENABLED: + * + * Whether the regular expressions interfaces are compiled in + */ +#if 1 +#define LIBXML_REGEXP_ENABLED +#endif + +/** + * LIBXML_AUTOMATA_ENABLED: + * + * Whether the automata interfaces are compiled in + */ +#if 1 +#define LIBXML_AUTOMATA_ENABLED +#endif + +/** + * LIBXML_EXPR_ENABLED: + * + * Whether the formal expressions interfaces are compiled in + * + * This code is unused and disabled unconditionally for now. 
+ */ +#if 0 +#define LIBXML_EXPR_ENABLED +#endif + +/** + * LIBXML_SCHEMAS_ENABLED: + * + * Whether the Schemas validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMAS_ENABLED +#endif + +/** + * LIBXML_SCHEMATRON_ENABLED: + * + * Whether the Schematron validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMATRON_ENABLED +#endif + +/** + * LIBXML_MODULES_ENABLED: + * + * Whether the module interfaces are compiled in + */ +#if 1 +#define LIBXML_MODULES_ENABLED +/** + * LIBXML_MODULE_EXTENSION: + * + * the string suffix used by dynamic modules (usually shared libraries) + */ +#define LIBXML_MODULE_EXTENSION ".so" +#endif + +/** + * LIBXML_ZLIB_ENABLED: + * + * Whether the Zlib support is compiled in + */ +#if 1 +#define LIBXML_ZLIB_ENABLED +#endif + +/** + * LIBXML_LZMA_ENABLED: + * + * Whether the Lzma support is compiled in + */ +#if 0 +#define LIBXML_LZMA_ENABLED +#endif + +#ifdef __GNUC__ + +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ + +#ifndef ATTRIBUTE_UNUSED +# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7))) +# define ATTRIBUTE_UNUSED __attribute__((unused)) +# else +# define ATTRIBUTE_UNUSED +# endif +#endif + +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ + +#ifndef LIBXML_ATTR_ALLOC_SIZE +# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))) +# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x))) +# else +# define LIBXML_ATTR_ALLOC_SIZE(x) +# endif +#else +# define LIBXML_ATTR_ALLOC_SIZE(x) +#endif + +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ + +#ifndef LIBXML_ATTR_FORMAT +# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3))) +# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args))) +# else +# define LIBXML_ATTR_FORMAT(fmt,args) +# endif +#else +# 
define LIBXML_ATTR_FORMAT(fmt,args) +#endif + +#ifndef XML_DEPRECATED +# ifdef IN_LIBXML +# define XML_DEPRECATED +# else +/* Available since at least GCC 3.1 */ +# define XML_DEPRECATED __attribute__((deprecated)) +# endif +#endif + +#else /* ! __GNUC__ */ +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ +#define ATTRIBUTE_UNUSED +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ +#define LIBXML_ATTR_ALLOC_SIZE(x) +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ +#define LIBXML_ATTR_FORMAT(fmt,args) +/** + * XML_DEPRECATED: + * + * Macro used to indicate that a function, variable, type or struct member + * is deprecated. + */ +#ifndef XML_DEPRECATED +#define XML_DEPRECATED +#endif +#endif /* __GNUC__ */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif From 4eb3563d6df425ba90afd08cd54dc41348c1ce78 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Jan 2025 16:16:24 +0800 Subject: [PATCH 062/120] xml2: use xml2 from github instead of gnome --- .gitignore | 4 +++- cmake/azure_CMakeLists.txt.in | 1 + contrib/CMakeLists.txt | 7 ++++++- contrib/azure-cmake/CMakeLists.txt | 7 +------ source/common/CMakeLists.txt | 3 +-- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 242433adb1..ff27b53139 100644 --- a/.gitignore +++ b/.gitignore @@ -123,7 +123,6 @@ contrib/* !contrib/CMakeLists.txt !contrib/test !contrib/azure-cmake -!contrib/xml2-cmake sql debug*/ .env @@ -157,6 +156,9 @@ pcre2_grep_test.sh pcre2_chartables.c geos-config config.h +!contrib/xml2-cmake +!contrib/xml2-cmake/linux_x86_64/include/config.h +!contrib/xml2-cmake/CMakeLists.txt pcre2.h zconf.h version.h diff --git a/cmake/azure_CMakeLists.txt.in b/cmake/azure_CMakeLists.txt.in index 5aa32b70e5..d9e47ce6b1 100644 --- a/cmake/azure_CMakeLists.txt.in +++ b/cmake/azure_CMakeLists.txt.in @@ -2,6 +2,7 @@ 
ExternalProject_Add(azure URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9 + DEPENDS xml2 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 78eded3928..2304ad54aa 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -652,7 +652,12 @@ if(${BUILD_PCRE2}) endif(${BUILD_PCRE2}) if(${TD_LINUX} AND ${BUILD_WITH_S3}) - add_subdirectory(azure-cmake EXCLUDE_FROM_ALL) + set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) + string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + add_subdirectory(xml2-cmake) + set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS}) + + add_subdirectory(azure-cmake) endif() IF(TD_LINUX) diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index aaa5617860..eaf4c569e7 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -36,10 +36,6 @@ target_include_directories( ) find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) -find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - -# find_library(CURL_LIBRARY curl) -# find_library(XML2_LIBRARY xml2) find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) @@ -50,9 +46,8 @@ target_link_libraries( PRIVATE ${CURL_LIBRARY} PRIVATE ${SSL_LIBRARY} PRIVATE ${CRYPTO_LIBRARY} - PRIVATE ${XML2_LIBRARY} - # PRIVATE xml2 + PRIVATE _libxml2 PRIVATE zlib # PRIVATE ${CoreFoundation_Library} diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 39380a0644..e050eaa16d 100644 --- a/source/common/CMakeLists.txt +++ 
b/source/common/CMakeLists.txt @@ -66,7 +66,6 @@ if(${BUILD_S3}) set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) find_library(S3_LIBRARY s3) find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(XML2_LIBRARY xml2) find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) target_link_libraries( @@ -77,7 +76,7 @@ if(${BUILD_S3}) PUBLIC ${CURL_LIBRARY} PUBLIC ${SSL_LIBRARY} PUBLIC ${CRYPTO_LIBRARY} - PUBLIC ${XML2_LIBRARY} + PUBLIC _libxml2 ) add_definitions(-DUSE_S3) From ded5e2890f4c4b12a3516f000c500b33392f27d1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Jan 2025 16:19:42 +0800 Subject: [PATCH 063/120] cmake/xml2: use xml2 from github --- cmake/xml2_CMakeLists.txt.in | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cmake/xml2_CMakeLists.txt.in b/cmake/xml2_CMakeLists.txt.in index 0e7492aea7..1a12d762a0 100644 --- a/cmake/xml2_CMakeLists.txt.in +++ b/cmake/xml2_CMakeLists.txt.in @@ -1,19 +1,16 @@ # xml2 ExternalProject_Add(xml2 - URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz - URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6 - #https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz - #GIT_REPOSITORY https://github.com/GNOME/libxml2 - #GIT_TAG v2.11.5 + URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz + URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/xml2" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma - BUILD_COMMAND make -j - INSTALL_COMMAND make install && ln -sf 
$ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" TEST_COMMAND "" GIT_SHALLOW true ) From 008233ccf6e8efec22613ad84c31b496b1504c5a Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 16 Jan 2025 16:22:17 +0800 Subject: [PATCH 064/120] doc: add Chaos Test to README and update some functions --- tests/README.md | 12 ++++++--- tests/pytest/auto_crash_gen.py | 15 ++++++----- tests/pytest/auto_crash_gen_valgrind.py | 14 +++++------ .../pytest/auto_crash_gen_valgrind_cluster.py | 18 ++++++------- tests/system-test/test.py | 25 ++++++++++++------- 5 files changed, 47 insertions(+), 37 deletions(-) diff --git a/tests/README.md b/tests/README.md index e51a6a0e49..e990d622dc 100644 --- a/tests/README.md +++ b/tests/README.md @@ -81,7 +81,7 @@ Unit test script is the smallest testable part and developed for some function, ```bash cd debug/build/bin -./osTimeTests +./osTimeTests ``` ### How to run all unit test cases? @@ -163,12 +163,16 @@ cd /root/TDengine/packaging/smokeTest ## 3.3 Chaos Test -[Desciprtion] +A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems, hopefully without a pre-determined scenario. ### How to run test? -[Placeholder] +```bash +cd tests/pytest +python3 auto_crash_gen.py +``` ### How to add new case? -[Placeholder] +Add a function, such as TaskCreateNewFunction, to pytest/crash_gen/crash_gen_main.py. +Integrate TaskCreateNewFunction into the balance_pickTaskType function in crash_gen_main.py. 
diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 316f2ead0f..a35beb3395 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -244,7 +244,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path) + start_cmd = 'cd %s && python3 test.py -G >>/dev/null '%(start_path) os.system(start_cmd) def get_cmds(args_list): @@ -371,7 +371,7 @@ Result: {msg_dict[status]} Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} @@ -380,14 +380,13 @@ Core dir: {core_dir} ''' text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - if text_result == "success": - send_msg(notification_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) - send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index b7af68cd2f..0bd70ebf3f 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -245,7 +245,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py '%(start_path) + start_cmd = 'cd %s && python3 test.py -G'%(start_path) os.system(start_cmd +">>/dev/null") def get_cmds(args_list): @@ -404,24 +404,24 @@ Result: {msg_dict[status]} Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} Log dir: {log_dir} Core dir: {core_dir} ''' - + text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - + if text_result == "success": 
send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index df40b60967..b4b90e1f5e 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -236,7 +236,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py -N 4 -M 1 '%(start_path) + start_cmd = 'cd %s && python3 test.py -N 4 -M 1 -G '%(start_path) os.system(start_cmd +">>/dev/null") def get_cmds(args_list): @@ -388,28 +388,28 @@ def main(): text = f''' Result: {msg_dict[status]} - + Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} Log dir: {log_dir} Core dir: {core_dir} ''' - + text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - + if text_result == "success": send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) - send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 0d40544be8..ab1bdc21d3 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -58,12 +58,12 @@ def checkRunTimeError(): if hwnd: os.system("TASKKILL /F /IM taosd.exe") -# +# # run case on previous cluster # def runOnPreviousCluster(host, config, fileName): print("enter run on previeous") - + # load case module sep = "/" if platform.system().lower() == 
'windows': @@ -113,8 +113,9 @@ if __name__ == "__main__": asan = False independentMnode = False previousCluster = False - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous']) + crashGen = False + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP:G', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous',"crashGen"]) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -141,6 +142,7 @@ if __name__ == "__main__": tdLog.printNoPrefix('-i independentMnode Mnode') tdLog.printNoPrefix('-a address sanitizer mode') tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.') + tdLog.printNoPrefix('-G crashGen mode') sys.exit(0) @@ -208,7 +210,7 @@ if __name__ == "__main__": if key in ['-R', '--restful']: restful = True - + if key in ['-W', '--websocket']: websocket = True @@ -228,6 +230,10 @@ if __name__ == "__main__": if key in ['-P', '--previous']: previousCluster = True + if key in ['-G', '--crashGen']: + crashGen = True + + # # do exeCmd command # @@ -405,7 +411,7 @@ if __name__ == "__main__": for dnode in tdDnodes.dnodes: tdDnodes.starttaosd(dnode.index) tdCases.logSql(logSql) - + if restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -450,7 +456,7 @@ if __name__ == "__main__": else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") - + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: conn = 
None else: @@ -640,7 +646,7 @@ if __name__ == "__main__": else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") - + # run case if testCluster: @@ -692,6 +698,7 @@ if __name__ == "__main__": # tdDnodes.StopAllSigint() tdLog.info("Address sanitizer mode finished") else: - tdDnodes.stopAll() + if not crashGen: + tdDnodes.stopAll() tdLog.info("stop all td process finished") sys.exit(0) From 376a30f20de579a29264587a2fc9c5f5d4f3ebdd Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 17:00:39 +0800 Subject: [PATCH 065/120] update testing README by charles --- tests/README.md | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/tests/README.md b/tests/README.md index e990d622dc..78edcac6ac 100644 --- a/tests/README.md +++ b/tests/README.md @@ -57,11 +57,11 @@ In `tests` directory, there are different types of tests for TDengine. Below is ## 3.1 CI Test -[Desciprtion] +CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test -### How to run tests? +### How to run all CI test cases? -If this is the first time to run all the CI tests, it is recommended to add the test branch, please run like following commands: +If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: ```bash cd tests @@ -70,8 +70,7 @@ cd tests ### How to add new cases? -[You can add sim test case under tests/script, python test case under tests/system-test or tests/army. When the case passes in the test branch, add the case to the cases.task file under tests/parallel_test, and then merge the pr into main branch to run in the future CI.] 
- +You can refer the below child sections to add new test cases for CI test. ### 3.1.1 Unit Test @@ -93,8 +92,27 @@ bash test.sh -e 0 #### How to add new cases? -Copy from the old version, need updates: +The Google test framwork is used for unit testing to specific function module, you can refer below steps to add one test case: +##### Create test case file and develop the test scripts + +In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. + +##### Update build configuration + +Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the source\os\test\CMakeLists.txt file for configuration examples. + +##### Compile test code + +In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., cmake .. -DBUILD_TEST=1) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. + +##### Execute the test program + +Find the executable file in the compiled directory(e.g. TDengine/debug/build/bin/) and run it. + +##### Integrate into CI tests + +Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. ## 3.1.2 System Test @@ -141,14 +159,12 @@ cd tests ### How to add new case? -[Placeholder] - > [!NOTE] -> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test. +> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#312-system-test) for detail steps. ## 3.2 Smoke Test -Smoke test script is known as sanity testing to ensure that the critical functionalities of TDengine. 
+Smoke test script is from system test and known as sanity testing to ensure that the critical functionalities of TDengine. ### How to run test? @@ -163,7 +179,7 @@ cd /root/TDengine/packaging/smokeTest ## 3.3 Chaos Test -A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems, hopefully without a pre-determined scenario. +A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems without a pre-determined scenario. ### How to run test? From b63842177cf89269ced40229d6449625d6022fa3 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 16 Jan 2025 17:13:49 +0800 Subject: [PATCH 066/120] doc: update stt_trigger --- docs/en/14-reference/03-taos-sql/02-database.md | 2 +- docs/zh/14-reference/03-taos-sql/02-database.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/03-taos-sql/02-database.md b/docs/en/14-reference/03-taos-sql/02-database.md index 6a46620418..c6865fd162 100644 --- a/docs/en/14-reference/03-taos-sql/02-database.md +++ b/docs/en/14-reference/03-taos-sql/02-database.md @@ -65,7 +65,7 @@ database_option: { - MINROWS: The minimum number of records in a file block, default is 100. - KEEP: Indicates the number of days data files are kept, default value is 3650, range [1, 365000], and must be greater than or equal to 3 times the DURATION parameter value. The database will automatically delete data that has been saved for longer than the KEEP value to free up storage space. KEEP can use unit-specified formats, such as KEEP 100h, KEEP 10d, etc., supports m (minutes), h (hours), and d (days) three units. It can also be written without a unit, like KEEP 50, where the default unit is days. 
The enterprise version supports multi-tier storage feature, thus, multiple retention times can be set (multiple separated by commas, up to 3, satisfying keep 0 \<= keep 1 \<= keep 2, such as KEEP 100h,100d,3650d); the community version does not support multi-tier storage feature (even if multiple retention times are configured, it will not take effect, KEEP will take the longest retention time). - KEEP_TIME_OFFSET: Effective from version 3.2.0.0. The delay execution time for deleting or migrating data that has been saved for longer than the KEEP value, default value is 0 (hours). After the data file's save time exceeds KEEP, the deletion or migration operation will not be executed immediately, but will wait an additional interval specified by this parameter, to avoid peak business periods. -- STT_TRIGGER: Indicates the number of file merges triggered by disk files. The open-source version is fixed at 1, the enterprise version can be set from 1 to 16. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value. +- STT_TRIGGER: Indicates the number of file merges triggered by disk files. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value. - SINGLE_STABLE: Indicates whether only one supertable can be created in this database, used in cases where the supertable has a very large number of columns. - 0: Indicates that multiple supertables can be created. - 1: Indicates that only one supertable can be created. 
diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 4e64c1257b..9f60b51efd 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -67,7 +67,7 @@ database_option: { - KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/) - KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。 -- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 +- STT_TRIGGER:表示落盘文件触发文件合并的个数。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 - 0:表示可以创建多张超级表。 - 1:表示只可以创建一张超级表。 From b72e6189dac55857bf2d87d0d27a0f8e260ddf9c Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 17:19:06 +0800 Subject: [PATCH 067/120] udpate smoke test part of testing README by charles --- tests/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 78edcac6ac..ff42c9bb3d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -175,7 +175,7 @@ cd /root/TDengine/packaging/smokeTest ### How to add new case? -[Placeholder] +You can update python commands part of test_smoking_selfhost.sh file to add any system test case into smoke test. 
## 3.3 Chaos Test From 9814436320a93f65cd62abd8ea18d681a5bb43ed Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Jan 2025 17:20:55 +0800 Subject: [PATCH 068/120] contrib/xml2: use libxml2 to skip cached ci directories --- cmake/xml2_CMakeLists.txt.in | 2 +- contrib/xml2-cmake/CMakeLists.txt | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cmake/xml2_CMakeLists.txt.in b/cmake/xml2_CMakeLists.txt.in index 1a12d762a0..8dcd89efc0 100644 --- a/cmake/xml2_CMakeLists.txt.in +++ b/cmake/xml2_CMakeLists.txt.in @@ -5,7 +5,7 @@ ExternalProject_Add(xml2 URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" - SOURCE_DIR "${TD_CONTRIB_DIR}/xml2" + SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2" #BINARY_DIR "" BUILD_IN_SOURCE TRUE CONFIGURE_COMMAND "" diff --git a/contrib/xml2-cmake/CMakeLists.txt b/contrib/xml2-cmake/CMakeLists.txt index 30b00160ed..9067c0e6e7 100644 --- a/contrib/xml2-cmake/CMakeLists.txt +++ b/contrib/xml2-cmake/CMakeLists.txt @@ -1,5 +1,4 @@ -#set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2") -set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/xml2") +set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2") set(SRCS "${LIBXML2_SOURCE_DIR}/SAX.c" From ce384e325a9ed7f64e5e8dd3af78b509fa251935 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 17:50:54 +0800 Subject: [PATCH 069/120] ci:modify test readme --- tests/README.md | 45 +++++++++++++-- .../0-others/test_case_template.py | 55 +++++++++++++++++++ 2 files changed, 94 insertions(+), 6 deletions(-) create mode 100644 tests/system-test/0-others/test_case_template.py diff --git a/tests/README.md b/tests/README.md index ff42c9bb3d..f684c8977c 100644 --- a/tests/README.md +++ b/tests/README.md @@ -92,28 +92,34 @@ bash test.sh -e 0 #### How to add new cases? +
+ +Detailed steps to add new unit test case + The Google test framwork is used for unit testing to specific function module, you can refer below steps to add one test case: -##### Create test case file and develop the test scripts +##### 1. Create test case file and develop the test scripts In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. -##### Update build configuration +##### 2. Update build configuration Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the source\os\test\CMakeLists.txt file for configuration examples. -##### Compile test code +##### 3. Compile test code In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., cmake .. -DBUILD_TEST=1) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. -##### Execute the test program +##### 4. Execute the test program Find the executable file in the compiled directory(e.g. TDengine/debug/build/bin/) and run it. -##### Integrate into CI tests +##### 5. Integrate into CI tests Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. +
+ ## 3.1.2 System Test Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only @@ -135,7 +141,34 @@ cd tests ### How to add new case? -[Placeholder] +
+ +Detailed steps to add new system test case + +you can refer below steps to add one test case: + +##### 1. Create test case file and develop the test scripts + +In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. + +##### 2. Update build configuration + +Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the source\os\test\CMakeLists.txt file for configuration examples. + +##### 3. Compile test code + +In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., cmake .. -DBUILD_TEST=1) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. + +##### 4. Execute the test program + +Find the executable file in the compiled directory(e.g. TDengine/debug/build/bin/) and run it. + +##### 5. Integrate into CI tests + +Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. + +
+ ## 3.1.3 Legacy Test diff --git a/tests/system-test/0-others/test_case_template.py b/tests/system-test/0-others/test_case_template.py new file mode 100644 index 0000000000..53acbade66 --- /dev/null +++ b/tests/system-test/0-others/test_case_template.py @@ -0,0 +1,55 @@ + +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * +from util.common import * + + +class TDTestCase: + + """ + Here is the class description for the whole file cases + """ + + # add the configuration of the client and server here + clientCfgDict = {'debugFlag': 131} + updatecfgDict = { + "debugFlag" : "131", + "queryBufferSize" : 10240, + 'clientCfg' : clientCfgDict + } + + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.replicaVar = int(replicaVar) + + + def test_function(): # case function should be named start with test_ + """ + Here is the function description for single test: + Test case for custom function + """ + tdLog.info(f"Test case test custom function") + # excute the sql + tdLog.execute(f"create database db_test_function") + tdSql.execute(f"create table db_test_function.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") + # qury the result and return the result + tdSql.query(f"show databases") + # print result and check the result + database_info = tdLog.info(f"{tdSql.queryResult}") + tdSql.checkRows(3) + tdSql.checkData(2,0,"db_test_function") + + + def run(self): + self.test_5dnode_3mnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=2,stopRole='vnode') + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 24db39d4ce4a938baf8f5dce3b9ef427aa502fde Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Thu, 16 Jan 2025 18:08:07 +0800 Subject: [PATCH 070/120] update testing 
guide structure by charles --- tests/README.md | 61 +++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/tests/README.md b/tests/README.md index f684c8977c..4349e4bc65 100644 --- a/tests/README.md +++ b/tests/README.md @@ -3,13 +3,12 @@ 1. [Introduction](#1-introduction) 1. [Prerequisites](#2-prerequisites) 1. [Testing Guide](#3-testing-guide) - - [3.1 CI Test](#31-ci-test) - - [3.1.1 Unit Test](#311-unit-test) - - [3.1.2 System Test](#312-system-test) - - [3.1.3 Legacy Test](#313-legacy-test) - - [3.2 Smoke Test](#32-smoke-test) - - [3.3 Chaos Test](#33-chaos-test) - + - [3.1 Unit Test](#31-unit-test) + - [3.2 System Test](#32-system-test) + - [3.3 Legacy Test](#33-legacy-test) + - [3.4 Smoke Test](#34-smoke-test) + - [3.5 Chaos Test](#35-chaos-test) + - [3.6 CI Test](#36-ci-test) # 1. Introduction @@ -54,25 +53,7 @@ make && make install In `tests` directory, there are different types of tests for TDengine. Below is a brief introduction about how to run them and how to add new cases. - -## 3.1 CI Test - -CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test - -### How to run all CI test cases? - -If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: - -```bash -cd tests -./run_all_ci_cases.sh -b main # on main branch -``` - -### How to add new cases? - -You can refer the below child sections to add new test cases for CI test. - -### 3.1.1 Unit Test +### 3.1 Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine. @@ -90,7 +71,7 @@ cd tests/unit-test/ bash test.sh -e 0 ``` -#### How to add new cases? 
+#### How to add new cases? {#test-id}
@@ -120,7 +101,7 @@ Use the add_test command to add new compiled test cases into CI test collection,
-## 3.1.2 System Test +## 3.2 System Test Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only work for TDengine Enterprise Edition. @@ -169,8 +150,7 @@ Use the add_test command to add new compiled test cases into CI test collection, - -## 3.1.3 Legacy Test +## 3.3 Legacy Test In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. @@ -195,7 +175,7 @@ cd tests > [!NOTE] > TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#312-system-test) for detail steps. -## 3.2 Smoke Test +## 3.4 Smoke Test Smoke test script is from system test and known as sanity testing to ensure that the critical functionalities of TDengine. @@ -210,7 +190,7 @@ cd /root/TDengine/packaging/smokeTest You can update python commands part of test_smoking_selfhost.sh file to add any system test case into smoke test. -## 3.3 Chaos Test +## 3.5 Chaos Test A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems without a pre-determined scenario. @@ -225,3 +205,20 @@ python3 auto_crash_gen.py Add a function, such as TaskCreateNewFunction, to pytest/crash_gen/crash_gen_main.py. Integrate TaskCreateNewFunction into the balance_pickTaskType function in crash_gen_main.py. + +## 3.6 CI Test + +CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test + +### How to run all CI test cases? 
+ +If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: + +```bash +cd tests +./run_all_ci_cases.sh -b main # on main branch +``` + +### How to add new cases? + +You can refer the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detail steps to add new test cases for CI test. From 59ea64e1a8623ff6c54b7f9e45fb2241bf0190d9 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 18:19:19 +0800 Subject: [PATCH 071/120] ci:modify test readme --- tests/README.md | 23 +++++++++---------- .../0-others/test_case_template.py | 6 ++--- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/tests/README.md b/tests/README.md index f684c8977c..9b903c2cb5 100644 --- a/tests/README.md +++ b/tests/README.md @@ -145,27 +145,26 @@ cd tests Detailed steps to add new system test case -you can refer below steps to add one test case: +test.py is the test case execution and monitoring of the entry program. you can refer below steps to add one test case: -##### 1. Create test case file and develop the test scripts +##### 1.Create a test case file and develop the test cases -In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. +Create a file in `tests/system-test` containing each functional directory and refer to the use case template `tests/system-test/0-others/test_case_template.py` to add a new test case. -##### 2. Update build configuration +##### 2.Execute the test case -Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the source\os\test\CMakeLists.txt file for configuration examples. +cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py -##### 3. 
Compile test code +##### 3.Integrate into CI tests -In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., cmake .. -DBUILD_TEST=1) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. +Edit `tests/parallel_test/cases.task` and add the testcase path and executions in the specified format. The third column indicates whether to use Address Sanitizer mode for testing. -##### 4. Execute the test program -Find the executable file in the compiled directory(e.g. TDengine/debug/build/bin/) and run it. -##### 5. Integrate into CI tests - -Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. +```bash +#caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand +,,n,system-test, python3 ./test.py -f 0-others/test_case_template.py +``` diff --git a/tests/system-test/0-others/test_case_template.py b/tests/system-test/0-others/test_case_template.py index 53acbade66..fa1a9b5ade 100644 --- a/tests/system-test/0-others/test_case_template.py +++ b/tests/system-test/0-others/test_case_template.py @@ -27,14 +27,14 @@ class TDTestCase: self.replicaVar = int(replicaVar) - def test_function(): # case function should be named start with test_ + def test_function(self): # case function should be named start with test_ """ Here is the function description for single test: Test case for custom function """ tdLog.info(f"Test case test custom function") # excute the sql - tdLog.execute(f"create database db_test_function") + tdSql.execute(f"create database db_test_function") tdSql.execute(f"create table db_test_function.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") # qury the result and return the result tdSql.query(f"show databases") @@ -45,7 +45,7 @@ class TDTestCase: def run(self): - 
self.test_5dnode_3mnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=2,stopRole='vnode') + self.test_function() def stop(self): tdSql.close() From 31e69ea2b2c8ac2db72563f8aaf332c0a80cb69c Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 18:25:29 +0800 Subject: [PATCH 072/120] ci:modify test readme --- tests/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 9b903c2cb5..fac0babe24 100644 --- a/tests/README.md +++ b/tests/README.md @@ -145,7 +145,8 @@ cd tests Detailed steps to add new system test case -test.py is the test case execution and monitoring of the entry program. you can refer below steps to add one test case: +The Python test framework is developed by TDengine teams, and test.py is the test case execution and monitoring of the entry program, Use `python3 ./test.py -h` to view more features. +you can refer below steps to add one test case: ##### 1.Create a test case file and develop the test cases From d155025d9d5067c79de7cb844bb99f5ae3118a1c Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 18:32:07 +0800 Subject: [PATCH 073/120] ci:modify test readme --- tests/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/README.md b/tests/README.md index fac0babe24..3bc57d67e3 100644 --- a/tests/README.md +++ b/tests/README.md @@ -154,7 +154,10 @@ Create a file in `tests/system-test` containing each functional directory and re ##### 2.Execute the test case +Ensure the test case execution is successful. 
+```bash cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py +``` ##### 3.Integrate into CI tests From 20f911d009ab106487d8b848d9f1fd2edcffae7e Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 18:52:43 +0800 Subject: [PATCH 074/120] test:add note --- README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 91232136c5..8ceab44b3f 100644 --- a/README.md +++ b/README.md @@ -387,7 +387,10 @@ TDengine build check workflow can be found in this [Github Action](https://githu # 11. Coverage -Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine). To create the test coverage report (in HTML format) locally, please run following commands: +
+ +Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine). +To create the test coverage report (in HTML format) locally, please run following commands: ```bash cd tests @@ -395,9 +398,12 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task # on main branch and run cases in longtimeruning_cases.task # for more infomation about options please refer to ./run_local_coverage.sh -h ``` -> [!NOTE] +> [**NOTE**] > Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. +
+ + # 12. Contributing Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine. From 469cbcbf332b972433f544d8340b8a8c2b16160d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 18:56:49 +0800 Subject: [PATCH 075/120] ci:modify test readme --- README.md | 5 ++-- tests/README.md | 73 +++++++++++++++++++++++-------------------------- 2 files changed, 36 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 91232136c5..1111fc150f 100644 --- a/README.md +++ b/README.md @@ -138,8 +138,7 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -> [!NOTE] -> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). +> NOTE: TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). @@ -147,7 +146,7 @@ cd TDengine At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. 
-You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to install from source. +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/) or [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment). This quick guide only applies to install from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. diff --git a/tests/README.md b/tests/README.md index 3bc57d67e3..aef27c7a6e 100644 --- a/tests/README.md +++ b/tests/README.md @@ -3,13 +3,12 @@ 1. [Introduction](#1-introduction) 1. [Prerequisites](#2-prerequisites) 1. [Testing Guide](#3-testing-guide) - - [3.1 CI Test](#31-ci-test) - - [3.1.1 Unit Test](#311-unit-test) - - [3.1.2 System Test](#312-system-test) - - [3.1.3 Legacy Test](#313-legacy-test) - - [3.2 Smoke Test](#32-smoke-test) - - [3.3 Chaos Test](#33-chaos-test) - + - [3.1 Unit Test](#31-unit-test) + - [3.2 System Test](#32-system-test) + - [3.3 Legacy Test](#33-legacy-test) + - [3.4 Smoke Test](#34-smoke-test) + - [3.5 Chaos Test](#35-chaos-test) + - [3.6 CI Test](#36-ci-test) # 1. Introduction @@ -54,25 +53,7 @@ make && make install In `tests` directory, there are different types of tests for TDengine. Below is a brief introduction about how to run them and how to add new cases. 
- -## 3.1 CI Test - -CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test - -### How to run all CI test cases? - -If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: - -```bash -cd tests -./run_all_ci_cases.sh -b main # on main branch -``` - -### How to add new cases? - -You can refer the below child sections to add new test cases for CI test. - -### 3.1.1 Unit Test +### 3.1 Unit Test Unit test script is the smallest testable part and developed for some function, method or class of TDengine. @@ -90,7 +71,7 @@ cd tests/unit-test/ bash test.sh -e 0 ``` -#### How to add new cases? +#### How to add new cases?
@@ -104,15 +85,15 @@ In the test directory corresponding to the target function module, create test f ##### 2. Update build configuration -Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the source\os\test\CMakeLists.txt file for configuration examples. +Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the `source/os/test/CMakeLists.txt` file for configuration examples. ##### 3. Compile test code -In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., cmake .. -DBUILD_TEST=1) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. +In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., `cmake .. -DBUILD_TEST=1`) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. ##### 4. Execute the test program -Find the executable file in the compiled directory(e.g. TDengine/debug/build/bin/) and run it. +Find the executable file in the compiled directory(e.g. `TDengine/debug/build/bin/`) and run it. ##### 5. Integrate into CI tests @@ -120,7 +101,7 @@ Use the add_test command to add new compiled test cases into CI test collection,
-## 3.1.2 System Test +## 3.2 System Test Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only work for TDengine Enterprise Edition. @@ -154,10 +135,7 @@ Create a file in `tests/system-test` containing each functional directory and re ##### 2.Execute the test case -Ensure the test case execution is successful. -```bash cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py -``` ##### 3.Integrate into CI tests @@ -172,8 +150,7 @@ Edit `tests/parallel_test/cases.task` and add the testcase path and executions i - -## 3.1.3 Legacy Test +## 3.3 Legacy Test In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. @@ -195,10 +172,10 @@ cd tests ### How to add new case? -> [!NOTE] +> [!NOTE] > TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#312-system-test) for detail steps. -## 3.2 Smoke Test +## 3.4 Smoke Test Smoke test script is from system test and known as sanity testing to ensure that the critical functionalities of TDengine. @@ -213,7 +190,7 @@ cd /root/TDengine/packaging/smokeTest You can update python commands part of test_smoking_selfhost.sh file to add any system test case into smoke test. -## 3.3 Chaos Test +## 3.5 Chaos Test A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems without a pre-determined scenario. @@ -227,4 +204,22 @@ python3 auto_crash_gen.py ### How to add new case? Add a function, such as TaskCreateNewFunction, to pytest/crash_gen/crash_gen_main.py. + Integrate TaskCreateNewFunction into the balance_pickTaskType function in crash_gen_main.py. 
+ +## 3.6 CI Test + +CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test + +### How to run all CI test cases? + +If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: + +```bash +cd tests +./run_all_ci_cases.sh -b main # on main branch +``` + +### How to add new cases? + +You can refer the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detail steps to add new test cases for CI test. From 51f989833bba2ae7495d24ab4c4e3cbdd2da1fae Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 18:59:37 +0800 Subject: [PATCH 076/120] test:add note --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8ceab44b3f..872842423c 100644 --- a/README.md +++ b/README.md @@ -398,7 +398,7 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task # on main branch and run cases in longtimeruning_cases.task # for more infomation about options please refer to ./run_local_coverage.sh -h ``` -> [**NOTE**] +> **NOTE:** > Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. 
From 687fa1d5c272d4959bb62f4afb4ebd809bc82f85 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 16 Jan 2025 19:01:09 +0800 Subject: [PATCH 077/120] test:add note --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 872842423c..8ef4f1e887 100644 --- a/README.md +++ b/README.md @@ -138,7 +138,7 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -> [!NOTE] +> **NOTE:** > TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). From 4f0335eb9976d5b88bbf515b7f75f924f22217ad Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 19:03:14 +0800 Subject: [PATCH 078/120] ci:modify test readme --- tests/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index aef27c7a6e..b9fa40f93b 100644 --- a/tests/README.md +++ b/tests/README.md @@ -173,7 +173,7 @@ cd tests ### How to add new case? > [!NOTE] -> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#312-system-test) for detail steps. +> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#32-system-test) for detail steps. 
## 3.4 Smoke Test From 0af9f1005435f003606914d62e560e0836a370ba Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 19:10:10 +0800 Subject: [PATCH 079/120] Merge latest changes from origin/docs/chr1-readme --- tests/README.md | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/README.md b/tests/README.md index b9fa40f93b..bc97d74afd 100644 --- a/tests/README.md +++ b/tests/README.md @@ -12,7 +12,9 @@ # 1. Introduction -This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. The document is divided into three main sections: introduction, prerequisites and testing guide. +This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. + +The document is divided into three main sections: introduction, prerequisites and testing guide. > [!NOTE] > The below commands and test scripts are verified on linux (Ubuntu 18.04、20.04、22.04) locally. @@ -89,7 +91,9 @@ Modify the CMakeLists.txt file in this directory to ensure that the new test fil ##### 3. Compile test code -In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., `cmake .. -DBUILD_TEST=1`) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. +In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., `cmake .. -DBUILD_TEST=1`) to generate a compilation file, + +and then run a compilation command (e.g. make) to complete the compilation of the test code. ##### 4. Execute the test program @@ -110,7 +114,7 @@ work for TDengine Enterprise Edition. ```bash cd tests/system-test -python3 ./test.py -f 2-query/floor.py +python3 ./test.py -f 2-query/avg.py ``` ### How to run all system test cases? 
@@ -127,6 +131,7 @@ cd tests Detailed steps to add new system test case The Python test framework is developed by TDengine teams, and test.py is the test case execution and monitoring of the entry program, Use `python3 ./test.py -h` to view more features. + you can refer below steps to add one test case: ##### 1.Create a test case file and develop the test cases @@ -209,7 +214,9 @@ Integrate TaskCreateNewFunction into the balance_pickTaskType function in crash_ ## 3.6 CI Test -CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. TDengine CI testing includes three part of test cases: unit test、system test and legacy test +CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. + +TDengine CI testing includes three part of test cases: unit test、system test and legacy test ### How to run all CI test cases? From 35f8fb8153f615d71cb2277d381ab75ea57464d9 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 19:11:36 +0800 Subject: [PATCH 080/120] ci:modify test readme --- tests/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/README.md b/tests/README.md index bc97d74afd..5574a16743 100644 --- a/tests/README.md +++ b/tests/README.md @@ -140,7 +140,10 @@ Create a file in `tests/system-test` containing each functional directory and re ##### 2.Execute the test case +Ensure the test case execution is successful. 
+``` bash cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py +``` ##### 3.Integrate into CI tests From 7d2d44cbe328110c12b87cfca340ed6ca33573b4 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 16 Jan 2025 19:25:12 +0800 Subject: [PATCH 081/120] ci:modify test readme --- README.md | 4 +++- tests/README.md | 46 +++++++++++++++++++++++----------------------- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 787c5c6b7a..a7009fb6f2 100644 --- a/README.md +++ b/README.md @@ -388,9 +388,11 @@ TDengine build check workflow can be found in this [Github Action](https://githu # 11. Coverage +Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine) +
-Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine). + how to run the coverage report locally. To create the test coverage report (in HTML format) locally, please run following commands: ```bash diff --git a/tests/README.md b/tests/README.md index 5574a16743..dd1f8ba9a1 100644 --- a/tests/README.md +++ b/tests/README.md @@ -59,21 +59,21 @@ In `tests` directory, there are different types of tests for TDengine. Below is Unit test script is the smallest testable part and developed for some function, method or class of TDengine. -### How to run single test case? +### 3.1.1 How to run single test case? ```bash cd debug/build/bin ./osTimeTests ``` -### How to run all unit test cases? +### 3.1.2 How to run all unit test cases? ```bash cd tests/unit-test/ bash test.sh -e 0 ``` -#### How to add new cases? +### 3.1.3 How to add new cases?
@@ -81,25 +81,25 @@ bash test.sh -e 0 The Google test framwork is used for unit testing to specific function module, you can refer below steps to add one test case: -##### 1. Create test case file and develop the test scripts +##### a. Create test case file and develop the test scripts In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. -##### 2. Update build configuration +##### b. Update build configuration Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the `source/os/test/CMakeLists.txt` file for configuration examples. -##### 3. Compile test code +##### c. Compile test code In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., `cmake .. -DBUILD_TEST=1`) to generate a compilation file, and then run a compilation command (e.g. make) to complete the compilation of the test code. -##### 4. Execute the test program +##### d. Execute the test program Find the executable file in the compiled directory(e.g. `TDengine/debug/build/bin/`) and run it. -##### 5. Integrate into CI tests +##### e. Integrate into CI tests Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. @@ -110,21 +110,21 @@ Use the add_test command to add new compiled test cases into CI test collection, Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only work for TDengine Enterprise Edition. -### How to run single test case? +### 3.2.1 How to run single test case? ```bash cd tests/system-test python3 ./test.py -f 2-query/avg.py ``` -### How to run all system test cases? +### 3.2.2 How to run all system test cases? 
```bash cd tests ./run_all_ci_cases.sh -t python # all python cases ``` -### How to add new case? +### 3.2.3 How to add new case?
@@ -134,18 +134,18 @@ The Python test framework is developed by TDengine teams, and test.py is the tes you can refer below steps to add one test case: -##### 1.Create a test case file and develop the test cases +##### a. Create a test case file and develop the test cases Create a file in `tests/system-test` containing each functional directory and refer to the use case template `tests/system-test/0-others/test_case_template.py` to add a new test case. -##### 2.Execute the test case +##### b. Execute the test case Ensure the test case execution is successful. ``` bash cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py ``` -##### 3.Integrate into CI tests +##### c. Integrate into CI tests Edit `tests/parallel_test/cases.task` and add the testcase path and executions in the specified format. The third column indicates whether to use Address Sanitizer mode for testing. @@ -162,7 +162,7 @@ Edit `tests/parallel_test/cases.task` and add the testcase path and executions i In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. -### How to run single test case? +### 3.3.1 How to run single test case? To run the legacy test cases, please execute the following commands: @@ -171,14 +171,14 @@ cd tests/script ./test.sh -f tsim/db/basic1.sim ``` -### How to run all legacy test cases? +### 3.3.2 How to run all legacy test cases? ```bash cd tests ./run_all_ci_cases.sh -t legacy # all legacy cases ``` -### How to add new case? +### 3.3.3 How to add new case? > [!NOTE] > TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#32-system-test) for detail steps. @@ -187,14 +187,14 @@ cd tests Smoke test script is from system test and known as sanity testing to ensure that the critical functionalities of TDengine. -### How to run test? +### 3.4.1 How to run test? 
```bash cd /root/TDengine/packaging/smokeTest ./test_smoking_selfhost.sh ``` -### How to add new case? +### 3.4.2 How to add new case? You can update python commands part of test_smoking_selfhost.sh file to add any system test case into smoke test. @@ -202,14 +202,14 @@ You can update python commands part of test_smoking_selfhost.sh file to add any A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems without a pre-determined scenario. -### How to run test? +### 3.5.1 How to run test? ```bash cd tests/pytest python3 auto_crash_gen.py ``` -### How to add new case? +### 3.5.2 How to add new case? Add a function, such as TaskCreateNewFunction, to pytest/crash_gen/crash_gen_main.py. @@ -221,7 +221,7 @@ CI testing (Continuous Integration testing), is an important practice in softwar TDengine CI testing includes three part of test cases: unit test、system test and legacy test -### How to run all CI test cases? +### 3.6.1 How to run all CI test cases? If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: @@ -230,6 +230,6 @@ cd tests ./run_all_ci_cases.sh -b main # on main branch ``` -### How to add new cases? +### 3.6.2 How to add new cases? You can refer the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detail steps to add new test cases for CI test. 
From b992f27ca0154740950f21bb0dc2483683571b37 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Thu, 16 Jan 2025 19:31:17 +0800 Subject: [PATCH 082/120] jdbc update to 3.5.2 --- docs/en/07-develop/01-connect.md | 2 +- docs/en/14-reference/05-connector/14-java.md | 1 + docs/examples/JDBC/JDBCDemo/pom.xml | 2 +- docs/examples/JDBC/SpringJdbcTemplate/pom.xml | 2 +- docs/examples/JDBC/connectionPools/pom.xml | 2 +- docs/examples/JDBC/consumer-demo/pom.xml | 2 +- docs/examples/JDBC/mybatisplus-demo/pom.xml | 2 +- docs/examples/JDBC/springbootdemo/pom.xml | 2 +- docs/examples/JDBC/taosdemo/pom.xml | 2 +- docs/examples/java/pom.xml | 2 +- docs/zh/07-develop/01-connect/index.md | 2 +- docs/zh/14-reference/05-connector/14-java.mdx | 1 + 12 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/en/07-develop/01-connect.md b/docs/en/07-develop/01-connect.md index c14eed311a..b6725ed7a4 100644 --- a/docs/en/07-develop/01-connect.md +++ b/docs/en/07-develop/01-connect.md @@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 ``` diff --git a/docs/en/14-reference/05-connector/14-java.md b/docs/en/14-reference/05-connector/14-java.md index c28702440a..0c7fbbdda4 100644 --- a/docs/en/14-reference/05-connector/14-java.md +++ b/docs/en/14-reference/05-connector/14-java.md @@ -33,6 +33,7 @@ The JDBC driver implementation for TDengine strives to be consistent with relati | taos-jdbcdriver Version | Major Changes | TDengine Version | | ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------ | +| 3.5.2 | Fixed WebSocket result set free bug. | - | | 3.5.1 | Fixed the getObject issue in data subscription. | - | | 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data.
2. Optimized the performance of small queries in WebSocket connection.
3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher | | 3.4.0 | 1. Replaced fastjson library with jackson.
2. WebSocket uses a separate protocol identifier.
3. Optimized background thread usage to avoid user misuse leading to timeouts. | - | diff --git a/docs/examples/JDBC/JDBCDemo/pom.xml b/docs/examples/JDBC/JDBCDemo/pom.xml index 78262712e9..a80f7a9cdf 100644 --- a/docs/examples/JDBC/JDBCDemo/pom.xml +++ b/docs/examples/JDBC/JDBCDemo/pom.xml @@ -19,7 +19,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 org.locationtech.jts diff --git a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml index 7ff4a72f5e..20da9bfae8 100644 --- a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/connectionPools/pom.xml b/docs/examples/JDBC/connectionPools/pom.xml index 70be6ed527..86e6fb04a4 100644 --- a/docs/examples/JDBC/connectionPools/pom.xml +++ b/docs/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/consumer-demo/pom.xml b/docs/examples/JDBC/consumer-demo/pom.xml index c9537a93bf..e439c22224 100644 --- a/docs/examples/JDBC/consumer-demo/pom.xml +++ b/docs/examples/JDBC/consumer-demo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 com.google.guava diff --git a/docs/examples/JDBC/mybatisplus-demo/pom.xml b/docs/examples/JDBC/mybatisplus-demo/pom.xml index effb13cfe8..8b4777bfb0 100644 --- a/docs/examples/JDBC/mybatisplus-demo/pom.xml +++ b/docs/examples/JDBC/mybatisplus-demo/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml index 25b503b0e6..825f6fb9c2 100644 --- a/docs/examples/JDBC/springbootdemo/pom.xml +++ b/docs/examples/JDBC/springbootdemo/pom.xml @@ -70,7 +70,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/taosdemo/pom.xml 
b/docs/examples/JDBC/taosdemo/pom.xml index a80deeff94..c8f9c73d9d 100644 --- a/docs/examples/JDBC/taosdemo/pom.xml +++ b/docs/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index 63ce3159e6..4569742ab4 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -22,7 +22,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index fa22f750f5..a6e30ccb9c 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 ``` diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 7d5096bb66..9325396eb8 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -33,6 +33,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 | | ------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| 3.5.2 | 解决了 WebSocket 查询结果集释放 bug | - | | 3.5.1 | 解决了数据订阅获取时间戳对象类型问题 | - | | 3.5.0 | 1. 优化了 WebSocket 连接参数绑定性能,支持参数绑定查询使用二进制数据
2. 优化了 WebSocket 连接在小查询上的性能
3. WebSocket 连接上支持设置时区和应用信息 | 3.3.5.0 及更高版本 | | 3.4.0 | 1. 使用 jackson 库替换 fastjson 库
2. WebSocket 采用独立协议标识
3. 优化后台拉取线程使用,避免用户误用导致超时 | - | From f54ee09915172a2545c1facfa6ba5086d79ea17c Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 20:51:37 +0800 Subject: [PATCH 083/120] Update README.md --- tests/README.md | 53 +++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/tests/README.md b/tests/README.md index dd1f8ba9a1..26ced3da17 100644 --- a/tests/README.md +++ b/tests/README.md @@ -12,12 +12,10 @@ # 1. Introduction -This manual is intended to provide users with comprehensive guidance to help them verify the TDengine function efficiently. - -The document is divided into three main sections: introduction, prerequisites and testing guide. +This manual is intended to give developers a comprehensive guidance to test TDengine efficiently. It is divided into three main sections: introduction, prerequisites and testing guide. > [!NOTE] -> The below commands and test scripts are verified on linux (Ubuntu 18.04、20.04、22.04) locally. +> The commands and scripts below are verified on Linux (Ubuntu 18.04/20.04/22.04). # 2. Prerequisites @@ -43,7 +41,7 @@ pip3 install taospy taos-ws-py - Building -Please make sure building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been finished, otherwise execute commands below: +Before testting, please make sure the building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been done, otherwise execute commands below: ```bash cd debug @@ -57,7 +55,7 @@ In `tests` directory, there are different types of tests for TDengine. Below is ### 3.1 Unit Test -Unit test script is the smallest testable part and developed for some function, method or class of TDengine. +Unit tests are the smallest testable units, which are used to test functions, methods or classes in TDengine code. ### 3.1.1 How to run single test case? 
@@ -79,7 +77,7 @@ bash test.sh -e 0 Detailed steps to add new unit test case -The Google test framwork is used for unit testing to specific function module, you can refer below steps to add one test case: +The Google test framwork is used for unit testing to specific function module, please refer to steps below to add a new test case: ##### a. Create test case file and develop the test scripts @@ -107,10 +105,11 @@ Use the add_test command to add new compiled test cases into CI test collection, ## 3.2 System Test -Python test script includes all of the functions of TDengine OSS, so some test case maybe fail cause the function only -work for TDengine Enterprise Edition. +System tests are end-to-end test cases written in Python from a system point of view. Some of them are designed to test features only in enterprise ediiton, so when running on community edition, they may fail. We'll fix this issue by separating the cases into different gruops in the future. -### 3.2.1 How to run single test case? +### 3.2.1 How to run a single test case? + +Take test file `system-test/2-query/avg.py` for example: ```bash cd tests/system-test @@ -130,9 +129,9 @@ cd tests Detailed steps to add new system test case -The Python test framework is developed by TDengine teams, and test.py is the test case execution and monitoring of the entry program, Use `python3 ./test.py -h` to view more features. +The Python test framework is developed by TDengine team, and test.py is the test case execution and monitoring of the entry program, Use `python3 ./test.py -h` to view more features. -you can refer below steps to add one test case: +Please refer to steps below for how to add a new test case: ##### a. Create a test case file and develop the test cases @@ -141,16 +140,15 @@ Create a file in `tests/system-test` containing each functional directory and re ##### b. Execute the test case Ensure the test case execution is successful. 
+ ``` bash -cd tests/system-test & python3 ./test.py -f 0-others/test_case_template.py +cd tests/system-test && python3 ./test.py -f 0-others/test_case_template.py ``` ##### c. Integrate into CI tests Edit `tests/parallel_test/cases.task` and add the testcase path and executions in the specified format. The third column indicates whether to use Address Sanitizer mode for testing. - - ```bash #caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand ,,n,system-test, python3 ./test.py -f 0-others/test_case_template.py @@ -178,14 +176,14 @@ cd tests ./run_all_ci_cases.sh -t legacy # all legacy cases ``` -### 3.3.3 How to add new case? +### 3.3.3 How to add new cases? > [!NOTE] -> TSIM test framwork is replaced by system test currently, suggest to add new test scripts to system test, you can refer [System Test](#32-system-test) for detail steps. +> TSIM test framwork is deprecated by system test now, it is encouraged to add new test cases in system test, please refer to [System Test](#32-system-test) for details. ## 3.4 Smoke Test -Smoke test script is from system test and known as sanity testing to ensure that the critical functionalities of TDengine. +Smoke test is a group of test cases selected from system test, which is also known as sanity test to ensure the critical functionalities of TDengine. ### 3.4.1 How to run test? @@ -194,13 +192,13 @@ cd /root/TDengine/packaging/smokeTest ./test_smoking_selfhost.sh ``` -### 3.4.2 How to add new case? +### 3.4.2 How to add new cases? -You can update python commands part of test_smoking_selfhost.sh file to add any system test case into smoke test. +New cases can be added by updating the value of `commands` variable in `test_smoking_selfhost.sh`. ## 3.5 Chaos Test -A simple tool to exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems without a pre-determined scenario. 
+A simple tool to execute various functions of the system in a randomized way, hoping to expose potential problems without a pre-defined test scenario. ### 3.5.1 How to run test? @@ -209,21 +207,20 @@ cd tests/pytest python3 auto_crash_gen.py ``` -### 3.5.2 How to add new case? +### 3.5.2 How to add new cases? -Add a function, such as TaskCreateNewFunction, to pytest/crash_gen/crash_gen_main.py. - -Integrate TaskCreateNewFunction into the balance_pickTaskType function in crash_gen_main.py. +1. Add a function, such as `TaskCreateNewFunction` in `pytest/crash_gen/crash_gen_main.py`. +2. Integrate `TaskCreateNewFunction` into the `balance_pickTaskType` function in `crash_gen_main.py`. ## 3.6 CI Test CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. -TDengine CI testing includes three part of test cases: unit test、system test and legacy test +TDengine CI testing will run all the test cases from the following three types of tests: unit test, system test and legacy test. ### 3.6.1 How to run all CI test cases? -If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: +If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: ```bash cd tests @@ -232,4 +229,4 @@ cd tests ### 3.6.2 How to add new cases? -You can refer the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detail steps to add new test cases for CI test. +Please refer to the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detailed steps to add new test cases, when new cases are added in aboved tests, they will be run automatically by CI test. 
From a6d343cdb8ce1d7de3c4cde6e21285b1a63663a0 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 20:56:47 +0800 Subject: [PATCH 084/120] Update README.md --- tests/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 26ced3da17..23b2ca8700 100644 --- a/tests/README.md +++ b/tests/README.md @@ -41,7 +41,7 @@ pip3 install taospy taos-ws-py - Building -Before testting, please make sure the building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been done, otherwise execute commands below: +Before testing, please make sure the building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been done, otherwise execute commands below: ```bash cd debug From f2ae4a042565db54011de9909f861bde65a15f23 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 21:11:39 +0800 Subject: [PATCH 085/120] Update README.md --- tests/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 23b2ca8700..58747d93f7 100644 --- a/tests/README.md +++ b/tests/README.md @@ -15,7 +15,8 @@ This manual is intended to give developers a comprehensive guidance to test TDengine efficiently. It is divided into three main sections: introduction, prerequisites and testing guide. > [!NOTE] -> The commands and scripts below are verified on Linux (Ubuntu 18.04/20.04/22.04). +> - The commands and scripts below are verified on Linux (Ubuntu 18.04/20.04/22.04). +> - The commands and steps described below are to run the tests on a single host. # 2. 
Prerequisites From 1fb80a3b2580855cd7632b9a079f722375644eb4 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 21:15:15 +0800 Subject: [PATCH 086/120] Update README.md --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index a7009fb6f2..53ddc85bf9 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ nmake # 5. Packaging -TDengine packaging scripts depends on some private repositries currently, you can refer the link for detail steps. [Packaging](https://github.com/taosdata/TDinternal/tree/main?tab=readme-ov-file#5-packaging) +[Placeholder] # 6. Installation @@ -406,7 +406,6 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
- # 12. Contributing Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine. From d95a1ca89a661d68f21a16f8b21569872ccd700e Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Thu, 16 Jan 2025 21:16:56 +0800 Subject: [PATCH 087/120] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53ddc85bf9..a09b9cfea3 100644 --- a/README.md +++ b/README.md @@ -384,7 +384,7 @@ For the complete list of TDengine Releases, please see [Releases](https://github # 10. Workflow -TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). +TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). More workflows will be available soon. # 11. Coverage From 25f5b8673f473704ff17126572bdf2974770e94e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 17 Jan 2025 10:53:02 +0800 Subject: [PATCH 088/120] fix: compile errors --- source/libs/parser/test/parAlterToBalanceTest.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp index 12390cc9d5..2d6ab13c1f 100644 --- a/source/libs/parser/test/parAlterToBalanceTest.cpp +++ b/source/libs/parser/test/parAlterToBalanceTest.cpp @@ -206,10 +206,6 @@ TEST_F(ParserInitialATest, alterDatabase) { setAlterDbReplica(3); setAlterDbWalRetentionPeriod(10); setAlterDbWalRetentionSize(20); -#ifndef _STORAGE - run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " - "REPLICA 3 WAL_LEVEL 1 WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); -#else run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " "REPLICA 3 WAL_LEVEL 1 " #ifdef TD_ENTERPRISE From 
2312ed4c284ecd4e9f3b12768bb7cebe404a06be Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 17 Jan 2025 04:12:57 +0000 Subject: [PATCH 089/120] fix/insert-when-2-replicas --- source/libs/sync/src/syncMain.c | 3 ++- tests/army/cluster/arbitrator.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 4862a4b963..0933fd48c7 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -3428,7 +3428,8 @@ _out:; ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex); if (code == 0 && ths->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) { - TAOS_CHECK_RETURN(syncNodeUpdateAssignedCommitIndex(ths, matchIndex)); + int64_t index = syncNodeUpdateAssignedCommitIndex(ths, matchIndex); + sTrace("vgId:%d, update assigned commit index %" PRId64 "", ths->vgId, index); if (ths->fsmState != SYNC_FSM_STATE_INCOMPLETE && syncLogBufferCommit(ths->pLogBuf, ths, ths->assignedCommitIndex) < 0) { diff --git a/tests/army/cluster/arbitrator.py b/tests/army/cluster/arbitrator.py index 9fd8e7b1f3..385358e5cc 100644 --- a/tests/army/cluster/arbitrator.py +++ b/tests/army/cluster/arbitrator.py @@ -35,6 +35,12 @@ class TDTestCase(TBase): time.sleep(1) + tdSql.execute("use db;") + + tdSql.execute("CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);") + + tdSql.execute("CREATE TABLE d0 USING meters TAGS (\"California.SanFrancisco\", 2);"); + count = 0 while count < 100: @@ -72,6 +78,8 @@ class TDTestCase(TBase): count += 1 + tdSql.execute("INSERT INTO d0 VALUES (NOW, 10.3, 219, 0.31);") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") From d4ceb37f8c9a74bf43afba3280895f9525a2d466 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 17 Jan 2025 13:44:21 +0800 Subject: [PATCH 090/120] fix: ut errors --- source/libs/parser/test/parAlterToBalanceTest.cpp | 10 ---------- 
source/libs/parser/test/parInitialCTest.cpp | 8 -------- 2 files changed, 18 deletions(-) diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp index 2d6ab13c1f..172c729f34 100644 --- a/source/libs/parser/test/parAlterToBalanceTest.cpp +++ b/source/libs/parser/test/parAlterToBalanceTest.cpp @@ -196,11 +196,7 @@ TEST_F(ParserInitialATest, alterDatabase) { setAlterDbFsync(200); setAlterDbWal(1); setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW); -#ifdef TD_ENTERPRISE setAlterDbSttTrigger(16); -#else - setAlterDbSttTrigger(1); -#endif setAlterDbBuffer(16); setAlterDbPages(128); setAlterDbReplica(3); @@ -208,11 +204,7 @@ TEST_F(ParserInitialATest, alterDatabase) { setAlterDbWalRetentionSize(20); run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " "REPLICA 3 WAL_LEVEL 1 " -#ifdef TD_ENTERPRISE "STT_TRIGGER 16 " -#else - "STT_TRIGGER 1 " -#endif "WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); clearAlterDbReq(); @@ -300,12 +292,10 @@ TEST_F(ParserInitialATest, alterDatabase) { initAlterDb("test"); setAlterDbSttTrigger(1); run("ALTER DATABASE test STT_TRIGGER 1"); -#ifdef TD_ENTERPRISE setAlterDbSttTrigger(4); run("ALTER DATABASE test STT_TRIGGER 4"); setAlterDbSttTrigger(16); run("ALTER DATABASE test STT_TRIGGER 16"); -#endif clearAlterDbReq(); #endif diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 0f72f9d882..2412bf4e78 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -292,11 +292,7 @@ TEST_F(ParserInitialCTest, createDatabase) { setDbWalRetentionSize(-1); setDbWalRollPeriod(10); setDbWalSegmentSize(20); -#ifdef TD_ENTERPRISE - setDbSstTrigger(16); -#else setDbSstTrigger(1); -#endif setDbHashPrefix(3); setDbHashSuffix(4); setDbTsdbPageSize(32); @@ -354,11 +350,7 @@ TEST_F(ParserInitialCTest, createDatabase) { "WAL_RETENTION_SIZE -1 " 
"WAL_ROLL_PERIOD 10 " "WAL_SEGMENT_SIZE 20 " -#ifdef TD_ENTERPRISE - "STT_TRIGGER 16 " -#else "STT_TRIGGER 1 " -#endif "TABLE_PREFIX 3 " "TABLE_SUFFIX 4 " "TSDB_PAGESIZE 32"); From 5b95b519ee4f9553e81ff69176222ee1a6589aea Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Fri, 17 Jan 2025 14:05:22 +0800 Subject: [PATCH 091/120] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a09b9cfea3..f540b1cc43 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ nmake # 5. Packaging -[Placeholder] +The TDengine community installer can NOT be created by this repository only, due to some component dependencies. We are still working on this improvement. # 6. Installation @@ -392,7 +392,7 @@ Latest TDengine test coverage report can be found on [coveralls.io](https://cove
- how to run the coverage report locally. +How to run the coverage report locally? To create the test coverage report (in HTML format) locally, please run following commands: ```bash From 6274eea7c4401b2e1f6bc6d300221bb60fd98497 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 17 Jan 2025 19:15:08 +0800 Subject: [PATCH 092/120] fix(stream): adjust the free stream meta position and check the close flag of streamMeta before starting scan wal. --- source/dnode/vnode/src/tq/tq.c | 20 +++++++++++++++----- source/dnode/vnode/src/tq/tqStreamTask.c | 23 +++++++++++++++++++---- source/libs/stream/src/streamHb.c | 1 - source/libs/stream/src/streamMeta.c | 1 + 4 files changed, 35 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 3bfc50fcb2..fbfe0bee53 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -75,12 +75,14 @@ int32_t tqOpen(const char* path, SVnode* pVnode) { if (pTq == NULL) { return terrno; } + pVnode->pTq = pTq; + pTq->pVnode = pVnode; + pTq->path = taosStrdup(path); if (pTq->path == NULL) { return terrno; } - pTq->pVnode = pVnode; pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); if (pTq->pHandle == NULL) { @@ -131,11 +133,19 @@ void tqClose(STQ* pTq) { return; } + int32_t vgId = 0; + if (pTq->pVnode != NULL) { + vgId = TD_VID(pTq->pVnode); + } else if (pTq->pStreamMeta != NULL) { + vgId = pTq->pStreamMeta->vgId; + } + + // close the stream meta firstly + streamMetaClose(pTq->pStreamMeta); + void* pIter = taosHashIterate(pTq->pPushMgr, NULL); while (pIter) { STqHandle* pHandle = *(STqHandle**)pIter; - int32_t vgId = TD_VID(pTq->pVnode); - if (pHandle->msg != NULL) { tqPushEmptyDataRsp(pHandle, vgId); rpcFreeCont(pHandle->msg->pCont); @@ -151,8 +161,8 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->pOffset); taosMemoryFree(pTq->path); tqMetaClose(pTq); - qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? 
pTq->pStreamMeta->vgId : -1); - streamMetaClose(pTq->pStreamMeta); + + qDebug("vgId:%d end to close tq", vgId); taosMemoryFree(pTq); } diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index bc7e2e28e3..9eef07faf0 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -86,15 +86,30 @@ static void doStartScanWal(void* param, void* tmrId) { return; } + if (pMeta->closeFlag) { + code = taosReleaseRef(streamMetaRefPool, pParam->metaId); + if (code == TSDB_CODE_SUCCESS) { + tqDebug("vgId:%d jump out of scan wal timer since closed", vgId); + } else { + tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId, + tstrerror(code)); + } + + taosMemoryFree(pParam); + return; + } + vgId = pMeta->vgId; pTq = pMeta->ahandle; tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks, pTq->pVnode->restored); - code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); - if (code) { - tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); + if (pTq->pVnode != NULL) { + code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); + if (code) { + tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); + } } code = taosReleaseRef(streamMetaRefPool, pParam->metaId); @@ -330,13 +345,13 @@ int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfIt int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta) { int32_t vgId = pStreamMeta->vgId; + SArray* pTaskList = NULL; int32_t numOfTasks = taosArrayGetSize(pStreamMeta->pTaskList); if (numOfTasks == 0) { return TSDB_CODE_SUCCESS; } // clone the task list, to avoid the task update during scan wal files - SArray* pTaskList = NULL; streamMetaWLock(pStreamMeta); pTaskList = taosArrayDup(pStreamMeta->pTaskList, NULL); 
streamMetaWUnLock(pStreamMeta); diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 53b6a38b35..7c157bb05e 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -331,7 +331,6 @@ void streamMetaHbToMnode(void* param, void* tmrId) { } else { stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } -// taosMemoryFree(param); return; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 0de256d86d..9a2eeb9311 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -576,6 +576,7 @@ void streamMetaClose(SStreamMeta* pMeta) { if (pMeta == NULL) { return; } + int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid); if (code) { stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); From c93fe999ba2a4a2d515d96e10f9e472c826cd19c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Jan 2025 01:26:49 +0800 Subject: [PATCH 093/120] refactor(stream): injection error. 
--- source/dnode/vnode/src/inc/vnodeInt.h | 3 +++ source/dnode/vnode/src/tq/tq.c | 6 +++++- source/dnode/vnode/src/tq/tqStreamTask.c | 8 ++++++++ source/libs/stream/src/streamErrorInjection.c | 2 +- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 940116317c..5bf0a9b199 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -255,6 +255,9 @@ int32_t tqProcessTaskCheckpointReadyRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqBuildStreamTask(void* pTq, SStreamTask* pTask, int64_t ver); int32_t tqScanWal(STQ* pTq); +// injection error +void streamMetaFreeTQDuringScanWalError(STQ* pTq); + int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd); int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId); // tq-mq diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index fbfe0bee53..48c5360c01 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -161,8 +161,12 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->pOffset); taosMemoryFree(pTq->path); tqMetaClose(pTq); + qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? 
pTq->pStreamMeta->vgId : -1); + +#if 0 + streamMetaFreeTQDuringScanWalError(pTq); +#endif - qDebug("vgId:%d end to close tq", vgId); taosMemoryFree(pTq); } diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 9eef07faf0..bb04cd8dba 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -462,3 +462,11 @@ int32_t doScanWalAsync(STQ* pTq, bool ckPause) { return streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); } + +void streamMetaFreeTQDuringScanWalError(STQ* pTq) { + SBuildScanWalMsgParam* p = taosMemoryCalloc(1, sizeof(SBuildScanWalMsgParam)); + p->metaId = pTq->pStreamMeta->rid; + p->numOfTasks = 0; + + doStartScanWal(p, 0); +} \ No newline at end of file diff --git a/source/libs/stream/src/streamErrorInjection.c b/source/libs/stream/src/streamErrorInjection.c index 515845ba2b..8bbe403dcc 100644 --- a/source/libs/stream/src/streamErrorInjection.c +++ b/source/libs/stream/src/streamErrorInjection.c @@ -14,4 +14,4 @@ void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId) // the checkpoint interval should be 60s, and the next checkpoint req should be issued by mnode taosMsleep(65*1000); -} \ No newline at end of file +} From b4a6523aa660449daf884e79c761d57f363fc243 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Jan 2025 01:35:24 +0800 Subject: [PATCH 094/120] fix(stream): fix invalid read. --- source/dnode/vnode/src/tq/tq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 48c5360c01..5b19d4cd87 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -161,7 +161,7 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->pOffset); taosMemoryFree(pTq->path); tqMetaClose(pTq); - qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? 
pTq->pStreamMeta->vgId : -1); + qDebug("vgId:%d end to close tq", vgId); #if 0 streamMetaFreeTQDuringScanWalError(pTq); From 84eaed0bbb56abdca195d438f26ad946d5b10110 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 19 Jan 2025 00:13:58 +0800 Subject: [PATCH 095/120] fix(stream): fix invalid read. --- source/dnode/vnode/src/tq/tqStreamTask.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index bb04cd8dba..9ea84830f1 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -22,6 +22,8 @@ typedef struct SBuildScanWalMsgParam { int64_t metaId; int32_t numOfTasks; + int8_t restored; + SMsgCb msgCb; } SBuildScanWalMsgParam; static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta); @@ -74,7 +76,6 @@ int32_t tqScanWal(STQ* pTq) { static void doStartScanWal(void* param, void* tmrId) { int32_t vgId = 0; - STQ* pTq = NULL; int32_t code = 0; SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param; @@ -100,16 +101,17 @@ static void doStartScanWal(void* param, void* tmrId) { } vgId = pMeta->vgId; - pTq = pMeta->ahandle; tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks, - pTq->pVnode->restored); + pParam->restored); +#if 0 + // wait for the vnode is freed, and invalid read may occur. 
+ taosMsleep(10000); +#endif - if (pTq->pVnode != NULL) { - code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); - if (code) { - tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); - } + code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); + if (code) { + tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } code = taosReleaseRef(streamMetaRefPool, pParam->metaId); @@ -135,6 +137,8 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { pParam->metaId = pMeta->rid; pParam->numOfTasks = numOfTasks; + pParam->restored = pTq->pVnode->restored; + pParam->msgCb = pTq->pVnode->msgCb; code = streamTimerGetInstance(&pTimer); if (code) { From 2f817e1781f47297e34b5c580e0c69863583d73f Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 20 Jan 2025 09:50:14 +0800 Subject: [PATCH 096/120] refactor stmt-async-bind loop usleep to Producer Consumer Model --- source/client/inc/clientStmt.h | 2 ++ source/client/src/clientStmt.c | 37 +++++++++++++++++++++++++-------- source/client/src/clientStmt2.c | 31 ++++++++++++++++++++------- 3 files changed, 53 insertions(+), 17 deletions(-) diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h index 3540dc5c68..35bfa66f72 100644 --- a/source/client/inc/clientStmt.h +++ b/source/client/inc/clientStmt.h @@ -131,6 +131,8 @@ typedef struct SStmtQueue { SStmtQNode* head; SStmtQNode* tail; uint64_t qRemainNum; + TdThreadMutex mutex; + TdThreadCond waitCond; } SStmtQueue; typedef struct STscStmt { diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 4b993ccc1e..ad8681fbcd 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -39,31 +39,39 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) { - while (0 == 
atomic_load_64(&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; - (void)atomic_sub_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + + + *param = node; return true; } void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt* pStmt) { @@ -415,9 +423,11 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { @@ -809,6 +819,8 @@ int32_t stmtStartBindThread(STscStmt* pStmt) { } int32_t stmtInitQueue(STscStmt* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ 
-1619,11 +1631,18 @@ int stmtClose(TAOS_STMT* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64 ", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64 ", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u" diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 8edd60e4b5..72166fab84 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -39,31 +39,35 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); return true; } static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; - pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 
1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt2* pStmt) { @@ -339,9 +343,11 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) { @@ -735,6 +741,8 @@ static int32_t stmtStartBindThread(STscStmt2* pStmt) { } static int32_t stmtInitQueue(STscStmt2* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ -1748,11 +1756,18 @@ int stmtClose2(TAOS_STMT2* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + if (pStmt->options.asyncExecFn && !pStmt->semWaited) { if (tsem_wait(&pStmt->asyncQuerySem) != 0) { tscError("failed to wait asyncQuerySem"); From 8d8c4fe8a7813ddcfd6f7461280f022261cb0c3b Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 20 Jan 2025 14:10:02 +0800 Subject: [PATCH 097/120] test:update 3.3.5.2 release notes in office web --- cmake/cmake.version | 2 +- docs/en/28-releases/01-tdengine.md | 4 +++ docs/en/28-releases/03-notes/3.3.5.2.md | 43 +++++++++++++++++++++++++ docs/en/28-releases/03-notes/index.md | 1 + 
docs/zh/28-releases/01-tdengine.md | 4 +++ docs/zh/28-releases/03-notes/3.3.5.2.md | 42 ++++++++++++++++++++++++ docs/zh/28-releases/03-notes/index.md | 1 + 7 files changed, 96 insertions(+), 1 deletion(-) create mode 100755 docs/en/28-releases/03-notes/3.3.5.2.md create mode 100755 docs/zh/28-releases/03-notes/3.3.5.2.md diff --git a/cmake/cmake.version b/cmake/cmake.version index 13fac68e3a..ad78dbbc1e 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.3.5.0.alpha") + SET(TD_VER_NUMBER "3.3.5.2.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 12cf5484d4..9f4246c7a0 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -25,6 +25,10 @@ Download links for TDengine 3.x version installation packages are as follows: import Release from "/components/ReleaseV3"; +## 3.3.5.2 + + + ## 3.3.5.0 diff --git a/docs/en/28-releases/03-notes/3.3.5.2.md b/docs/en/28-releases/03-notes/3.3.5.2.md new file mode 100755 index 0000000000..5631edcdbc --- /dev/null +++ b/docs/en/28-releases/03-notes/3.3.5.2.md @@ -0,0 +1,43 @@ +--- +title: TDengine 3.3.5.2 Release Notes +sidebar_label: 3.3.5.2 +description: Version 3.3.5.2 Notes +slug: /release-history/release-notes/3.3.5.2 +--- + +## Features + 1. feat: taosX now support multiple stables with template for MQTT + +## Enhancements + 1. enh: improve taosX error message if database is invalid + 2. enh: use poetry group depencencies and reduce dep when install https://github.com/taosdata/TDengine/issues/taosdata/taos-connector-python#251 + 3. enh: improve backup restore using taosX + 4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader + 5. 
enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later. + +## Fixes + 1. fix: the maxRetryWaitTime parameter is used to control the maximum reconnection timeout time for the client when the cluster is unable to provide services, but it does not take effect when encountering a Sync timeout error + 2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table + 3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails. + 4. fix: taosd may crash when more than 100 views are created and the show views command is executed. + 5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail. + 6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail. + 7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash. + 8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd + 9. fix: the potential deadlock during the switching of log files + 10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema). + 11. fix: when the inner query of a nested query come from a super table, the sorting information cannot be pushed up. + 12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface + 13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash + 14. fix: the issue of being unable to dynamically modify system parameters + 15. fix: random error of tranlict transaction in replication + 16. 
fix: the same consumer executes the unsubscribe operation and immediately attempts to subscribe to other different topics, the subscription API will return an error + 17. fix: fix CVE-2022-28948 security issue in go connector + 18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error. + 19. fix: when changing the database from a single replica to a mulit replica, if there are some metadata generated by earlier versions that are no longer used in the new version, the modification operation will fail + 20. fix: column names were not correctly copied when using SELECT * FROM subqueries. + 21. fix: when performing max/min function on string type data, the results are inaccurate and taosd will crash + 22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation + 23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition. + 24. 
fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash + diff --git a/docs/en/28-releases/03-notes/index.md b/docs/en/28-releases/03-notes/index.md index e54862e105..5ff7350e6c 100644 --- a/docs/en/28-releases/03-notes/index.md +++ b/docs/en/28-releases/03-notes/index.md @@ -5,6 +5,7 @@ slug: /release-history/release-notes [3.3.5.0](./3-3-5-0/) +[3.3.5.2](./3.3.5.2) [3.3.4.8](./3-3-4-8/) [3.3.4.3](./3-3-4-3/) diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 356777acdc..88c07a89f4 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 3.3.5.2 + + + ## 3.3.5.0 diff --git a/docs/zh/28-releases/03-notes/3.3.5.2.md b/docs/zh/28-releases/03-notes/3.3.5.2.md new file mode 100755 index 0000000000..4dfb79968d --- /dev/null +++ b/docs/zh/28-releases/03-notes/3.3.5.2.md @@ -0,0 +1,42 @@ +--- +title: 3.3.5.2 版本说明 +sidebar_label: 3.3.5.2 +description: 3.3.5.2 版本说明 +--- + +## 特性 + 1. 特性:taosX MQTT 数据源支持根据模板创建多个超级表 + +## 优化 + 1. 优化:改进 taosX 数据库不可用时的错误信息 + 2. 优化:使用 Poetry 标准管理依赖项并减少 Python 连接器安装依赖项 https://github.com/taosdata/TDengine/issues/taosdata/taos-connector-python#251 + 3. 优化:taosX 增量备份和恢复优化 + 4. 优化:在多级存储数据迁移过程中,如果迁移时间过长,可能会导致 Vnode 切主 + 5. 优化:调整 systemctl 守护 taosd 进程的策略,如果 60 秒内连续三次重启失败,下次重启将推迟至 900 秒后 + +## 修复 + 1. 修复:maxRetryWaitTime 参数用于控制当集群无法提供服务时客户端的最大重连超时时间,但在遇到 Sync timeout 错误时,该参数不生效 + 2. 修复:支持在修改子表的 tag 值后,即时订阅到更新后的 tag 值。 + 3. 修复:数据订阅的 tmq_consumer_poll 函数调用失败时没有返回错误码 + 4. 修复:当创建超过 100 个视图并执行 show views 命令时,taosd 可能会发生崩溃 + 5. 修复:当使用 stmt2 写入数据时,如果未绑定所有的数据列,写入操作将会失败 + 6. 修复:当使用 stmt2 写入数据时,如果数据库名或表名使用了反引号,写入操作将会失败。 + 7. 修复:关闭 vnode 时如果有正在进行的文件合并任务,taosd 可能会崩溃 + 8. 修复:频繁执行 drop table with `tb_uid` 语句可能导致 taosd 死锁 + 9. 修复:日志文件切换过程中可能出现的死锁问题 + 10. 修复:禁止创建与系统库(information_schema, performance_schema)同名的数据库 + 11. 修复:当嵌套查询的内层查询来源于超级表时,排序信息无法被上推 + 12. 
修复:通过 STMT 接口尝试写入不符合拓扑规范的 Geometry 数据类型时误报错误 + 13. 修复:在查询语句中使用 percentile 函数和会话窗口时,如果出现错误,taosd 可能会崩溃 + 14. 修复:无法动态修改系统参数的问题 + 15. 修复:订阅同步偶发 Translict transaction 错误 + 16. 修复:同一消费者在执行取消订阅操作后,立即尝试订阅其他不同的主题时,会返回错误 + 17. 修复:Go 连接器安全修复 CVE-2022-28948 + 18. 修复:当视图中的子查询包含带别名的 ORDER BY 子句,并且查询函数自身也带有别名时,查询该视图会引发错误 + 19. 修复:在将数据库从单副本修改为多副本时,如果存在一些由较早版本生成且在新版本中已不再使用的元数据,会导致修改操作失败。 + 20. 修复:在使用 SELECT * FROM 子查询时,列名未能正确复制到外层查询 + 21. 修复:对字符串类型数据执行 max/min 函数时,结果不准确且 taosd 可能会崩溃 + 22. 修复:流式计算不支持使用 HAVING 语句,但在创建时未报告错误 + 23. 修复:taos shell 显示的服务端版本信息不准确,例如无法正确区分社区版和企业版。 + 24. 修复:在某些特定的查询场景下,当 JOIN 和 CAST 联合使用时,taosd 可能会崩溃。 + diff --git a/docs/zh/28-releases/03-notes/index.md b/docs/zh/28-releases/03-notes/index.md index 27898aa2df..420ab4a54d 100644 --- a/docs/zh/28-releases/03-notes/index.md +++ b/docs/zh/28-releases/03-notes/index.md @@ -4,6 +4,7 @@ sidebar_label: 版本说明 description: 各版本版本说明 --- +[3.3.5.2](./3.3.5.2) [3.3.5.0](./3.3.5.0) [3.3.4.8](./3.3.4.8) [3.3.4.3](./3.3.4.3) From f9ce37ad80f1105ffaf50e37ebcb1ade0d6db704 Mon Sep 17 00:00:00 2001 From: menshibin Date: Mon, 20 Jan 2025 14:11:18 +0800 Subject: [PATCH 098/120] modify source name --- docs/examples/flink/Main.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/examples/flink/Main.java b/docs/examples/flink/Main.java index 12d79126cf..50a507d1de 100644 --- a/docs/examples/flink/Main.java +++ b/docs/examples/flink/Main.java @@ -263,7 +263,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") Class> typeClass = (Class>) (Class) SourceRecords.class; SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters"); TDengineSource> source = new TDengineSource<>(connProps, sql, typeClass); - DataStreamSource> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = 
input.map((MapFunction, String>) records -> { StringBuilder sb = new StringBuilder(); Iterator iterator = records.iterator(); @@ -304,7 +304,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData"); config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8"); TDengineCdcSource tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class); - DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction) rowData -> { StringBuilder sb = new StringBuilder(); sb.append("tsxx: " + rowData.getTimestamp(0, 0) + @@ -343,7 +343,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") Class> typeClass = (Class>) (Class) ConsumerRecords.class; TDengineCdcSource> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass); - DataStreamSource> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction, String>) records -> { Iterator> iterator = records.iterator(); StringBuilder sb = new StringBuilder(); @@ -388,7 +388,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer"); config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8"); TDengineCdcSource tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class); - DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource input = env.fromSource(tdengineSource, 
WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction) rowData -> { StringBuilder sb = new StringBuilder(); sb.append("ts: " + rowData.getTs() + From aad393f0f88d900c4906c60bb312fdef0ca83d1f Mon Sep 17 00:00:00 2001 From: huohong <346479823@qq.com> Date: Mon, 20 Jan 2025 14:42:01 +0800 Subject: [PATCH 099/120] Update 3.3.5.2.md --- docs/en/28-releases/03-notes/3.3.5.2.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/en/28-releases/03-notes/3.3.5.2.md b/docs/en/28-releases/03-notes/3.3.5.2.md index 5631edcdbc..8200e4916c 100755 --- a/docs/en/28-releases/03-notes/3.3.5.2.md +++ b/docs/en/28-releases/03-notes/3.3.5.2.md @@ -13,31 +13,31 @@ slug: /release-history/release-notes/3.3.5.2 2. enh: use poetry group depencencies and reduce dep when install https://github.com/taosdata/TDengine/issues/taosdata/taos-connector-python#251 3. enh: improve backup restore using taosX 4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader - 5. enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later. + 5. enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later ## Fixes 1. fix: the maxRetryWaitTime parameter is used to control the maximum reconnection timeout time for the client when the cluster is unable to provide services, but it does not take effect when encountering a Sync timeout error 2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table - 3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails. - 4. 
fix: taosd may crash when more than 100 views are created and the show views command is executed. - 5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail. - 6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail. - 7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash. + 3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails + 4. fix: taosd may crash when more than 100 views are created and the show views command is executed + 5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail + 6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail + 7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash 8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd 9. fix: the potential deadlock during the switching of log files - 10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema). - 11. fix: when the inner query of a nested query come from a super table, the sorting information cannot be pushed up. + 10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema) + 11. fix: when the inner query of a nested query come from a super table, the sorting information cannot be pushed up 12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface 13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash 14. 
fix: the issue of being unable to dynamically modify system parameters 15. fix: random error of tranlict transaction in replication 16. fix: the same consumer executes the unsubscribe operation and immediately attempts to subscribe to other different topics, the subscription API will return an error 17. fix: fix CVE-2022-28948 security issue in go connector - 18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error. + 18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error 19. fix: when changing the database from a single replica to a mulit replica, if there are some metadata generated by earlier versions that are no longer used in the new version, the modification operation will fail - 20. fix: column names were not correctly copied when using SELECT * FROM subqueries. + 20. fix: column names were not correctly copied when using SELECT * FROM subqueries 21. fix: when performing max/min function on string type data, the results are inaccurate and taosd will crash 22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation - 23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition. + 23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition 24. 
fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash From 503dec595cd8a1a79d4e499d5a033e4bb9cc91b9 Mon Sep 17 00:00:00 2001 From: huohong <346479823@qq.com> Date: Mon, 20 Jan 2025 14:45:02 +0800 Subject: [PATCH 100/120] Update 3.3.5.2.md --- docs/zh/28-releases/03-notes/3.3.5.2.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/zh/28-releases/03-notes/3.3.5.2.md b/docs/zh/28-releases/03-notes/3.3.5.2.md index 4dfb79968d..dc2734c50b 100755 --- a/docs/zh/28-releases/03-notes/3.3.5.2.md +++ b/docs/zh/28-releases/03-notes/3.3.5.2.md @@ -9,18 +9,18 @@ description: 3.3.5.2 版本说明 ## 优化 1. 优化:改进 taosX 数据库不可用时的错误信息 - 2. 优化:使用 Poetry 标准管理依赖项并减少 Python 连接器安装依赖项 https://github.com/taosdata/TDengine/issues/taosdata/taos-connector-python#251 + 2. 优化:使用 Poetry 标准管理依赖项并减少 Python 连接器安装依赖项 [#251](https://github.com/taosdata/taos-connector-python/issues/251) 3. 优化:taosX 增量备份和恢复优化 4. 优化:在多级存储数据迁移过程中,如果迁移时间过长,可能会导致 Vnode 切主 5. 优化:调整 systemctl 守护 taosd 进程的策略,如果 60 秒内连续三次重启失败,下次重启将推迟至 900 秒后 ## 修复 1. 修复:maxRetryWaitTime 参数用于控制当集群无法提供服务时客户端的最大重连超时时间,但在遇到 Sync timeout 错误时,该参数不生效 - 2. 修复:支持在修改子表的 tag 值后,即时订阅到更新后的 tag 值。 + 2. 修复:支持在修改子表的 tag 值后,即时订阅到更新后的 tag 值 3. 修复:数据订阅的 tmq_consumer_poll 函数调用失败时没有返回错误码 4. 修复:当创建超过 100 个视图并执行 show views 命令时,taosd 可能会发生崩溃 5. 修复:当使用 stmt2 写入数据时,如果未绑定所有的数据列,写入操作将会失败 - 6. 修复:当使用 stmt2 写入数据时,如果数据库名或表名使用了反引号,写入操作将会失败。 + 6. 修复:当使用 stmt2 写入数据时,如果数据库名或表名使用了反引号,写入操作将会失败 7. 修复:关闭 vnode 时如果有正在进行的文件合并任务,taosd 可能会崩溃 8. 修复:频繁执行 drop table with `tb_uid` 语句可能导致 taosd 死锁 9. 修复:日志文件切换过程中可能出现的死锁问题 @@ -33,10 +33,10 @@ description: 3.3.5.2 版本说明 16. 修复:同一消费者在执行取消订阅操作后,立即尝试订阅其他不同的主题时,会返回错误 17. 修复:Go 连接器安全修复 CVE-2022-28948 18. 修复:当视图中的子查询包含带别名的 ORDER BY 子句,并且查询函数自身也带有别名时,查询该视图会引发错误 - 19. 修复:在将数据库从单副本修改为多副本时,如果存在一些由较早版本生成且在新版本中已不再使用的元数据,会导致修改操作失败。 + 19. 修复:在将数据库从单副本修改为多副本时,如果存在一些由较早版本生成且在新版本中已不再使用的元数据,会导致修改操作失败 20. 修复:在使用 SELECT * FROM 子查询时,列名未能正确复制到外层查询 21. 修复:对字符串类型数据执行 max/min 函数时,结果不准确且 taosd 可能会崩溃 22. 
修复:流式计算不支持使用 HAVING 语句,但在创建时未报告错误 - 23. 修复:taos shell 显示的服务端版本信息不准确,例如无法正确区分社区版和企业版。 - 24. 修复:在某些特定的查询场景下,当 JOIN 和 CAST 联合使用时,taosd 可能会崩溃。 + 23. 修复:taos shell 显示的服务端版本信息不准确,例如无法正确区分社区版和企业版 + 24. 修复:在某些特定的查询场景下,当 JOIN 和 CAST 联合使用时,taosd 可能会崩溃 From 9328bb86ef6c2d925a3b4b7d1b9b5a0bdbc96954 Mon Sep 17 00:00:00 2001 From: huohong <346479823@qq.com> Date: Mon, 20 Jan 2025 14:46:03 +0800 Subject: [PATCH 101/120] Update 3.3.5.2.md --- docs/en/28-releases/03-notes/3.3.5.2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/28-releases/03-notes/3.3.5.2.md b/docs/en/28-releases/03-notes/3.3.5.2.md index 8200e4916c..ce0141cccf 100755 --- a/docs/en/28-releases/03-notes/3.3.5.2.md +++ b/docs/en/28-releases/03-notes/3.3.5.2.md @@ -10,7 +10,7 @@ slug: /release-history/release-notes/3.3.5.2 ## Enhancements 1. enh: improve taosX error message if database is invalid - 2. enh: use poetry group depencencies and reduce dep when install https://github.com/taosdata/TDengine/issues/taosdata/taos-connector-python#251 + 2. enh: use poetry group depencencies and reduce dep when install [#251](https://github.com/taosdata/taos-connector-python/issues/251) 3. enh: improve backup restore using taosX 4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader 5. 
enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later From 6588f9bec349aaa29690c4e50368bebf3e766f35 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 20 Jan 2025 17:09:41 +0800 Subject: [PATCH 102/120] fix TD-33570 --- source/client/test/stmt2Test.cpp | 19 +++++++- source/client/test/stmtTest.cpp | 62 +++++++++++++++++++++------ source/libs/parser/src/parInsertSql.c | 3 ++ 3 files changed, 71 insertions(+), 13 deletions(-) diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index 52c89e97ab..499b5d16bb 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -735,7 +735,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { { const char* sql = "insert into stmt2_testdb_4.? values(?,?)"; printf("case 2 : %s\n", sql); - getFieldsError(taos, sql, TSDB_CODE_PAR_TABLE_NOT_EXIST); + getFieldsError(taos, sql, TSDB_CODE_TSC_STMT_TBNAME_ERROR); } // case 3 : wrong para nums @@ -1496,6 +1496,23 @@ TEST(stmt2Case, geometry) { checkError(stmt, code); ASSERT_EQ(affected_rows, 3); + // test wrong wkb input + unsigned char wkb2[3][61] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, + }, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x40}}; + params[1].buffer = wkb2; + code = taos_stmt2_bind_param(stmt, &bindv, -1); + ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE); + taos_stmt2_close(stmt); do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13"); taos_close(taos); diff --git a/source/client/test/stmtTest.cpp b/source/client/test/stmtTest.cpp index 77130e41db..8efa907155 100644 --- a/source/client/test/stmtTest.cpp +++ b/source/client/test/stmtTest.cpp @@ -212,15 +212,6 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_E *expectedTagFields, int expectedTagFieldNum, TAOS_FIELD_E *expectedColFields, int expectedColFieldNum) { - // create database and table - do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3"); - do_query(taos, "USE stmt_testdb_3"); - do_query( - taos, - "CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " - "(groupId INT, location BINARY(24))"); - TAOS_STMT *stmt = taos_stmt_init(taos); ASSERT_NE(stmt, nullptr); int code = taos_stmt_prepare(stmt, sql, 0); @@ -267,6 +258,24 @@ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_ taos_stmt_close(stmt); } +void getFieldsError(TAOS *taos, const char *sql, int expectedErrocode) { + TAOS_STMT *stmt = taos_stmt_init(taos); + ASSERT_NE(stmt, nullptr); + STscStmt *pStmt = (STscStmt *)stmt; + + int code = taos_stmt_prepare(stmt, sql, 0); + + int fieldNum = 0; + TAOS_FIELD_E *pFields = NULL; + code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, expectedErrocode); + ASSERT_EQ(pStmt->errCode, TSDB_CODE_SUCCESS); + + taosMemoryFree(pFields); + + taos_stmt_close(stmt); +} + } // namespace int main(int argc, char **argv) { @@ -298,6 +307,15 @@ TEST(stmtCase, get_fields) { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); 
ASSERT_NE(taos, nullptr); + // create database and table + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3"); + do_query(taos, "USE stmt_testdb_3"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + // nomarl test { TAOS_FIELD_E tagFields[2] = {{"groupid", TSDB_DATA_TYPE_INT, 0, 0, sizeof(int)}, {"location", TSDB_DATA_TYPE_BINARY, 0, 0, 24}}; @@ -307,6 +325,12 @@ TEST(stmtCase, get_fields) { {"phase", TSDB_DATA_TYPE_FLOAT, 0, 0, sizeof(float)}}; getFields(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 7, &tagFields[0], 2, &colFields[0], 4); } + // error case [TD-33570] + { getFieldsError(taos, "INSERT INTO ? VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); } + + { getFieldsError(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); } + + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); taos_close(taos); } @@ -520,9 +544,6 @@ TEST(stmtCase, geometry) { int code = taos_stmt_prepare(stmt, stmt_sql, 0); checkError(stmt, code); - // code = taos_stmt_set_tbname(stmt, "tb1"); - // checkError(stmt, code); - code = taos_stmt_bind_param_batch(stmt, params); checkError(stmt, code); @@ -532,6 +553,23 @@ TEST(stmtCase, geometry) { code = taos_stmt_execute(stmt); checkError(stmt, code); + //test wrong wkb input + unsigned char wkb2[3][61] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, + }, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}}; + params[1].buffer = wkb2; + code = taos_stmt_bind_param_batch(stmt, params); + ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE); + taosMemoryFree(t64_len); taosMemoryFree(wkb_len); taos_stmt_close(stmt); diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 67ad874b15..5ff6e4f555 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -2751,6 +2751,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS if (TSDB_CODE_SUCCESS == code && hasData) { code = parseInsertTableClause(pCxt, pStmt, &token); } + if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) { + code = TSDB_CODE_TSC_STMT_TBNAME_ERROR; + } } if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) { From 7d8a9a80ea081239ded89ebf7b30451c8eff5ca6 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 20 Jan 2025 09:50:14 +0800 Subject: [PATCH 103/120] refactor stmt-async-bind loop usleep to Producer Consumer Model --- source/client/inc/clientStmt.h | 2 ++ source/client/src/clientStmt.c | 37 +++++++++++++++++++++++++-------- source/client/src/clientStmt2.c | 31 ++++++++++++++++++++------- 3 files changed, 53 insertions(+), 17 deletions(-) diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h index 3540dc5c68..35bfa66f72 100644 --- a/source/client/inc/clientStmt.h +++ b/source/client/inc/clientStmt.h @@ -131,6 +131,8 @@ typedef struct SStmtQueue { SStmtQNode* head; SStmtQNode* tail; uint64_t qRemainNum; + TdThreadMutex mutex; + TdThreadCond waitCond; } SStmtQueue; typedef struct STscStmt { diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c 
index 4b993ccc1e..ad8681fbcd 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -39,31 +39,39 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) { - while (0 == atomic_load_64(&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; - (void)atomic_sub_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + + + *param = node; return true; } void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt* pStmt) { @@ -415,9 +423,11 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { @@ -809,6 +819,8 @@ int32_t stmtStartBindThread(STscStmt* pStmt) { } 
int32_t stmtInitQueue(STscStmt* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ -1619,11 +1631,18 @@ int stmtClose(TAOS_STMT* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64 ", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64 ", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u" diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 8edd60e4b5..72166fab84 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -39,31 +39,35 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + 
(void)taosThreadMutexUnlock(&pStmt->queue.mutex); return true; } static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; - pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt2* pStmt) { @@ -339,9 +343,11 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) { @@ -735,6 +741,8 @@ static int32_t stmtStartBindThread(STscStmt2* pStmt) { } static int32_t stmtInitQueue(STscStmt2* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ -1748,11 +1756,18 @@ int stmtClose2(TAOS_STMT2* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + if (pStmt->options.asyncExecFn && !pStmt->semWaited) { if (tsem_wait(&pStmt->asyncQuerySem) != 0) { tscError("failed to wait asyncQuerySem"); From d8806a5175b03a910f10eb5c2600a289d6c8910b Mon Sep 17 00:00:00 
2001 From: wangmm0220 Date: Mon, 20 Jan 2025 19:23:40 +0800 Subject: [PATCH 104/120] fix:[TD-33556] tmq close elegantly to avoid invalid read in TD-32585 --- source/client/src/clientMain.c | 2 +- source/client/src/clientTmq.c | 28 +++++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 83aff351dd..190a724151 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -253,7 +253,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - // cleanupAppInfo(); + cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index f4426fc94a..a562cebc2c 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1619,6 +1619,24 @@ void tmqMgmtClose(void) { } if (tmqMgmt.rsetId >= 0) { + tmq_t *tmq = taosIterateRef(tmqMgmt.rsetId, 0); + int64_t refId = 0; + + while (tmq) { + refId = tmq->refId; + if (refId == 0) { + break; + } + taosWLockLatch(&tmq->lock); + atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED); + taosWUnLockLatch(&tmq->lock); + + if (taosRemoveRef(tmqMgmt.rsetId, tmq->refId) != 0) { + qWarn("taosRemoveRef tmq refId:%" PRId64 " failed, error:%s", refId, tstrerror(terrno)); + } + + tmq = taosIterateRef(tmqMgmt.rsetId, refId); + } taosCloseRef(tmqMgmt.rsetId); tmqMgmt.rsetId = -1; } @@ -2617,8 +2635,13 @@ int32_t tmq_unsubscribe(tmq_t* tmq) { int32_t tmq_consumer_close(tmq_t* tmq) { if (tmq == NULL) return TSDB_CODE_INVALID_PARA; + int32_t code = 0; + taosWLockLatch(&tmq->lock); + if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){ + goto end; + } tqInfoC("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status); - int32_t code = tmq_unsubscribe(tmq); + code = tmq_unsubscribe(tmq); if (code == 0) { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED); code = 
taosRemoveRef(tmqMgmt.rsetId, tmq->refId); @@ -2626,6 +2649,9 @@ int32_t tmq_consumer_close(tmq_t* tmq) { tqErrorC("tmq close failed to remove ref:%" PRId64 ", code:%d", tmq->refId, code); } } + +end: + taosWUnLockLatch(&tmq->lock); return code; } From 14199345712e4bbe6bbd6f0fe6d2fde4d3537e5f Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 20 Jan 2025 20:25:17 +0800 Subject: [PATCH 105/120] fix TD-33582 --- source/client/src/clientStmt.c | 39 ++++++++++++++++++++++----------- source/client/src/clientStmt2.c | 35 +++++++++++++++++++---------- 2 files changed, 50 insertions(+), 24 deletions(-) diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 4b993ccc1e..0df5d89cd3 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -1757,7 +1757,9 @@ _return: } int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { + int code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param num"); @@ -1765,7 +1767,7 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { return pStmt->errCode; } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1777,23 +1779,29 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } if (STMT_TYPE_QUERY == pStmt->sql.type) { *nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues); } else { - STMT_ERR_RET(stmtFetchColFields(stmt, nums, NULL)); + STMT_ERRI_JRET(stmtFetchColFields(stmt, nums, NULL)); } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } int stmtGetParam(TAOS_STMT* stmt, 
int idx, int* type, int* bytes) { + int code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param"); @@ -1802,10 +1810,10 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { } if (STMT_TYPE_QUERY == pStmt->sql.type) { - STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1817,27 +1825,32 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } int32_t nums = 0; TAOS_FIELD_E* pField = NULL; - STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField)); + STMT_ERRI_JRET(stmtFetchColFields(stmt, &nums, &pField)); if (idx >= nums) { tscError("idx %d is too big", idx); taosMemoryFree(pField); - STMT_ERR_RET(TSDB_CODE_INVALID_PARA); + STMT_ERRI_JRET(TSDB_CODE_INVALID_PARA); } *type = pField[idx].type; *bytes = pField[idx].bytes; - taosMemoryFree(pField); +_return: + if (pField) { + taosMemoryFree(pField); + } - return TSDB_CODE_SUCCESS; + pStmt->errCode = preCode; + + return code; } TAOS_RES* stmtUseResult(TAOS_STMT* stmt) { diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 8edd60e4b5..cec55c211a 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1066,13 +1066,16 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E } static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) { + int32_t code = 0; + int32_t preCode = pStmt->errCode; + if (pStmt->errCode 
!= TSDB_CODE_SUCCESS) { return pStmt->errCode; } if (STMT_TYPE_QUERY == pStmt->sql.type) { tscError("invalid operation to get query column fileds"); - STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } STableDataCxt** pDataBlock = NULL; @@ -1084,21 +1087,25 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL (STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)); if (NULL == pDataBlock) { tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName); - STMT_ERR_RET(TSDB_CODE_APP_ERROR); + STMT_ERRI_JRET(TSDB_CODE_APP_ERROR); } } - STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields)); + STMT_ERRI_JRET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields)); if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE) { pStmt->bInfo.needParse = true; qDestroyStmtDataBlock(*pDataBlock); if (taosHashRemove(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)) != 0) { tscError("get fileds %s remove exec blockHash fail", pStmt->bInfo.tbFName); - STMT_ERR_RET(TSDB_CODE_APP_ERROR); + STMT_ERRI_JRET(TSDB_CODE_APP_ERROR); } } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { @@ -1824,7 +1831,7 @@ int stmtParseColFields2(TAOS_STMT2* stmt) { if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { taos_free_result(pStmt->exec.pRequest); pStmt->exec.pRequest = NULL; - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); } STMT_ERRI_JRET(stmtCreateRequest(pStmt)); @@ -1850,7 +1857,9 @@ int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) { } int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { + int32_t code = 0; STscStmt2* pStmt = (STscStmt2*)stmt; 
+ int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param num"); @@ -1858,7 +1867,7 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { return pStmt->errCode; } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1870,19 +1879,23 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } if (STMT_TYPE_QUERY == pStmt->sql.type) { *nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues); } else { - STMT_ERR_RET(stmtFetchColFields2(stmt, nums, NULL)); + STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, NULL)); } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } TAOS_RES* stmtUseResult2(TAOS_STMT2* stmt) { From 3e90b67d5b182301cca25ee445558e6263e10b95 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Mon, 20 Jan 2025 20:26:12 +0800 Subject: [PATCH 106/120] add test --- source/client/test/stmt2Test.cpp | 26 ++++++++++++++++++++++++++ source/client/test/stmtTest.cpp | 30 ++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index 52c89e97ab..c56e2ff3e2 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -1500,4 +1500,30 @@ TEST(stmt2Case, geometry) { do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13"); taos_close(taos); } + +// TD-33582 +TEST(stmt2Case, errcode) { + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_14"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS 
stmt2_testdb_14"); + do_query(taos, "use stmt2_testdb_14"); + + TAOS_STMT2_OPTION option = {0}; + TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); + ASSERT_NE(stmt, nullptr); + char* sql = "select * from t where ts > ? and name = ? foo = ?"; + int code = taos_stmt2_prepare(stmt, sql, 0); + checkError(stmt, code); + + int fieldNum = 0; + TAOS_FIELD_ALL* pFields = NULL; + code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + + // get fail dont influence the next stmt prepare + sql = "nsert into ? (ts, name) values (?, ?)"; + code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); +} #pragma GCC diagnostic pop diff --git a/source/client/test/stmtTest.cpp b/source/client/test/stmtTest.cpp index 77130e41db..13fefb7cc8 100644 --- a/source/client/test/stmtTest.cpp +++ b/source/client/test/stmtTest.cpp @@ -538,5 +538,35 @@ TEST(stmtCase, geometry) { do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5"); taos_close(taos); } +//TD-33582 +TEST(stmtCase, errcode) { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_4"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_4"); + do_query(taos, "USE stmt_testdb_4"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_4.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + + TAOS_STMT *stmt = taos_stmt_init(taos); + ASSERT_NE(stmt, nullptr); + char *sql = "select * from t where ts > ? and name = ? 
foo = ?"; + int code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); + + int fieldNum = 0; + TAOS_FIELD_E *pFields = NULL; + code = stmtGetParamNum(stmt, &fieldNum); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + + code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + // get fail dont influence the next stmt prepare + sql = "nsert into ? (ts, name) values (?, ?)"; + code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); +} #pragma GCC diagnostic pop \ No newline at end of file From 6ff57b22d472576cd2ba83d412325686e0921639 Mon Sep 17 00:00:00 2001 From: "pengrongkun94@qq.com" Date: Tue, 21 Jan 2025 11:08:39 +0800 Subject: [PATCH 107/120] fix review --- source/client/src/clientStmt.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 0df5d89cd3..675ee8e27e 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -1836,7 +1836,6 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { STMT_ERRI_JRET(stmtFetchColFields(stmt, &nums, &pField)); if (idx >= nums) { tscError("idx %d is too big", idx); - taosMemoryFree(pField); STMT_ERRI_JRET(TSDB_CODE_INVALID_PARA); } @@ -1844,10 +1843,8 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { *bytes = pField[idx].bytes; _return: - if (pField) { - taosMemoryFree(pField); - } + taosMemoryFree(pField); pStmt->errCode = preCode; return code; From f3b38edb980c7fd6ab545865e4888ae5e8dc0569 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 26 Dec 2024 14:33:14 +0800 Subject: [PATCH 108/120] add stream event notify --- include/common/tcommon.h | 1 + source/libs/executor/inc/executorInt.h | 6 +- source/libs/executor/inc/streamexecutorInt.h | 3 +- .../executor/src/streameventwindowoperator.c | 56 ++++++++++++++++++- source/libs/executor/src/streamexecutorInt.c | 15 ++++- 
.../src/streamintervalsliceoperator.c | 3 +- .../executor/src/streamtimesliceoperator.c | 3 +- 7 files changed, 79 insertions(+), 8 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 0450766535..3f76239ce5 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -160,6 +160,7 @@ typedef enum EStreamType { STREAM_PARTITION_DELETE_DATA, STREAM_GET_RESULT, STREAM_DROP_CHILD_TABLE, + STREAM_EVENT_OPEN_WINDOW, } EStreamType; #pragma pack(push, 1) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 48afa78251..04e7884020 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -450,8 +450,10 @@ typedef struct STimeWindowAggSupp { } STimeWindowAggSupp; typedef struct SSteamOpBasicInfo { - int32_t primaryPkIndex; - bool updateOperatorInfo; + int32_t primaryPkIndex; + bool updateOperatorInfo; + SSDataBlock* pEventRes; + SArray* pEventInfo; } SSteamOpBasicInfo; typedef struct SStreamFillSupporter { diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index 0a69080314..0c0ea0d6fc 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -57,7 +57,8 @@ typedef struct SSlicePoint { void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type); bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo); void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo); -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption); void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins); diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index fa6008eba7..a9a47580dc 
100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -12,6 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ + +#include "cmdnodes.h" #include "executorInt.h" #include "filter.h" #include "function.h" @@ -53,6 +55,8 @@ void destroyStreamEventOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + + destroyStreamBasicInfo(&pInfo->basic); destroyStreamAggSupporter(&pInfo->streamAggSup); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -121,7 +125,7 @@ void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) { } int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd, - int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) { + int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey, int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t winCode = TSDB_CODE_SUCCESS; @@ -143,6 +147,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin); if (inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag)) { pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -156,6 +161,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) { setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin); pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -163,6 +169,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro SSessionKey winKey = {.win.skey = ts, .win.ekey = ts, .groupId = groupId}; code = 
pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len); QUERY_CHECK_CODE(code, lino, _error); + (*pWinCode) = TSDB_CODE_FAILED; setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin); pCurWin->pWinFlag->startFlag = start; @@ -303,6 +310,14 @@ void doDeleteEventWindow(SStreamAggSupporter* pAggSup, SSHashObj* pSeUpdated, SS removeSessionResult(pAggSup, pSeUpdated, pAggSup->pResultRows, pKey); } +static int32_t setEventData(SSteamOpBasicInfo* pBasicInfo, SSessionKey* pWinKey) { + void* pRes = taosArrayPush(pBasicInfo->pEventInfo, pWinKey); + if (pRes != NULL) { + return TSDB_CODE_SUCCESS; + } + return terrno; +} + static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pSeUpdated, SSHashObj* pStDeleted) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -373,10 +388,16 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl bool allEqual = true; SEventWindowInfo curWin = {0}; SSessionKey nextWinKey = {0}; + int32_t winCode = TSDB_CODE_SUCCESS; code = setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin, - &nextWinKey); + &nextWinKey, &winCode); QUERY_CHECK_CODE(code, lino, _end); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) && winCode != TSDB_CODE_SUCCESS) { + code = setEventData(&pInfo->basic, &curWin.winInfo.sessionWin); + QUERY_CHECK_CODE(code, lino, _end); + } + setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo); bool rebuild = false; code = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData, @@ -561,12 +582,42 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) { } } +static void buildEventNotifyResult(SSteamOpBasicInfo* pBasicInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + blockDataCleanup(pBasicInfo->pEventRes); + int32_t size = 
taosArrayGetSize(pBasicInfo->pEventInfo); + code = blockDataEnsureCapacity(pBasicInfo->pEventRes, size); + QUERY_CHECK_CODE(code, lino, _end); + for (int32_t i = 0; i < size; i++) { + SSessionKey* pKey = taosArrayGet(pBasicInfo->pEventInfo, i); + uint64_t uid = 0; + code = appendDataToSpecialBlock(pBasicInfo->pEventRes, &pKey->win.skey, &pKey->win.ekey, &uid, &pKey->groupId, NULL); + QUERY_CHECK_CODE(code, lino, _end); + } + taosArrayClear(pBasicInfo->pEventInfo); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s.", __func__, lino, tstrerror(code)); + } +} + + static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { int32_t code = TSDB_CODE_SUCCESS; SStreamEventAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + buildEventNotifyResult(&pInfo->basic); + if (pInfo->basic.pEventRes->info.rows > 0) { + printDataBlock(pInfo->basic.pEventRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->basic.pEventRes; + return code; + } + doBuildDeleteDataBlock(pOperator, pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); @@ -957,6 +1008,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey; + initStreamBasicInfo(&pInfo->basic); pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED, diff --git a/source/libs/executor/src/streamexecutorInt.c b/source/libs/executor/src/streamexecutorInt.c index b94798934c..1e7fbfa446 100644 --- a/source/libs/executor/src/streamexecutorInt.c +++ 
b/source/libs/executor/src/streamexecutorInt.c @@ -14,6 +14,7 @@ */ #include "executorInt.h" +#include "tdatablock.h" void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) { if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) { @@ -29,7 +30,19 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->updateOperatorInfo = false; } -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->primaryPkIndex = -1; pBasicInfo->updateOperatorInfo = false; + pBasicInfo->pEventInfo = taosArrayInit(4, sizeof(SSessionKey)); + if (pBasicInfo->pEventInfo == NULL) { + return terrno; + } + return createSpecialDataBlock(STREAM_EVENT_OPEN_WINDOW, &pBasicInfo->pEventRes); +} + +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { + blockDataDestroy(pBasicInfo->pEventRes); + pBasicInfo->pEventRes = NULL; + taosArrayDestroy(pBasicInfo->pEventInfo); + pBasicInfo->pEventInfo = NULL; } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index d038e4d82c..45707e670e 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -651,7 +651,8 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc); diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c 
index 44004a4c6b..9ec6063486 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -2201,7 +2201,8 @@ int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup); From 326ea19c917bb133f8ddfe36199f4db16403b67f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 21 Jan 2025 11:26:06 +0800 Subject: [PATCH 109/120] fix:[TD-33556] tmq close elegantly to avoid invalid read in TD-32585 --- source/client/src/clientTmq.c | 38 +++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index a562cebc2c..f5a487c007 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -74,8 +74,9 @@ enum { }; typedef struct { - tmr_h timer; - int32_t rsetId; + tmr_h timer; + int32_t rsetId; + TdThreadMutex lock; } SMqMgmt; struct tmq_list_t { @@ -1603,13 +1604,33 @@ static void tmqMgmtInit(void) { tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); if (tmqMgmt.timer == NULL) { - tmqInitRes = terrno; + goto END; } tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); if (tmqMgmt.rsetId < 0) { - tmqInitRes = terrno; + goto END; } + + TdThreadMutexAttr attr = {0}; + if (taosThreadMutexAttrInit(&attr) != 0){ + goto END; + } + + if (taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE) != 0){ + goto END; + } + + if (taosThreadMutexInit(&tmqMgmt.lock, &attr) != 0){ + goto END; + } + + if (taosThreadMutexAttrDestroy(&attr) != 0) { + 
goto END; + } + +END: + tmqInitRes = terrno; } void tmqMgmtClose(void) { @@ -1618,6 +1639,7 @@ void tmqMgmtClose(void) { tmqMgmt.timer = NULL; } + (void) taosThreadMutexLock(&tmqMgmt.lock); if (tmqMgmt.rsetId >= 0) { tmq_t *tmq = taosIterateRef(tmqMgmt.rsetId, 0); int64_t refId = 0; @@ -1627,9 +1649,7 @@ void tmqMgmtClose(void) { if (refId == 0) { break; } - taosWLockLatch(&tmq->lock); atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED); - taosWUnLockLatch(&tmq->lock); if (taosRemoveRef(tmqMgmt.rsetId, tmq->refId) != 0) { qWarn("taosRemoveRef tmq refId:%" PRId64 " failed, error:%s", refId, tstrerror(terrno)); @@ -1640,6 +1660,8 @@ void tmqMgmtClose(void) { taosCloseRef(tmqMgmt.rsetId); tmqMgmt.rsetId = -1; } + (void)taosThreadMutexUnlock(&tmqMgmt.lock); + (void)taosThreadMutexDestroy(&tmqMgmt.lock); } tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { @@ -2636,7 +2658,7 @@ int32_t tmq_unsubscribe(tmq_t* tmq) { int32_t tmq_consumer_close(tmq_t* tmq) { if (tmq == NULL) return TSDB_CODE_INVALID_PARA; int32_t code = 0; - taosWLockLatch(&tmq->lock); + (void) taosThreadMutexLock(&tmqMgmt.lock); if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){ goto end; } @@ -2651,7 +2673,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) { } end: - taosWUnLockLatch(&tmq->lock); + (void)taosThreadMutexUnlock(&tmqMgmt.lock); return code; } From a4e88660db1b28487f85fbb1192a74e2e0af73bb Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 21 Jan 2025 11:33:08 +0800 Subject: [PATCH 110/120] fix(rpc):use tsApplyMemoryAllowed to control memory alloc while apply msg. 
--- include/common/tglobal.h | 4 +++ include/util/tqueue.h | 1 + source/common/src/tglobal.c | 25 +++++++++++++----- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 2 +- source/dnode/mgmt/test/sut/src/sut.cpp | 3 ++- source/libs/sync/src/syncPipeline.c | 6 ++++- source/util/src/tqueue.c | 29 ++++++++++++++++++--- 8 files changed, 57 insertions(+), 15 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 6beb7c8860..4e9a9bd801 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -34,6 +34,9 @@ extern "C" { #define GLOBAL_CONFIG_FILE_VERSION 1 #define LOCAL_CONFIG_FILE_VERSION 1 +#define RPC_MEMORY_USAGE_RATIO 0.1 +#define QUEUE_MEMORY_USAGE_RATIO 0.6 + typedef enum { DND_CA_SM4 = 1, } EEncryptAlgor; @@ -110,6 +113,7 @@ extern int32_t tsNumOfQnodeFetchThreads; extern int32_t tsNumOfSnodeStreamThreads; extern int32_t tsNumOfSnodeWriteThreads; extern int64_t tsQueueMemoryAllowed; +extern int64_t tsApplyMemoryAllowed; extern int32_t tsRetentionSpeedLimitMB; extern int32_t tsNumOfCompactThreads; diff --git a/include/util/tqueue.h b/include/util/tqueue.h index 5ae642b69f..1d634ce742 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -55,6 +55,7 @@ typedef struct { typedef enum { DEF_QITEM = 0, RPC_QITEM = 1, + APPLY_QITEM = 2, } EQItype; typedef void (*FItem)(SQueueInfo *pInfo, void *pItem); diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1e4d3f8c3c..5343e2de97 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,12 +14,12 @@ */ #define _DEFAULT_SOURCE -#include "tglobal.h" #include "cJSON.h" #include "defines.h" #include "os.h" #include "osString.h" #include "tconfig.h" +#include "tglobal.h" #include "tgrant.h" #include "tjson.h" #include "tlog.h" @@ -500,7 +500,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { TAOS_RETURN(TSDB_CODE_SUCCESS); } -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig 
*taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -818,8 +820,13 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); - tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; - tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); + tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * QUEUE_MEMORY_USAGE_RATIO; + tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10LL, + TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10000LL); + + tsApplyMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * (1 - QUEUE_MEMORY_USAGE_RATIO); + tsApplyMemoryAllowed = TRANGE(tsApplyMemoryAllowed, TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10LL, + TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10000LL); tsLogBufferMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsLogBufferMemoryAllowed = TRANGE(tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); @@ -857,7 +864,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); - TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, 
CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); @@ -1572,7 +1579,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfSnodeWriteThreads = pItem->i32; TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "rpcQueueMemoryAllowed"); - tsQueueMemoryAllowed = pItem->i64; + tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO; + tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "simdEnable"); tsSIMDEnable = (bool)pItem->bval; @@ -2395,6 +2403,10 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { code = TSDB_CODE_SUCCESS; goto _exit; } + if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) { + tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO; + tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO); + } if (strcasecmp(name, "numOfCompactThreads") == 0) { #ifdef TD_ENTERPRISE @@ -2500,7 +2512,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"experimental", &tsExperimental}, {"numOfRpcSessions", &tsNumOfRpcSessions}, - {"rpcQueueMemoryAllowed", &tsQueueMemoryAllowed}, {"shellActivityTimer", &tsShellActivityTimer}, {"readTimeout", &tsReadTimeout}, {"safetyCheckLevel", &tsSafetyCheckLevel}, diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 9ed4ee83c4..637713d2f9 100644 --- 
a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -181,7 +181,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { req.numOfSupportVnodes = tsNumOfSupportVnodes; req.numOfDiskCfg = tsDiskCfgNum; req.memTotal = tsTotalMemoryKB * 1024; - req.memAvail = req.memTotal - tsQueueMemoryAllowed - 16 * 1024 * 1024; + req.memAvail = req.memTotal - tsQueueMemoryAllowed - tsApplyMemoryAllowed - 16 * 1024 * 1024; tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN); tstrncpy(req.machineId, pMgmt->pData->machineId, TSDB_MACHINE_ID_LEN + 1); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index c22adec9b4..334c213945 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -323,7 +323,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { return TSDB_CODE_INVALID_MSG; } - EQItype itype = APPLY_QUEUE == qtype ? DEF_QITEM : RPC_QITEM; + EQItype itype = APPLY_QUEUE == qtype ? 
APPLY_QITEM : RPC_QITEM; SRpcMsg *pMsg; code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg); if (code) { diff --git a/source/dnode/mgmt/test/sut/src/sut.cpp b/source/dnode/mgmt/test/sut/src/sut.cpp index 13c8c73f44..a1fdebb636 100644 --- a/source/dnode/mgmt/test/sut/src/sut.cpp +++ b/source/dnode/mgmt/test/sut/src/sut.cpp @@ -36,7 +36,8 @@ void Testbase::InitLog(const char* path) { tstrncpy(tsLogDir, path, PATH_MAX); taosGetSystemInfo(); - tsQueueMemoryAllowed = tsTotalMemoryKB * 0.1; + tsQueueMemoryAllowed = tsTotalMemoryKB * 0.06; + tsApplyMemoryAllowed = tsTotalMemoryKB * 0.04; if (taosInitLog("taosdlog", 1, false) != 0) { printf("failed to init log file\n"); } diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 3022a1f8ac..18252db9ee 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -732,7 +732,11 @@ int32_t syncFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, SyncTe pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType), code, retry); if (retry) { taosMsleep(10); - sError("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, tstrerror(code), pEntry->index); + if (code == TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE) { + sError("vgId:%d, failed to execute fsm since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + } else { + sDebug("vgId:%d, retry on fsm commit since %s. 
index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + } } } while (retry); diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index f531d9ad61..fd55851cc6 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -14,14 +14,16 @@ */ #define _DEFAULT_SOURCE -#include "tqueue.h" #include "taoserror.h" #include "tlog.h" +#include "tqueue.h" #include "tutil.h" int64_t tsQueueMemoryAllowed = 0; int64_t tsQueueMemoryUsed = 0; +int64_t tsApplyMemoryAllowed = 0; +int64_t tsApplyMemoryUsed = 0; struct STaosQueue { STaosQnode *head; STaosQnode *tail; @@ -148,21 +150,35 @@ int64_t taosQueueMemorySize(STaosQueue *queue) { } int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void **item) { - int64_t alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize); + int64_t alloced = -1; + if (alloced > tsQueueMemoryAllowed) { + alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize); if (itype == RPC_QITEM) { uError("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced, tsQueueMemoryAllowed); (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); } + } else if (itype == APPLY_QITEM) { + alloced = atomic_add_fetch_64(&tsApplyMemoryUsed, size + dataSize); + if (alloced > tsApplyMemoryAllowed) { + uDebug("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced, + tsApplyMemoryAllowed); + (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE; + return NULL; + } } *item = NULL; STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); - if (pNode == NULL) { + if (itype == RPC_QITEM) { (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); return terrno; + } else if (itype == APPLY_QITEM) { + (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + return terrno; } pNode->dataSize 
= dataSize; @@ -178,7 +194,12 @@ void taosFreeQitem(void *pItem) { if (pItem == NULL) return; STaosQnode *pNode = (STaosQnode *)((char *)pItem - sizeof(STaosQnode)); - int64_t alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize); + int64_t alloced = -1; + if (pNode->itype == RPC_QITEM) { + alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize); + } else if (pNode->itype == APPLY_QITEM) { + alloced = atomic_sub_fetch_64(&tsApplyMemoryUsed, pNode->size + pNode->dataSize); + } uTrace("item:%p, node:%p is freed, alloc:%" PRId64, pItem, pNode, alloced); taosMemoryFree(pNode); From 18dd35546359a45b15cc09ac124e98be200fc681 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 21 Jan 2025 11:37:26 +0800 Subject: [PATCH 111/120] Fix build error. --- source/util/src/tqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index fd55851cc6..1e42ac1d86 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -167,7 +167,7 @@ int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void ** tsApplyMemoryAllowed); (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE; - return NULL; + return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); } } From 9d01175c88fc93df660540a817182d3cf9bf454e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 21 Jan 2025 11:42:28 +0800 Subject: [PATCH 112/120] fix:[TD-33556] tmq close elegantly to avoid invalid read in TD-32585 --- source/client/src/clientTmq.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index f5a487c007..e137bd3ee3 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1612,20 +1612,7 @@ static void tmqMgmtInit(void) { goto END; } - TdThreadMutexAttr attr = {0}; - if (taosThreadMutexAttrInit(&attr) != 0){ - 
goto END; - } - - if (taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE) != 0){ - goto END; - } - - if (taosThreadMutexInit(&tmqMgmt.lock, &attr) != 0){ - goto END; - } - - if (taosThreadMutexAttrDestroy(&attr) != 0) { + if (taosThreadMutexInit(&tmqMgmt.lock, NULL) != 0){ goto END; } From 685d06d8aa62af4b5a262bb68c2eeae8e01680d3 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 21 Jan 2025 11:44:49 +0800 Subject: [PATCH 113/120] fix:[TD-33556] tmq close elegantly to avoid invalid read in TD-32585 --- source/client/src/clientTmq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index e137bd3ee3..4d24548a31 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1615,6 +1615,7 @@ static void tmqMgmtInit(void) { if (taosThreadMutexInit(&tmqMgmt.lock, NULL) != 0){ goto END; } + return; END: tmqInitRes = terrno; From 3070d2d455abf0efe1724f86b89f9341ad14d6ad Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 21 Jan 2025 12:34:47 +0800 Subject: [PATCH 114/120] Fix merge code error. 
--- source/util/src/tqueue.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 1e42ac1d86..db75c6b2ff 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -166,18 +166,18 @@ int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void ** uDebug("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced, tsApplyMemoryAllowed); (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); - terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE; return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); } } *item = NULL; STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); - if (itype == RPC_QITEM) { - (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); - return terrno; - } else if (itype == APPLY_QITEM) { - (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + if (pNode == NULL) { + if (itype == RPC_QITEM) { + (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); + } else if (itype == APPLY_QITEM) { + (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + } return terrno; } From 6f976d604a65f171f2d4bd44dd21cd9ce5d0ebd2 Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Tue, 21 Jan 2025 12:45:54 +0800 Subject: [PATCH 115/120] Fix ci problems. 
--- source/common/src/tglobal.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 5343e2de97..84e0ffb313 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -2406,6 +2406,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) { tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO; tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO); + code = TSDB_CODE_SUCCESS; + goto _exit; } if (strcasecmp(name, "numOfCompactThreads") == 0) { From 779b1de7afc9c865adcd0b6d1bd99b20ccc5e564 Mon Sep 17 00:00:00 2001 From: menshibin Date: Tue, 21 Jan 2025 13:18:51 +0800 Subject: [PATCH 116/120] doc:update flink connector version --- docs/en/10-third-party/01-collection/flink.md | 5 +++-- docs/zh/10-third-party/01-collection/12-flink.md | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/en/10-third-party/01-collection/flink.md b/docs/en/10-third-party/01-collection/flink.md index 12468b4f6c..19a767f1f6 100644 --- a/docs/en/10-third-party/01-collection/flink.md +++ b/docs/en/10-third-party/01-collection/flink.md @@ -26,7 +26,8 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version | Flink Connector Version | Major Changes | TDengine Version| |-------------------------| ------------------------------------ | ---------------- | -| 2.0.0 | 1.Support SQL queries on data in TDengine database.
2. Support CDC subscription to data in TDengine database.
3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.0 and higher| +| 2.0.1 | Sink supports writing all data types that inherit from and implement RowData.| - | +| 2.0.0 | 1.Support SQL queries on data in TDengine database.
2. Support CDC subscription to data in TDengine database.
3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher| | 1.0.0 | Support Sink function to write data from other sources to TDengine in the future.| 3.3.2.0 and higher| ## Exception and error codes @@ -114,7 +115,7 @@ If using Maven to manage a project, simply add the following dependencies in pom com.taosdata.flink flink-connector-tdengine - 2.0.0 + 2.0.1 ``` diff --git a/docs/zh/10-third-party/01-collection/12-flink.md b/docs/zh/10-third-party/01-collection/12-flink.md index e085d2fd53..0f8bde5260 100644 --- a/docs/zh/10-third-party/01-collection/12-flink.md +++ b/docs/zh/10-third-party/01-collection/12-flink.md @@ -24,7 +24,8 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。 ## 版本历史 | Flink Connector 版本 | 主要变化 | TDengine 版本 | | ------------------| ------------------------------------ | ---------------- | -| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据
2. 支持 CDC 订阅 TDengine 数据库中的数据
3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.0 及以上版本 | +| 2.0.1 | Sink 支持对所有继承自 RowData 并已实现的类型进行数据写入| - | +| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据
2. 支持 CDC 订阅 TDengine 数据库中的数据
3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.1 及以上版本 | | 1.0.0 | 支持 Sink 功能,将来着其他数据源的数据写入到 TDengine| 3.3.2.0 及以上版本| ## 异常和错误码 @@ -111,7 +112,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE); com.taosdata.flink flink-connector-tdengine - 2.0.0 + 2.0.1 ``` From eb5d46349071aaef64a7ce90a7bb65cc1799b09f Mon Sep 17 00:00:00 2001 From: Jinqing Kuang Date: Thu, 16 Jan 2025 08:43:47 +0800 Subject: [PATCH 117/120] feat(stream)[TS-5469]. add support for window event notifications in stream processing - Introduce new syntax to specify notification type and destination address - Collect relevant event information during window computations - Implement websocket-based notification delivery to the specified address --- cmake/addr2line_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/ssl_CMakeLists.txt.in | 6 +- contrib/CMakeLists.txt | 16 +- include/common/tcommon.h | 5 +- include/common/tdatablock.h | 2 + include/common/tmsg.h | 6 + include/libs/executor/executor.h | 3 + include/libs/nodes/cmdnodes.h | 49 +- include/libs/stream/tstream.h | 22 +- include/util/tdef.h | 1 + include/util/tlog.h | 3 + source/common/CMakeLists.txt | 21 +- source/common/src/msg/tmsg.c | 35 ++ source/common/src/tdatablock.c | 27 + source/dnode/mnode/impl/src/mndStream.c | 79 +++ source/dnode/vnode/CMakeLists.txt | 1 + source/dnode/vnode/src/inc/tq.h | 5 + source/dnode/vnode/src/inc/vnodeInt.h | 5 + source/dnode/vnode/src/tq/tqSink.c | 16 +- source/dnode/vnode/src/tq/tqStreamNotify.c | 445 +++++++++++++++ source/dnode/vnode/src/tqCommon/tqCommon.c | 10 +- source/dnode/vnode/src/vnd/vnodeOpen.c | 10 + source/libs/executor/inc/executorInt.h | 16 +- source/libs/executor/inc/querytask.h | 4 + source/libs/executor/inc/streamexecutorInt.h | 10 + source/libs/executor/src/executor.c | 22 + source/libs/executor/src/querytask.c | 2 + .../executor/src/streameventwindowoperator.c | 84 +-- source/libs/executor/src/streamexecutorInt.c | 519 +++++++++++++++++- 
.../src/streamintervalsliceoperator.c | 1 + .../executor/src/streamtimesliceoperator.c | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 52 ++ source/libs/nodes/src/nodesUtilFuncs.c | 9 + source/libs/parser/inc/parAst.h | 6 +- source/libs/parser/inc/sql.y | 22 +- source/libs/parser/src/parAstCreater.c | 115 +++- source/libs/parser/src/parTokenizer.c | 3 + source/libs/parser/src/parTranslater.c | 43 ++ source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamMeta.c | 2 + source/libs/stream/src/streamTask.c | 87 ++- source/util/src/tlog.c | 29 + 43 files changed, 1706 insertions(+), 94 deletions(-) create mode 100644 source/dnode/vnode/src/tq/tqStreamNotify.c diff --git a/cmake/addr2line_CMakeLists.txt.in b/cmake/addr2line_CMakeLists.txt.in index 93fb9bb96c..7cfcb46718 100644 --- a/cmake/addr2line_CMakeLists.txt.in +++ b/cmake/addr2line_CMakeLists.txt.in @@ -2,7 +2,7 @@ # addr2line ExternalProject_Add(addr2line GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git - GIT_TAG master + GIT_TAG main SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line" BINARY_DIR "${TD_CONTRIB_DIR}/addr2line" CONFIGURE_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 6494177faf..2a14018810 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -12,7 +12,7 @@ ExternalProject_Add(curl2 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug + CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug BUILD_COMMAND make -j INSTALL_COMMAND 
make install TEST_COMMAND "" diff --git a/cmake/ssl_CMakeLists.txt.in b/cmake/ssl_CMakeLists.txt.in index 1098593943..81e1cb15e9 100644 --- a/cmake/ssl_CMakeLists.txt.in +++ b/cmake/ssl_CMakeLists.txt.in @@ -6,9 +6,9 @@ ExternalProject_Add(openssl DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/openssl" BUILD_IN_SOURCE TRUE - #BUILD_ALWAYS 1 - #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared + BUILD_ALWAYS 1 + UPDATE_COMMAND "" + CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared BUILD_COMMAND make -j INSTALL_COMMAND make install_sw -j TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 2304ad54aa..767df03d22 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS}) file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") @@ -146,11 +145,16 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# libcurl +if(NOT ${TD_WINDOWS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/) + cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(NOT ${TD_WINDOWS}) + # s3 if(${BUILD_WITH_S3}) - cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" 
${CONTRIB_TMP_FILE}) add_definitions(-DUSE_S3) @@ -160,7 +164,6 @@ elseif(${BUILD_WITH_COS}) # cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - # cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif() @@ -199,6 +202,11 @@ endif() # lemon cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers +IF(${TD_DARWIN}) + SET(CONTRIB_CONFIG_ENV "CC=cc") +ENDIF() + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 3f76239ce5..c30f2ab4ec 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -160,7 +160,7 @@ typedef enum EStreamType { STREAM_PARTITION_DELETE_DATA, STREAM_GET_RESULT, STREAM_DROP_CHILD_TABLE, - STREAM_EVENT_OPEN_WINDOW, + STREAM_NOTIFY_EVENT, } EStreamType; #pragma pack(push, 1) @@ -409,6 +409,9 @@ typedef struct STUidTagInfo { #define UD_GROUPID_COLUMN_INDEX 1 #define UD_TAG_COLUMN_INDEX 2 +// stream notify event block column +#define NOTIFY_EVENT_STR_COLUMN_INDEX 0 + int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime); int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol); diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 1103b89ccb..96478047ca 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -285,6 +285,8 @@ bool isAutoTableName(char* ctbName); int32_t buildCtbNameAddGroupId(const char* stbName, char* ctbName, uint64_t 
groupId, size_t cap); int32_t buildCtbNameByGroupId(const char* stbName, uint64_t groupId, char** pName); int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf); +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName); int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index aebe09b563..82eaa2359e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -269,6 +269,7 @@ typedef enum ENodeType { QUERY_NODE_TSMA_OPTIONS, QUERY_NODE_ANOMALY_WINDOW, QUERY_NODE_RANGE_AROUND, + QUERY_NODE_STREAM_NOTIFY_OPTIONS, // Statement nodes are used in parser and planner module. QUERY_NODE_SET_OPERATOR = 100, @@ -2956,6 +2957,11 @@ typedef struct { // 3.3.0.0 SArray* pCols; // array of SField int64_t smaId; + // 3.3.6.0 + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + int8_t notifyHistory; } SCMCreateStreamReq; typedef struct { diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 883c5f7b99..9a7c3912b0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -98,6 +98,9 @@ int32_t qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId); int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, + const char* stbFullName, bool newSubTableRule); + /** * Set multiple input data blocks for the stream scan. 
* @param tinfo diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 12d77bd0c2..26482a87d4 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -566,19 +566,44 @@ typedef struct SStreamOptions { int64_t setFlag; } SStreamOptions; +typedef enum EStreamNotifyOptionSetFlag { + SNOTIFY_OPT_ERROR_HANDLE_SET = BIT_FLAG_MASK(0), + SNOTIFY_OPT_NOTIFY_HISTORY_SET = BIT_FLAG_MASK(1), +} EStreamNotifyOptionSetFlag; + +typedef enum EStreamNotifyEventType { + SNOTIFY_EVENT_WINDOW_OPEN = BIT_FLAG_MASK(0), + SNOTIFY_EVENT_WINDOW_CLOSE = BIT_FLAG_MASK(1), +} EStreamNotifyEventType; + +typedef enum EStreamNotifyErrorHandleType { + SNOTIFY_ERROR_HANDLE_PAUSE, + SNOTIFY_ERROR_HANDLE_DROP, +} EStreamNotifyErrorHandleType; + +typedef struct SStreamNotifyOptions { + ENodeType type; + SNodeList* pAddrUrls; + EStreamNotifyEventType eventTypes; + EStreamNotifyErrorHandleType errorHandle; + bool notifyHistory; + EStreamNotifyOptionSetFlag setFlag; +} SStreamNotifyOptions; + typedef struct SCreateStreamStmt { - ENodeType type; - char streamName[TSDB_TABLE_NAME_LEN]; - char targetDbName[TSDB_DB_NAME_LEN]; - char targetTabName[TSDB_TABLE_NAME_LEN]; - bool ignoreExists; - SStreamOptions* pOptions; - SNode* pQuery; - SNode* pPrevQuery; - SNodeList* pTags; - SNode* pSubtable; - SNodeList* pCols; - SCMCreateStreamReq* pReq; + ENodeType type; + char streamName[TSDB_TABLE_NAME_LEN]; + char targetDbName[TSDB_DB_NAME_LEN]; + char targetTabName[TSDB_TABLE_NAME_LEN]; + bool ignoreExists; + SStreamOptions* pOptions; + SNode* pQuery; + SNode* pPrevQuery; + SNodeList* pTags; + SNode* pSubtable; + SNodeList* pCols; + SStreamNotifyOptions* pNotifyOptions; + SCMCreateStreamReq* pReq; } SCreateStreamStmt; typedef struct SDropStreamStmt { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index a4d89dcdcc..9cd6dd13ca 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -65,10 +65,14 @@ typedef struct 
SStreamTaskSM SStreamTaskSM; typedef struct SStreamQueueItem SStreamQueueItem; typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; -#define SSTREAM_TASK_VER 4 -#define SSTREAM_TASK_INCOMPATIBLE_VER 1 -#define SSTREAM_TASK_NEED_CONVERT_VER 2 -#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 +#define SSTREAM_TASK_VER 5 +#define SSTREAM_TASK_INCOMPATIBLE_VER 1 +#define SSTREAM_TASK_NEED_CONVERT_VER 2 +#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 // Append subtable name with groupId +#define SSTREAM_TASK_APPEND_STABLE_NAME_VER 4 // Append subtable name with stableName and groupId +#define SSTREAM_TASK_ADD_NOTIFY_VER 5 // Support event notification at window open/close + +#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) extern int32_t streamMetaRefPool; extern int32_t streamTaskRefPool; @@ -427,6 +431,15 @@ typedef struct STaskCheckInfo { TdThreadMutex checkInfoLock; } STaskCheckInfo; +typedef struct SNotifyInfo { + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + char* streamName; + char* stbFullName; + SSchemaWrapper* pSchemaWrapper; +} SNotifyInfo; + struct SStreamTask { int64_t ver; SStreamTaskId id; @@ -449,6 +462,7 @@ struct SStreamTask { SStreamState* pState; // state backend SUpstreamInfo upstreamInfo; STaskCheckInfo taskCheckInfo; + SNotifyInfo notifyInfo; // the followings attributes don't be serialized SScanhistorySchedInfo schedHistoryInfo; diff --git a/include/util/tdef.h b/include/util/tdef.h index 0fa00bf1d2..f08697b0d4 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -245,6 +245,7 @@ typedef enum ELogicConditionType { #define TSDB_OFFSET_LEN 64 // it is a null-terminated string #define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string #define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string +#define TSDB_STREAM_NOTIFY_URL_LEN 128 // it includes the terminating '\0' #define TSDB_DB_NAME_LEN 
65 #define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024 diff --git a/include/util/tlog.h b/include/util/tlog.h index f573d61e73..60ddc29288 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -79,6 +79,9 @@ void taosResetLog(); void taosDumpData(uint8_t *msg, int32_t len); void taosSetNoNewFile(); +// Fast uint64_t to string conversion, equivalent to sprintf(buf, "%lu", val) but with 10x better performance. +char *u64toaFastLut(uint64_t val, char *buf); + void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) #ifdef __GNUC__ __attribute__((format(printf, 4, 5))) diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index e050eaa16d..8dccdaa016 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -54,6 +54,23 @@ target_link_libraries( INTERFACE api ) +if(NOT ${TD_WINDOWS}) + target_include_directories( + common + PUBLIC "$ENV{HOME}/.cos-local.2/include" + ) + + find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + target_link_libraries( + common + PUBLIC ${CURL_LIBRARY} + PUBLIC ${SSL_LIBRARY} + PUBLIC ${CRYPTO_LIBRARY} + ) +endif() + if(${BUILD_S3}) if(${BUILD_WITH_S3}) target_include_directories( @@ -65,9 +82,6 @@ if(${BUILD_S3}) set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) find_library(S3_LIBRARY s3) - find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) 
target_link_libraries( common @@ -87,7 +101,6 @@ if(${BUILD_S3}) find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) - find_library(CURL_LIBRARY curl) target_link_libraries( common diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index a3989012f6..7a51669d46 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -9959,6 +9959,16 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS } TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->smaId)); + + int32_t addrSize = taosArrayGetSize(pReq->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char *url = taosArrayGetP(pReq->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT((tEncodeCStr(&encoder, url))); + } + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyErrorHandle)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->notifyHistory)); tEndEncode(&encoder); _exit: @@ -10093,6 +10103,30 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->smaId)); } + if (!tDecodeIsEnd(&decoder)) { + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &addrSize)); + pReq->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES); + if (pReq->pNotifyAddrUrls == NULL) { + TAOS_CHECK_EXIT(terrno); + } + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(&decoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + TAOS_CHECK_EXIT(terrno); + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyErrorHandle)); + 
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->notifyHistory)); + } + tEndDecode(&decoder); _exit: tDecoderClear(&decoder); @@ -10155,6 +10189,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) { taosArrayDestroy(pReq->fillNullCols); taosArrayDestroy(pReq->pVgroupVerList); taosArrayDestroy(pReq->pCols); + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); } int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index bd18c9ceb9..c3e0fff578 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3061,6 +3061,33 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha return code; } +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (parTbName[0]) { + if (newSubTableRule && !isAutoTableName(parTbName) && !alreadyAddGroupId(parTbName, gid) && gid != 0 && + stbFullName) { + *dstTableName = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + + tstrncpy(*dstTableName, parTbName, TSDB_TABLE_NAME_LEN); + code = buildCtbNameAddGroupId(stbFullName, *dstTableName, gid, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_CODE(code, lino, _end); + } else { + *dstTableName = taosStrdup(parTbName); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + } + } else { + code = buildCtbNameByGroupId(stbFullName, gid, dstTableName); + TSDB_CHECK_CODE(code, lino, _end); + } + +_end: + return code; +} + // return length of encoded data, return -1 if failed int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { int32_t code = blockDataCheck(pBlock); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 50018e867f..c1cf41103b 100644 --- 
a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -753,6 +753,77 @@ static int32_t doStreamCheck(SMnode *pMnode, SStreamObj *pStreamObj) { return TSDB_CODE_SUCCESS; } +static void *notifyAddrDup(void *p) { return taosStrdup((char *)p); } + +static int32_t addStreamTaskNotifyInfo(const SCMCreateStreamReq *createReq, const SStreamObj *pStream, + SStreamTask *pTask) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + + pTask->notifyInfo.pNotifyAddrUrls = taosArrayDup(createReq->pNotifyAddrUrls, notifyAddrDup); + TSDB_CHECK_NULL(pTask->notifyInfo.pNotifyAddrUrls, code, lino, _end, terrno); + pTask->notifyInfo.notifyEventTypes = createReq->notifyEventTypes; + pTask->notifyInfo.notifyErrorHandle = createReq->notifyErrorHandle; + pTask->notifyInfo.streamName = taosStrdup(createReq->name); + TSDB_CHECK_NULL(pTask->notifyInfo.streamName, code, lino, _end, terrno); + pTask->notifyInfo.stbFullName = taosStrdup(createReq->targetStbFullName); + TSDB_CHECK_NULL(pTask->notifyInfo.stbFullName, code, lino, _end, terrno); + pTask->notifyInfo.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); + TSDB_CHECK_NULL(pTask->notifyInfo.pSchemaWrapper, code, lino, _end, terrno); + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t addStreamNotifyInfo(SCMCreateStreamReq *createReq, SStreamObj *pStream) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t level = 0; + int32_t nTasks = 0; + SArray *pLevel = NULL; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pStream, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (taosArrayGetSize(createReq->pNotifyAddrUrls) == 0) { + goto _end; + } + + level = taosArrayGetSize(pStream->tasks); 
+ for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->tasks, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + + if (pStream->conf.fillHistory && createReq->notifyHistory) { + level = taosArrayGetSize(pStream->pHTasksList); + for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->pHTasksList, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s for stream %s failed at line %d since %s", __func__, pStream->name, lino, tstrerror(code)); + } + return code; +} + static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; @@ -850,6 +921,14 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } + // add notify info into all stream tasks + code = addStreamNotifyInfo(&createReq, &streamObj); + if (code != TSDB_CODE_SUCCESS) { + mError("stream:%s failed to add stream notify info since %s", createReq.name, tstrerror(code)); + mndTransDrop(pTrans); + goto _OVER; + } + // add stream to trans code = mndPersistStream(pTrans, &streamObj); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 8f63cc8779..b90e1844ae 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -75,6 +75,7 @@ set( "src/tq/tqSnapshot.c" "src/tq/tqStreamStateSnap.c" "src/tq/tqStreamTaskSnap.c" + "src/tq/tqStreamNotify.c" ) aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES) diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 12a803d1d8..e0bf51b333 100644 
--- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -159,6 +159,11 @@ int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t n SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq); int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type); +// tq send notifications +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode); + #define TQ_ERR_GO_TO_END(c) \ do { \ code = c; \ diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 940116317c..02c3b3ebe0 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,8 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +typedef struct SStreamNotifyHandleMap SStreamNotifyHandleMap; + #define VNODE_META_TMP_DIR "meta.tmp" #define VNODE_META_BACKUP_DIR "meta.backup" @@ -496,6 +498,9 @@ struct SVnode { int64_t blockSeq; SQHandle* pQuery; SVMonitorObj monitor; + + // Notification Handles + SStreamNotifyHandleMap* pNotifyHandleMap; }; #define TD_VID(PVNODE) ((PVNODE)->config.vgId) diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 7ba77cf813..98ea92125c 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -16,8 +16,6 @@ #include "tcommon.h" #include "tq.h" -#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) - typedef struct STableSinkInfo { uint64_t uid; tstr name; @@ -983,7 +981,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat tqDebug("s-task:%s append groupId:%" PRId64 " for generated dstTable:%s", id, groupId, 
dstTableName); if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER && stbFullName) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER && stbFullName) { code = buildCtbNameAddGroupId(stbFullName, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); } if (code != TSDB_CODE_SUCCESS) { @@ -1150,6 +1148,12 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { return; } + code = tqSendAllNotifyEvents(pBlocks, pTask, pVnode); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId: %d, s-task:%s failed to send all event notifications", vgId, id); + // continue processing even if notification fails + } + bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks); if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) { tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id, @@ -1173,6 +1177,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { continue; } else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) { code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid); + } else if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; } else { code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs); } @@ -1317,6 +1323,10 @@ void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVno continue; } + if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; + } + hasSubmit = true; pTask->execInfo.sink.numOfBlocks += 1; uint64_t groupId = pDataBlock->info.id.groupId; diff --git a/source/dnode/vnode/src/tq/tqStreamNotify.c b/source/dnode/vnode/src/tq/tqStreamNotify.c new file mode 100644 index 0000000000..46ee95d3b9 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqStreamNotify.c @@ -0,0 +1,445 @@ +/* 
+ * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "cmdnodes.h" +#include "tq.h" + +#ifndef WINDOWS +#include "curl/curl.h" +#endif + +#define STREAM_EVENT_NOTIFY_RETRY_MS 50 // 50ms + +typedef struct SStreamNotifyHandle { + TdThreadMutex mutex; +#ifndef WINDOWS + CURL* curl; +#endif + char* url; +} SStreamNotifyHandle; + +struct SStreamNotifyHandleMap { + TdThreadMutex gMutex; + SHashObj* handleMap; +}; + +static void stopStreamNotifyConn(SStreamNotifyHandle* pHandle) { +#ifndef WINDOWS + if (pHandle == NULL || pHandle->curl == NULL) { + return; + } + // status code 1000 means normal closure + size_t len = 0; + uint16_t status = htons(1000); + CURLcode res = curl_ws_send(pHandle->curl, &status, sizeof(status), &len, 0, CURLWS_CLOSE); + if (res != CURLE_OK) { + tqWarn("failed to send ws-close msg to %s for %d", pHandle->url ? 
pHandle->url : "", res); + } + // TODO: add wait mechanism for peer connection close response + curl_easy_cleanup(pHandle->curl); +#endif +} + +static void destroyStreamNotifyHandle(void* ptr) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandle** ppHandle = ptr; + + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + code = taosThreadMutexDestroy(&(*ppHandle)->mutex); + stopStreamNotifyConn(*ppHandle); + taosMemoryFreeClear((*ppHandle)->url); + taosMemoryFreeClear(*ppHandle); +} + +static void releaseStreamNotifyHandle(SStreamNotifyHandle** ppHandle) { + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + (void)taosThreadMutexUnlock(&(*ppHandle)->mutex); + *ppHandle = NULL; +} + +static int32_t acquireStreamNotifyHandle(SStreamNotifyHandleMap* pMap, const char* url, + SStreamNotifyHandle** ppHandle) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + bool gLocked = false; + SStreamNotifyHandle** ppFindHandle = NULL; + SStreamNotifyHandle* pNewHandle = NULL; + CURL* newCurl = NULL; + CURLcode res = CURLE_OK; + + TSDB_CHECK_NULL(pMap, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(url, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(ppHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppHandle = NULL; + + code = taosThreadMutexLock(&pMap->gMutex); + TSDB_CHECK_CODE(code, lino, _end); + gLocked = true; + + ppFindHandle = taosHashGet(pMap->handleMap, url, strlen(url)); + if (ppFindHandle == NULL) { + pNewHandle = taosMemoryCalloc(1, sizeof(SStreamNotifyHandle)); + TSDB_CHECK_NULL(pNewHandle, code, lino, _end, terrno); + code = taosThreadMutexInit(&pNewHandle->mutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + code = taosHashPut(pMap->handleMap, url, strlen(url), &pNewHandle, POINTER_BYTES); + TSDB_CHECK_CODE(code, lino, _end); + *ppHandle = pNewHandle; + pNewHandle = NULL; + } else { + *ppHandle = *ppFindHandle; + } + + code = 
taosThreadMutexLock(&(*ppHandle)->mutex); + TSDB_CHECK_CODE(code, lino, _end); + + (void)taosThreadMutexUnlock(&pMap->gMutex); + gLocked = false; + + if ((*ppHandle)->curl == NULL) { + newCurl = curl_easy_init(); + TSDB_CHECK_NULL(newCurl, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_URL, url); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYPEER, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYHOST, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_TIMEOUT, 3L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_CONNECT_ONLY, 2L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_perform(newCurl); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + (*ppHandle)->curl = newCurl; + newCurl = NULL; + } + + if ((*ppHandle)->url == NULL) { + (*ppHandle)->url = taosStrdup(url); + TSDB_CHECK_NULL((*ppHandle)->url, code, lino, _end, terrno); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + if (*ppHandle) { + releaseStreamNotifyHandle(ppHandle); + } + *ppHandle = NULL; + } + if (newCurl) { + curl_easy_cleanup(newCurl); + } + if (pNewHandle) { + destroyStreamNotifyHandle(&pNewHandle); + } + if (gLocked) { + (void)taosThreadMutexUnlock(&pMap->gMutex); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandleMap* pMap = NULL; + + TSDB_CHECK_NULL(ppMap, 
code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppMap = NULL; + pMap = taosMemoryCalloc(1, sizeof(SStreamNotifyHandleMap)); + TSDB_CHECK_NULL(pMap, code, lino, _end, terrno); + code = taosThreadMutexInit(&pMap->gMutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + pMap->handleMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + TSDB_CHECK_NULL(pMap->handleMap, code, lino, _end, terrno); + taosHashSetFreeFp(pMap->handleMap, destroyStreamNotifyHandle); + *ppMap = pMap; + pMap = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (pMap != NULL) { + tqDestroyNotifyHandleMap(&pMap); + } + return code; +} + +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (*ppMap == NULL) { + return; + } + taosHashCleanup((*ppMap)->handleMap); + code = taosThreadMutexDestroy(&(*ppMap)->gMutex); + taosMemoryFreeClear((*ppMap)); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + TSDB_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t getStreamNotifyEventHeader(const char* streamName, char** pHeader) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + cJSON* obj = NULL; + cJSON* streams = NULL; + cJSON* stream = NULL; + char msgId[37]; + + TSDB_CHECK_NULL(streamName, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHeader, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pHeader = NULL; + + code = taosGetSystemUUIDLimit36(msgId, sizeof(msgId)); + TSDB_CHECK_CODE(code, lino, _end); + + stream = cJSON_CreateObject(); + TSDB_CHECK_NULL(stream, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(stream, "streamName", cJSON_CreateStringReference(streamName)); + JSON_CHECK_ADD_ITEM(stream, "events", cJSON_CreateArray()); + + streams = cJSON_CreateArray(); + 
TSDB_CHECK_CONDITION(cJSON_AddItemToArray(streams, stream), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + stream = NULL; + + obj = cJSON_CreateObject(); + TSDB_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(obj, "messageId", cJSON_CreateStringReference(msgId)); + JSON_CHECK_ADD_ITEM(obj, "timestamp", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(obj, "streams", streams); + streams = NULL; + + *pHeader = cJSON_PrintUnformatted(obj); + TSDB_CHECK_NULL(*pHeader, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (stream != NULL) { + cJSON_Delete(stream); + } + if (streams != NULL) { + cJSON_Delete(streams); + } + if (obj != NULL) { + cJSON_Delete(obj); + } + return code; +} + +static int32_t packupStreamNotifyEvent(const char* streamName, const SArray* pBlocks, char** pMsg, + int32_t* nNotifyEvents) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t numOfBlocks = 0; + int32_t msgHeaderLen = 0; + int32_t msgTailLen = 0; + int32_t msgLen = 0; + char* msgHeader = NULL; + const char* msgTail = "]}]}"; + char* msg = NULL; + + TSDB_CHECK_NULL(pMsg, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pMsg = NULL; + numOfBlocks = taosArrayGetSize(pBlocks); + *nNotifyEvents = 0; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + msgLen += varDataLen(val) + 1; + } + *nNotifyEvents += pDataBlock->info.rows; + } + + if (msgLen == 0) { + // skip since no notification events found + goto _end; + } + + code = getStreamNotifyEventHeader(streamName, 
&msgHeader); + TSDB_CHECK_CODE(code, lino, _end); + msgHeaderLen = strlen(msgHeader); + msgTailLen = strlen(msgTail); + msgLen += msgHeaderLen; + + msg = taosMemoryMalloc(msgLen); + TSDB_CHECK_NULL(msg, code, lino, _end, terrno); + char* p = msg; + TAOS_STRNCPY(p, msgHeader, msgHeaderLen); + p += msgHeaderLen - msgTailLen; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + TAOS_STRNCPY(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + *(p++) = ','; + } + } + + p -= 1; + TAOS_STRNCPY(p, msgTail, msgTailLen); + *(p + msgTailLen) = '\0'; + + *pMsg = msg; + msg = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msgHeader != NULL) { + cJSON_free(msgHeader); + } + if (msg != NULL) { + taosMemoryFreeClear(msg); + } + return code; +} + +static int32_t sendSingleStreamNotify(SStreamNotifyHandle* pHandle, char* msg) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + CURLcode res = CURLE_OK; + uint64_t sentLen = 0; + uint64_t totalLen = 0; + size_t nbytes = 0; + + TSDB_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHandle->curl, code, lino, _end, TSDB_CODE_INVALID_PARA); + + totalLen = strlen(msg); + while (sentLen < totalLen) { + res = curl_ws_send(pHandle->curl, msg + sentLen, totalLen - sentLen, &nbytes, 0, CURLWS_TEXT); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + sentLen += nbytes; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + 
stopStreamNotifyConn(pHandle); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* msg = NULL; + int32_t nNotifyAddr = 0; + int32_t nNotifyEvents = 0; + SStreamNotifyHandle* pHandle = NULL; + + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA); + + nNotifyAddr = taosArrayGetSize(pTask->notifyInfo.pNotifyAddrUrls); + if (nNotifyAddr == 0) { + goto _end; + } + + code = packupStreamNotifyEvent(pTask->notifyInfo.streamName, pBlocks, &msg, &nNotifyEvents); + TSDB_CHECK_CODE(code, lino, _end); + if (msg == NULL) { + goto _end; + } + + tqDebug("stream task %s prepare to send %d notify events, total msg length: %" PRIu64, pTask->notifyInfo.streamName, + nNotifyEvents, (uint64_t)strlen(msg)); + + for (int32_t i = 0; i < nNotifyAddr; ++i) { + if (streamTaskShouldStop(pTask)) { + break; + } + const char* url = taosArrayGetP(pTask->notifyInfo.pNotifyAddrUrls, i); + code = acquireStreamNotifyHandle(pVnode->pNotifyHandleMap, url, &pHandle); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to get stream notify handle of %s", url); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + continue; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + continue; + } + } + code = sendSingleStreamNotify(pHandle, msg); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to send stream notify handle to %s since %s", url, tstrerror(code)); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling 
mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + } + } else { + tqDebug("stream task %s send %d notify events to %s successfully", pTask->notifyInfo.streamName, nNotifyEvents, + url); + } + releaseStreamNotifyHandle(&pHandle); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msg) { + taosMemoryFreeClear(msg); + } + return code; +} diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 06b7b33cd8..1880156f61 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -86,6 +86,14 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { if (code) { return code; } + + code = + qSetStreamNotifyInfo(pTask->exec.pExecutor, pTask->notifyInfo.notifyEventTypes, + pTask->notifyInfo.pSchemaWrapper, pTask->notifyInfo.stbFullName, IS_NEW_SUBTB_RULE(pTask)); + if (code) { + tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code)); + return code; + } } streamSetupScheduleTrigger(pTask); @@ -1357,4 +1365,4 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { streamMetaReleaseTask(pMeta, pTask); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 6de5298728..280ee527f7 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -15,6 +15,7 @@ #include "sync.h" #include "tcs.h" +#include "tq.h" #include "tsdb.h" #include "vnd.h" @@ -483,6 +484,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC ret = taosRealPath(tdir, NULL, sizeof(tdir)); TAOS_UNUSED(ret); + // init handle map for stream event notification + ret = tqInitNotifyHandleMap(&pVnode->pNotifyHandleMap); + 
if (ret != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to init StreamNotifyHandleMap", TD_VID(pVnode)); + terrno = ret; + goto _err; + } + // open query vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode)); if (vnodeQueryOpen(pVnode)) { @@ -555,6 +564,7 @@ void vnodeClose(SVnode *pVnode) { vnodeAWait(&pVnode->commitTask); vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); + tqDestroyNotifyHandleMap(&pVnode->pNotifyHandleMap); tqClose(pVnode->pTq); walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 04e7884020..84eba69acb 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -449,11 +449,17 @@ typedef struct STimeWindowAggSupp { SColumnInfoData timeWindowData; // query time window info for scalar function execution. } STimeWindowAggSupp; +typedef struct SStreamNotifyEventSupp { + SArray* pWindowEvents; // Array of SStreamNotifyEvent, storing window events and trigger values. + SHashObj* pTableNameHashMap; // Hash map from groupid to the dest child table name. + SHashObj* pResultHashMap; // Hash map from groupid+skey to the window agg result. + SSDataBlock* pEventBlock; // The datablock contains all window events and results. 
+} SStreamNotifyEventSupp; + typedef struct SSteamOpBasicInfo { - int32_t primaryPkIndex; - bool updateOperatorInfo; - SSDataBlock* pEventRes; - SArray* pEventInfo; + int32_t primaryPkIndex; + bool updateOperatorInfo; + SStreamNotifyEventSupp windowEventSup; } SSteamOpBasicInfo; typedef struct SStreamFillSupporter { @@ -769,6 +775,8 @@ typedef struct SStreamEventAggOperatorInfo { SSHashObj* pPkDeleted; bool destHasPrimaryKey; struct SOperatorInfo* pOperator; + SNodeList* pStartCondCols; + SNodeList* pEndCondCols; } SStreamEventAggOperatorInfo; typedef struct SStreamCountAggOperatorInfo { diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index f726e4300f..86ee6f4124 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -71,6 +71,10 @@ typedef struct { SVersionRange fillHistoryVer; STimeWindow fillHistoryWindow; SStreamState* pState; + int32_t eventTypes; // event types to notify + SSchemaWrapper* notifyResultSchema; // agg result to notify + char* stbFullName; // used to generate dest child table name + bool newSubTableRule; // used to generate dest child table name } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index 0c0ea0d6fc..7b3c828351 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -19,7 +19,10 @@ extern "C" { #endif +#include "cJSON.h" +#include "cmdnodes.h" #include "executorInt.h" +#include "querytask.h" #include "tutil.h" #define FILL_POS_INVALID 0 @@ -107,6 +110,13 @@ int32_t buildAllResultKey(SStateStore* pStateStore, SStreamState* pState, TSKEY int32_t initOffsetInfo(int32_t** ppOffset, SSDataBlock* pRes); TSKEY compareTs(void* pKey); +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + 
SStreamNotifyEventSupp* sup); +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup); +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 1386b0b82f..39bef9c95f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -250,6 +250,28 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) { return code; } +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, + const char* stbFullName, bool newSubTableRule) { + int32_t code = TSDB_CODE_SUCCESS; + SStreamTaskInfo *pStreamInfo = NULL; + + if (tinfo == 0 || eventTypes == 0 || pSchemaWrapper == NULL || stbFullName == NULL) { + goto _end; + } + + pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo; + pStreamInfo->eventTypes = eventTypes; + pStreamInfo->notifyResultSchema = tCloneSSchemaWrapper(pSchemaWrapper); + if (pStreamInfo->notifyResultSchema == NULL) { + code = terrno; + } + pStreamInfo->stbFullName = taosStrdup(stbFullName); + pStreamInfo->newSubTableRule = newSubTableRule; + +_end: + return code; +} + int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) { if (tinfo == NULL) { return TSDB_CODE_APP_ERROR; diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index c6a1900b41..20c80df4fa 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -262,6 +262,8 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) { static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSchemaWrapper(pStreamInfo->schema); tOffsetDestroy(&pStreamInfo->currentOffset); + tDeleteSchemaWrapper(pStreamInfo->notifyResultSchema); + taosMemoryFree(pStreamInfo->stbFullName); 
} static void freeBlock(void* pParam) { diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index a9a47580dc..5f4d6b30fa 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -93,6 +93,16 @@ void destroyStreamEventOperatorInfo(void* param) { pInfo->pEndCondInfo = NULL; } + if (pInfo->pStartCondCols != NULL) { + nodesDestroyList(pInfo->pStartCondCols); + pInfo->pStartCondCols = NULL; + } + + if (pInfo->pEndCondCols != NULL) { + nodesDestroyList(pInfo->pEndCondCols); + pInfo->pEndCondCols = NULL; + } + taosMemoryFreeClear(param); } @@ -310,14 +320,6 @@ void doDeleteEventWindow(SStreamAggSupporter* pAggSup, SSHashObj* pSeUpdated, SS removeSessionResult(pAggSup, pSeUpdated, pAggSup->pResultRows, pKey); } -static int32_t setEventData(SSteamOpBasicInfo* pBasicInfo, SSessionKey* pWinKey) { - void* pRes = taosArrayPush(pBasicInfo->pEventInfo, pWinKey); - if (pRes != NULL) { - return TSDB_CODE_SUCCESS; - } - return terrno; -} - static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pSeUpdated, SSHashObj* pStDeleted) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -393,8 +395,10 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl &nextWinKey, &winCode); QUERY_CHECK_CODE(code, lino, _end); - if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) && winCode != TSDB_CODE_SUCCESS) { - code = setEventData(&pInfo->basic, &curWin.winInfo.sessionWin); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) && + *(bool*)colDataGetNumData(pColStart, i) && winCode != TSDB_CODE_SUCCESS) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_OPEN, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pStartCondCols, i, &pInfo->basic.windowEventSup); QUERY_CHECK_CODE(code, lino, _end); } @@ -464,6 +468,12 @@ static 
void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo)); QUERY_CHECK_CODE(code, lino, _end); } + + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_CLOSE, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pEndCondCols, i + winRows - 1, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } } _end: @@ -582,42 +592,13 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) { } } -static void buildEventNotifyResult(SSteamOpBasicInfo* pBasicInfo) { - int32_t code = TSDB_CODE_SUCCESS; - int32_t lino = 0; - - blockDataCleanup(pBasicInfo->pEventRes); - int32_t size = taosArrayGetSize(pBasicInfo->pEventInfo); - code = blockDataEnsureCapacity(pBasicInfo->pEventRes, size); - QUERY_CHECK_CODE(code, lino, _end); - for (int32_t i = 0; i < size; i++) { - SSessionKey* pKey = taosArrayGet(pBasicInfo->pEventInfo, i); - uint64_t uid = 0; - code = appendDataToSpecialBlock(pBasicInfo->pEventRes, &pKey->win.skey, &pKey->win.ekey, &uid, &pKey->groupId, NULL); - QUERY_CHECK_CODE(code, lino, _end); - } - taosArrayClear(pBasicInfo->pEventInfo); - -_end: - if (code != TSDB_CODE_SUCCESS) { - qError("%s failed at line %d since %s.", __func__, lino, tstrerror(code)); - } -} - - static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SStreamEventAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - buildEventNotifyResult(&pInfo->basic); - if (pInfo->basic.pEventRes->info.rows > 0) { - printDataBlock(pInfo->basic.pEventRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->basic.pEventRes; - return code; - } - doBuildDeleteDataBlock(pOperator, pInfo->pSeDeleted, 
pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); @@ -628,10 +609,27 @@ static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes); if (pBInfo->pRes->info.rows > 0) { printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addAggResultNotifyEvent(pBInfo->pRes, pTaskInfo->streamInfo.notifyResultSchema, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } (*ppRes) = pBInfo->pRes; return code; } + + code = buildNotifyEventBlock(pTaskInfo, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + if (pInfo->basic.windowEventSup.pEventBlock->info.rows > 0) { + printDataBlock(pInfo->basic.windowEventSup.pEventBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->basic.windowEventSup.pEventBlock; + return code; + } + +_end: (*ppRes) = NULL; + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. 
task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } return code; } @@ -1041,6 +1039,12 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0); QUERY_CHECK_CODE(code, lino, _error); + code = + nodesCollectColumnsFromNode((SNode*)pEventNode->pStartCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pStartCondCols); + QUERY_CHECK_CODE(code, lino, _error); + code = nodesCollectColumnsFromNode((SNode*)pEventNode->pEndCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pEndCondCols); + QUERY_CHECK_CODE(code, lino, _error); + *pOptrInfo = pOperator; return TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/streamexecutorInt.c b/source/libs/executor/src/streamexecutorInt.c index 1e7fbfa446..9cafdfff0c 100644 --- a/source/libs/executor/src/streamexecutorInt.c +++ b/source/libs/executor/src/streamexecutorInt.c @@ -13,9 +13,20 @@ * along with this program. If not, see . */ +#include "streamexecutorInt.h" + #include "executorInt.h" #include "tdatablock.h" +#define NOTIFY_EVENT_NAME_CACHE_LIMIT_MB 16 + +typedef struct SStreamNotifyEvent { + uint64_t gid; + TSKEY skey; + char* content; + bool isEnd; +} SStreamNotifyEvent; + void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) { if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) { pBasicInfo->updateOperatorInfo = true; @@ -30,19 +41,509 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->updateOperatorInfo = false; } +static void destroyStreamWindowEvent(void* ptr) { + SStreamNotifyEvent* pEvent = ptr; + if (pEvent == NULL || pEvent->content == NULL) return; + cJSON_free(pEvent->content); +} + +static void destroyStreamNotifyEventSupp(SStreamNotifyEventSupp* sup) { + if (sup == NULL) return; + taosArrayDestroyEx(sup->pWindowEvents, destroyStreamWindowEvent); + taosHashCleanup(sup->pTableNameHashMap); + taosHashCleanup(sup->pResultHashMap); + 
blockDataDestroy(sup->pEventBlock); + *sup = (SStreamNotifyEventSupp){0}; +} + +static int32_t initStreamNotifyEventSupp(SStreamNotifyEventSupp *sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SSDataBlock* pBlock = NULL; + SColumnInfoData infoData = {0}; + + if (sup == NULL) { + goto _end; + } + + code = createDataBlock(&pBlock); + QUERY_CHECK_CODE(code, lino, _end); + + pBlock->info.type = STREAM_NOTIFY_EVENT; + pBlock->info.watermark = INT64_MIN; + + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = tDataTypes[infoData.info.type].bytes; + code = blockDataAppendColInfo(pBlock, &infoData); + QUERY_CHECK_CODE(code, lino, _end); + + sup->pWindowEvents = taosArrayInit(0, sizeof(SStreamNotifyEvent)); + QUERY_CHECK_NULL(sup->pWindowEvents, code, lino, _end, terrno); + sup->pTableNameHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pTableNameHashMap, code, lino, _end, terrno); + sup->pResultHashMap = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pResultHashMap, code, lino, _end, terrno); + taosHashSetFreeFp(sup->pResultHashMap, destroyStreamWindowEvent); + sup->pEventBlock = pBlock; + pBlock = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + if (sup) { + destroyStreamNotifyEventSupp(sup); + } + } + if (pBlock != NULL) { + blockDataDestroy(pBlock); + } + return code; +} + int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->primaryPkIndex = -1; pBasicInfo->updateOperatorInfo = false; - pBasicInfo->pEventInfo = taosArrayInit(4, sizeof(SSessionKey)); - if (pBasicInfo->pEventInfo == NULL) { - return terrno; - } - return createSpecialDataBlock(STREAM_EVENT_OPEN_WINDOW, &pBasicInfo->pEventRes); + return initStreamNotifyEventSupp(&pBasicInfo->windowEventSup); } void 
destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { - blockDataDestroy(pBasicInfo->pEventRes); - pBasicInfo->pEventRes = NULL; - taosArrayDestroy(pBasicInfo->pEventInfo); - pBasicInfo->pEventInfo = NULL; + destroyStreamNotifyEventSupp(&pBasicInfo->windowEventSup); +} + +static void streamNotifyGetEventWindowId(const SSessionKey* pSessionKey, char *buf) { + uint64_t hash = 0; + uint64_t ar[2]; + + ar[0] = pSessionKey->groupId; + ar[1] = pSessionKey->win.skey; + hash = MurmurHash3_64((char*)ar, sizeof(ar)); + buf = u64toaFastLut(hash, buf); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + QUERY_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t jsonAddColumnField(const char* colName, const SColumnInfoData* pColData, int32_t ri, cJSON* obj) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* temp = NULL; + + QUERY_CHECK_NULL(colName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (colDataIsNull_s(pColData, ri)) { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNull()); + goto _end; + } + + switch (pColData->info.type) { + case TSDB_DATA_TYPE_BOOL: { + bool val = *(bool*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateBool(val)); + break; + } + + case TSDB_DATA_TYPE_TINYINT: { + int8_t val = *(int8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int16_t val = *(int16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: { + 
int64_t val = *(int64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + float val = *(float*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_DOUBLE: { + double val = *(double*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_NCHAR: { + // cJSON requires null-terminated strings, but this data is not null-terminated, + // so we need to manually copy the string and add null termination. + const char* src = varDataVal(colDataGetVarData(pColData, ri)); + int32_t len = varDataLen(colDataGetVarData(pColData, ri)); + temp = cJSON_malloc(len + 1); + QUERY_CHECK_NULL(temp, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + memcpy(temp, src, len); + temp[len] = '\0'; + + cJSON* item = cJSON_CreateStringReference(temp); + JSON_CHECK_ADD_ITEM(obj, colName, item); + + // let the cjson object to free memory later + item->type &= ~cJSON_IsReference; + temp = NULL; + break; + } + + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t val = *(uint8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t val = *(uint16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UINT: { + uint32_t val = *(uint32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t val = *(uint64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + default: { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateStringReference("")); + break; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + 
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (temp) { + cJSON_free(temp); + } + return code; +} + +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode* node = NULL; + cJSON* event = NULL; + cJSON* fields = NULL; + cJSON* cond = NULL; + SStreamNotifyEvent item = {0}; + char windowId[32]; + + QUERY_CHECK_NULL(pSessionKey, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock->pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pCondCols, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add stream notify event from event window, type: %s, start: %" PRId64 ", end: %" PRId64, + (eventType == SNOTIFY_EVENT_WINDOW_OPEN) ? 
"WINDOW_OPEN" : "WINDOW_CLOSE", pSessionKey->win.skey, + pSessionKey->win.ekey); + + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // add basic info + streamNotifyGetEventWindowId(pSessionKey, windowId); + if (eventType == SNOTIFY_EVENT_WINDOW_OPEN) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_OPEN")); + } else if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_CLOSE")); + } + JSON_CHECK_ADD_ITEM(event, "eventTime", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(event, "windowId", cJSON_CreateStringReference(windowId)); + JSON_CHECK_ADD_ITEM(event, "windowType", cJSON_CreateStringReference("Event")); + JSON_CHECK_ADD_ITEM(event, "windowStart", cJSON_CreateNumber(pSessionKey->win.skey)); + if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "windowEnd", cJSON_CreateNumber(pSessionKey->win.ekey)); + } + + // create fields object to store matched column values + fields = cJSON_CreateObject(); + QUERY_CHECK_NULL(fields, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + FOREACH(node, pCondCols) { + SColumnNode* pColDef = (SColumnNode*)node; + SColumnInfoData* pColData = taosArrayGet(pInputBlock->pDataBlock, pColDef->slotId); + code = jsonAddColumnField(pColDef->colName, pColData, ri, fields); + QUERY_CHECK_CODE(code, lino, _end); + } + + // add trigger condition + cond = cJSON_CreateObject(); + QUERY_CHECK_NULL(cond, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(cond, "conditionIndex", cJSON_CreateNumber(0)); + JSON_CHECK_ADD_ITEM(cond, "fieldValues", fields); + fields = NULL; + JSON_CHECK_ADD_ITEM(event, "triggerConditions", cond); + cond = NULL; + + // convert json object to string value + item.gid = pSessionKey->groupId; + item.skey = pSessionKey->win.skey; + item.isEnd = (eventType == SNOTIFY_EVENT_WINDOW_CLOSE); + item.content = 
cJSON_PrintUnformatted(event); + QUERY_CHECK_NULL(taosArrayPush(sup->pWindowEvents, &item), code, lino, _end, terrno); + item.content = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (cond != NULL) { + cJSON_Delete(cond); + } + if (fields != NULL) { + cJSON_Delete(fields); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode * node = NULL; + cJSON* event = NULL; + cJSON* result = NULL; + SStreamNotifyEvent item = {0}; + SColumnInfoData* pWstartCol = NULL; + + QUERY_CHECK_NULL(pResultBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pSchemaWrapper, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add %" PRId64 " stream notify results from window agg", pResultBlock->info.rows); + + pWstartCol = taosArrayGet(pResultBlock->pDataBlock, 0); + for (int32_t i = 0; i< pResultBlock->info.rows; ++i) { + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // convert the result row into json + result = cJSON_CreateObject(); + QUERY_CHECK_NULL(result, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + for (int32_t j = 0; j < pSchemaWrapper->nCols; ++j) { + SSchema *pCol = pSchemaWrapper->pSchema + j; + SColumnInfoData *pColData = taosArrayGet(pResultBlock->pDataBlock, pCol->colId - 1); + code = jsonAddColumnField(pCol->name, pColData, i, result); + QUERY_CHECK_CODE(code, lino, _end); + } + JSON_CHECK_ADD_ITEM(event, "result", result); + result = NULL; + + item.gid = pResultBlock->info.id.groupId; + item.skey = *(uint64_t*)colDataGetNumData(pWstartCol, i); + item.content = cJSON_PrintUnformatted(event); + 
code = taosHashPut(sup->pResultHashMap, &item.gid, sizeof(item.gid) + sizeof(item.skey), &item, sizeof(item)); + TSDB_CHECK_CODE(code, lino, _end); + item.content = NULL; + + cJSON_Delete(event); + event = NULL; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (result != NULL) { + cJSON_Delete(result); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +static int32_t streamNotifyGetDestTableName(const SExecTaskInfo* pTaskInfo, uint64_t gid, char** pTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + const SStorageAPI* pAPI = NULL; + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + char parTbName[TSDB_TABLE_NAME_LEN]; + const SStreamTaskInfo* pStreamInfo = NULL; + + QUERY_CHECK_NULL(pTaskInfo, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pTableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pTableName = NULL; + + pAPI = &pTaskInfo->storageAPI; + code = pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, gid, &tbname, false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode != TSDB_CODE_SUCCESS) { + parTbName[0] = '\0'; + } else { + tstrncpy(parTbName, tbname, sizeof(parTbName)); + } + pAPI->stateStore.streamStateFreeVal(tbname); + + pStreamInfo = &pTaskInfo->streamInfo; + code = buildSinkDestTableName(parTbName, pStreamInfo->stbFullName, gid, pStreamInfo->newSubTableRule, pTableName); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t streamNotifyFillTableName(const char* tableName, const SStreamNotifyEvent* pEvent, + const SStreamNotifyEvent* pResult, char** pVal) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + static const char* prefix = "{\"tableName\":\""; + uint64_t 
prefixLen = 0; + uint64_t nameLen = 0; + uint64_t eventLen = 0; + uint64_t resultLen = 0; + uint64_t valLen = 0; + char* val = NULL; + char* p = NULL; + + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pEvent, code, lino , _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pVal, code, lino , _end, TSDB_CODE_INVALID_PARA); + + *pVal = NULL; + prefixLen = strlen(prefix); + nameLen = strlen(tableName); + eventLen = strlen(pEvent->content); + + if (pResult != NULL) { + resultLen = strlen(pResult->content); + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + resultLen; + } else { + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + 1; + } + val = taosMemoryMalloc(valLen); + QUERY_CHECK_NULL(val, code, lino, _end, terrno); + varDataSetLen(val, valLen - VARSTR_HEADER_SIZE); + + p = varDataVal(val); + TAOS_STRNCPY(p, prefix, prefixLen); + p += prefixLen; + TAOS_STRNCPY(p, tableName, nameLen); + p += nameLen; + *(p++) = '\"'; + TAOS_STRNCPY(p, pEvent->content, eventLen); + *p = ','; + + if (pResult != NULL) { + p += eventLen - 1; + TAOS_STRNCPY(p, pResult->content, resultLen); + *p = ','; + } + *pVal = val; + val = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + return code; +} + +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SColumnInfoData* pEventStrCol = NULL; + int32_t nWindowEvents = 0; + int32_t nWindowResults = 0; + char* val = NULL; + + if (pTaskInfo == NULL || sup == NULL) { + goto _end; + } + + QUERY_CHECK_NULL(sup->pEventBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + blockDataCleanup(sup->pEventBlock); + nWindowEvents = taosArrayGetSize(sup->pWindowEvents); + nWindowResults = taosHashGetSize(sup->pResultHashMap); + qDebug("start to build stream notify 
event block, nWindowEvents: %d, nWindowResults: %d", nWindowEvents, + nWindowResults); + if (nWindowEvents == 0) { + goto _end; + } + + code = blockDataEnsureCapacity(sup->pEventBlock, nWindowEvents); + QUERY_CHECK_CODE(code, lino, _end); + + pEventStrCol = taosArrayGet(sup->pEventBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + QUERY_CHECK_NULL(pEventStrCol, code, lino, _end, terrno); + + for (int32_t i = 0; i < nWindowEvents; ++i) { + SStreamNotifyEvent* pResult = NULL; + SStreamNotifyEvent* pEvent = taosArrayGet(sup->pWindowEvents, i); + char* tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + if (tableName == NULL) { + code = streamNotifyGetDestTableName(pTaskInfo, pEvent->gid, &tableName); + QUERY_CHECK_CODE(code, lino, _end); + code = taosHashPut(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid), tableName, strlen(tableName) + 1); + taosMemoryFreeClear(tableName); + QUERY_CHECK_CODE(code, lino, _end); + tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + if (pEvent->isEnd) { + pResult = taosHashGet(sup->pResultHashMap, &pEvent->gid, sizeof(pEvent->gid) + sizeof(pEvent->skey)); + QUERY_CHECK_NULL(pResult, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + code = streamNotifyFillTableName(tableName, pEvent, pResult, &val); + QUERY_CHECK_CODE(code, lino, _end); + code = colDataSetVal(pEventStrCol, i, val, false); + QUERY_CHECK_CODE(code, lino, _end); + taosMemoryFreeClear(val); + sup->pEventBlock->info.rows++; + } + + if (taosHashGetMemSize(sup->pTableNameHashMap) >= NOTIFY_EVENT_NAME_CACHE_LIMIT_MB * 1024 * 1024) { + taosHashClear(sup->pTableNameHashMap); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + if (sup != NULL) { + taosArrayClearEx(sup->pWindowEvents, 
destroyStreamWindowEvent); + taosHashClear(sup->pResultHashMap); + } + return code; } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index 45707e670e..44799f193b 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -55,6 +55,7 @@ void destroyStreamIntervalSliceOperatorInfo(void* param) { pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); pInfo->pUpdated = NULL; diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index 9ec6063486..4fe8efe397 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -150,6 +150,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); colDataDestroy(&pInfo->twAggSup.timeWindowData); destroyStreamAggSupporter(&pInfo->streamAggSup); resetPrevAndNextWindow(pInfo->pFillSup); diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index bea9b96215..bfe86aa2ac 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -99,6 +99,8 @@ const char* nodesNodeName(ENodeType type) { return "CountWindow"; case QUERY_NODE_ANOMALY_WINDOW: return "AnomalyWindow"; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return "StreamNotifyOptions"; case QUERY_NODE_SET_OPERATOR: return "SetOperator"; case QUERY_NODE_SELECT_STMT: @@ -5812,6 +5814,45 @@ static int32_t jsonToStreamOptions(const SJson* pJson, void* pObj) { return code; } +static const char* jkStreamNotifyOptionsAddrUrls = "AddrUrls"; +static const char* jkStreamNotifyOptionsEventType = "EventType"; +static const char* 
jkStreamNotifyOptionsErrorHandle = "ErrorHandle"; +static const char* jkStreamNotifyOptionsNotifyHistory = "NotifyHistory"; + +static int32_t streamNotifyOptionsToJson(const void* pObj, SJson* pJson) { + const SStreamNotifyOptions* pNotifyOption = (const SStreamNotifyOptions*)pObj; + int32_t code = nodeListToJson(pJson, jkStreamNotifyOptionsAddrUrls, pNotifyOption->pAddrUrls); + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsEventType, pNotifyOption->eventTypes); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsErrorHandle, pNotifyOption->errorHandle); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddBoolToObject(pJson, jkStreamNotifyOptionsNotifyHistory, pNotifyOption->notifyHistory); + } + + return code; +} + +static int32_t jsonToStreamNotifyOptions(const SJson* pJson, void* pObj) { + SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pObj; + int32_t code = jsonToNodeList(pJson, jkStreamNotifyOptionsAddrUrls, &pNotifyOption->pAddrUrls); + int32_t val = 0; + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsEventType, &val); + pNotifyOption->eventTypes = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsErrorHandle, &val); + pNotifyOption->errorHandle = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetBoolValue(pJson, jkStreamNotifyOptionsNotifyHistory, &pNotifyOption->notifyHistory); + } + return code; +} + static const char* jkWhenThenWhen = "When"; static const char* jkWhenThenThen = "Then"; @@ -7207,6 +7248,7 @@ static const char* jkCreateStreamStmtOptions = "Options"; static const char* jkCreateStreamStmtQuery = "Query"; static const char* jkCreateStreamStmtTags = "Tags"; static const char* jkCreateStreamStmtSubtable = "Subtable"; +static const char* jkCreateStreamStmtNotifyOptions = "NotifyOptions"; static int32_t createStreamStmtToJson(const void* 
pObj, SJson* pJson) { const SCreateStreamStmt* pNode = (const SCreateStreamStmt*)pObj; @@ -7233,6 +7275,9 @@ static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkCreateStreamStmtSubtable, nodeToJson, pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkCreateStreamStmtNotifyOptions, nodeToJson, pNode->pNotifyOptions); + } return code; } @@ -7262,6 +7307,9 @@ static int32_t jsonToCreateStreamStmt(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkCreateStreamStmtSubtable, &pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkCreateStreamStmtNotifyOptions, (SNode**)&pNode->pNotifyOptions); + } return code; } @@ -8029,6 +8077,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return countWindowNodeToJson(pObj, pJson); case QUERY_NODE_ANOMALY_WINDOW: return anomalyWindowNodeToJson(pObj, pJson); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return streamNotifyOptionsToJson(pObj, pJson); case QUERY_NODE_SET_OPERATOR: return setOperatorToJson(pObj, pJson); case QUERY_NODE_SELECT_STMT: @@ -8402,6 +8452,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToCountWindowNode(pJson, pObj); case QUERY_NODE_ANOMALY_WINDOW: return jsonToAnomalyWindowNode(pJson, pObj); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return jsonToStreamNotifyOptions(pJson, pObj); case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 3d4df385f7..ae5b302d2d 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -467,6 +467,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_WINDOW_OFFSET: code = makeNode(type, sizeof(SWindowOffsetNode), 
&pNode); break; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + code = makeNode(type, sizeof(SStreamNotifyOptions), &pNode); + break; case QUERY_NODE_SET_OPERATOR: code = makeNode(type, sizeof(SSetOperator), &pNode); break; @@ -1267,6 +1270,11 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pAround->pTimepoint); break; } + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: { + SStreamNotifyOptions* pNotifyOptions = (SStreamNotifyOptions*)pNode; + nodesDestroyList(pNotifyOptions->pAddrUrls); + break; + } case QUERY_NODE_SET_OPERATOR: { SSetOperator* pStmt = (SSetOperator*)pNode; nodesDestroyList(pStmt->pProjectionList); @@ -1479,6 +1487,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pQuery); nodesDestroyList(pStmt->pTags); nodesDestroyNode(pStmt->pSubtable); + nodesDestroyNode((SNode*)pStmt->pNotifyOptions); tFreeSCMCreateStreamReq(pStmt->pReq); taosMemoryFreeClear(pStmt->pReq); break; diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index dc9986ad04..387bccf358 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -296,8 +296,12 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con SNode* createStreamOptions(SAstCreateContext* pCxt); SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptionsSetFlag setflag, SToken* pToken, SNode* pNode); +SNode* createStreamNotifyOptions(SAstCreateContext *pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes); +SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag, + SToken* pToken); SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols); + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions); SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool 
ignoreNotExists, SToken* pStreamName); SNode* createPauseStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createResumeStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, bool ignoreUntreated, SToken* pStreamName); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 7f383afe48..439af13d71 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -785,7 +785,7 @@ full_view_name(A) ::= db_name(B) NK_DOT view_name(C). /************************************************ create/drop stream **************************************************/ cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G) - AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); } + AS query_or_subquery(D) notify_opt(I). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H, I); } cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); } cmd ::= PAUSE STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createPauseStreamStmt(pCxt, A, &B); } cmd ::= RESUME STREAM exists_opt(A) ignore_opt(C) stream_name(B). { pCxt->pRootNode = createResumeStreamStmt(pCxt, A, C, &B); } @@ -832,6 +832,26 @@ subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. ignore_opt(A) ::= . { A = false; } ignore_opt(A) ::= IGNORE UNTREATED. { A = true; } +notify_opt(A) ::= . { A = NULL; } +notify_opt(A) ::= notify_def(B). { A = B; } + +notify_def(A) ::= NOTIFY NK_LP url_def_list(B) NK_RP ON NK_LP event_def_list(C) NK_RP. { A = createStreamNotifyOptions(pCxt, B, C); } +notify_def(A) ::= notify_def(B) ON_FAILURE DROP(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) ON_FAILURE PAUSE(C). 
{ A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) NOTIFY_HISTORY NK_INTEGER(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_NOTIFY_HISTORY_SET, &C); } + +%type url_def_list { SNodeList* } +%destructor url_def_list { nodesDestroyList($$); } +url_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +url_def_list(A) ::= url_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + +%type event_def_list { SNodeList* } +%destructor event_def_list { nodesDestroyList($$); } +event_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +event_def_list(A) ::= event_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + + + /************************************************ kill connection/query ***********************************************/ cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); } cmd ::= KILL QUERY NK_STRING(A). 
{ pCxt->pRootNode = createKillQueryStmt(pCxt, &A); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 5b90fd601e..c875cbad05 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1526,8 +1526,8 @@ SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhe pCaseWhen->pCase = pCase; pCaseWhen->pWhenThenList = pWhenThenList; pCaseWhen->pElse = pElse; - pCaseWhen->tz = pCxt->pQueryCxt->timezone; - pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; + pCaseWhen->tz = pCxt->pQueryCxt->timezone; + pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; return (SNode*)pCaseWhen; _err: nodesDestroyNode(pCase); @@ -3657,8 +3657,115 @@ SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptions return pOptions; } +static bool validateNotifyUrl(const char* url) { + const char* prefix[] = {"http://", "https://", "ws://", "wss://"}; + const char* host = NULL; + + if (!url || *url == '\0') return false; + + for (int32_t i = 0; i < ARRAY_SIZE(prefix); ++i) { + if (strncasecmp(url, prefix[i], strlen(prefix[i])) == 0) { + host = url + strlen(prefix[i]); + break; + } + } + + return (host != NULL) && (*host != '\0') && (*host != '/'); +} + +SNode* createStreamNotifyOptions(SAstCreateContext* pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes) { + SNode* pNode = NULL; + EStreamNotifyEventType eventTypes = 0; + const char* eWindowOpenStr = "WINDOW_OPEN"; + const char* eWindowCloseStr = "WINDOW_CLOSE"; + + CHECK_PARSER_STATUS(pCxt); + + if (LIST_LENGTH(pAddrUrls) == 0) { + pCxt->errCode = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "notification address cannot be empty"); + goto _err; + } + + FOREACH(pNode, pAddrUrls) { + char *url = ((SValueNode*)pNode)->literal; + if (strlen(url) >= TSDB_STREAM_NOTIFY_URL_LEN) { + pCxt->errCode = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "notification address 
\"%s\" exceed maximum length %d", url, TSDB_STREAM_NOTIFY_URL_LEN); + goto _err; + } + if (!validateNotifyUrl(url)) { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "invalid notification address \"%s\"", url); + goto _err; + } + } + + if (LIST_LENGTH(pEventTypes) == 0) { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "event types must be specified for notification"); + goto _err; + } + + FOREACH(pNode, pEventTypes) { + char *eventStr = ((SValueNode *)pNode)->literal; + if (strncasecmp(eventStr, eWindowOpenStr, strlen(eWindowOpenStr) + 1) == 0) { + BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_OPEN); + } else if (strncasecmp(eventStr, eWindowCloseStr, strlen(eWindowCloseStr) + 1) == 0) { + BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE); + } else { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "invalid event type '%s' for notification", eventStr); + goto _err; + } + } + + SStreamNotifyOptions* pNotifyOptions = NULL; + pCxt->errCode = nodesMakeNode(QUERY_NODE_STREAM_NOTIFY_OPTIONS, (SNode**)&pNotifyOptions); + CHECK_MAKE_NODE(pNotifyOptions); + pNotifyOptions->pAddrUrls = pAddrUrls; + pNotifyOptions->eventTypes = eventTypes; + pNotifyOptions->errorHandle = SNOTIFY_ERROR_HANDLE_PAUSE; + pNotifyOptions->notifyHistory = false; + nodesDestroyList(pEventTypes); + return (SNode*)pNotifyOptions; +_err: + nodesDestroyList(pAddrUrls); + nodesDestroyList(pEventTypes); + return NULL; +} + +SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag, + SToken* pToken) { + CHECK_PARSER_STATUS(pCxt); + + SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pNode; + if (BIT_FLAG_TEST_MASK(pNotifyOption->setFlag, setFlag)) { + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "stream notify options each item can only be set once"); + goto _err; + } + switch (setFlag) 
{ + case SNOTIFY_OPT_ERROR_HANDLE_SET: + pNotifyOption->errorHandle = (pToken->type == TK_DROP) ? SNOTIFY_ERROR_HANDLE_DROP : SNOTIFY_ERROR_HANDLE_PAUSE; + break; + case SNOTIFY_OPT_NOTIFY_HISTORY_SET: + pNotifyOption->notifyHistory = taosStr2Int8(pToken->z, NULL, 10); + break; + default: + break; + } + BIT_FLAG_SET_MASK(pNotifyOption->setFlag, setFlag); + return pNode; +_err: + nodesDestroyNode(pNode); + return NULL; +} + SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols) { + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions) { CHECK_PARSER_STATUS(pCxt); CHECK_NAME(checkStreamName(pCxt, pStreamName)); SCreateStreamStmt* pStmt = NULL; @@ -3674,6 +3781,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken pStmt->pTags = pTags; pStmt->pSubtable = pSubtable; pStmt->pCols = pCols; + pStmt->pNotifyOptions = (SStreamNotifyOptions*)pNotifyOptions; return (SNode*)pStmt; _err: nodesDestroyNode(pRealTable); @@ -3682,6 +3790,7 @@ _err: nodesDestroyList(pTags); nodesDestroyNode(pSubtable); nodesDestroyList(pCols); + nodesDestroyNode(pNotifyOptions); return NULL; } diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index ea2e9d712f..7ed438a7dc 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -355,6 +355,9 @@ static SKeyword keywordTable[] = { {"FORCE_WINDOW_CLOSE", TK_FORCE_WINDOW_CLOSE}, {"DISK_INFO", TK_DISK_INFO}, {"AUTO", TK_AUTO}, + {"NOTIFY", TK_NOTIFY}, + {"ON_FAILURE", TK_ON_FAILURE}, + {"NOTIFY_HISTORY", TK_NOTIFY_HISTORY}, }; // clang-format on diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 1d87b83e62..74dd1be614 100755 --- a/source/libs/parser/src/parTranslater.c +++ 
b/source/libs/parser/src/parTranslater.c @@ -12192,6 +12192,45 @@ static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt return TSDB_CODE_SUCCESS; } +static int32_t buildStreamNotifyOptions(STranslateContext* pCxt, SStreamNotifyOptions* pNotifyOptions, + SCMCreateStreamReq* pReq) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + + if (pNotifyOptions == NULL || pNotifyOptions->pAddrUrls->length == 0) { + return code; + } + + pReq->pNotifyAddrUrls = taosArrayInit(pNotifyOptions->pAddrUrls->length, POINTER_BYTES); + if (pReq->pNotifyAddrUrls != NULL) { + FOREACH(pNode, pNotifyOptions->pAddrUrls) { + char *url = taosStrndup(((SValueNode*)pNode)->literal, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + code = terrno; + break; + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + code = terrno; + taosMemoryFreeClear(url); + break; + } + } + } else { + code = terrno; + } + + if (code == TSDB_CODE_SUCCESS) { + pReq->notifyEventTypes = pNotifyOptions->eventTypes; + pReq->notifyErrorHandle = pNotifyOptions->errorHandle; + pReq->notifyHistory = pNotifyOptions->notifyHistory; + } else { + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); + pReq->pNotifyAddrUrls = NULL; + } + + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -12238,6 +12277,10 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } } + if (TSDB_CODE_SUCCESS == code) { + code = buildStreamNotifyOptions(pCxt, pStmt->pNotifyOptions, pReq); + } + return code; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 42d7f44b62..baf36d0453 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -735,7 +735,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S 
!alreadyAddGroupId(pDataBlock->info.parTbName, groupId) && groupId != 0) { if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER) { code = buildCtbNameAddGroupId(pTask->outputInfo.shuffleDispatcher.stbFullName, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 0de256d86d..dde7b197c4 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -198,6 +198,7 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } @@ -1031,6 +1032,7 @@ int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } tDecoderClear(&decoder); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index d27ed520c6..5ee8bd43f5 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -326,6 +326,11 @@ void tFreeStreamTask(void* pParam) { streamTaskDestroyActiveChkptInfo(pTask->chkInfo.pActiveInfo); pTask->chkInfo.pActiveInfo = NULL; + taosArrayDestroyP(pTask->notifyInfo.pNotifyAddrUrls, NULL); + taosMemoryFreeClear(pTask->notifyInfo.streamName); + taosMemoryFreeClear(pTask->notifyInfo.stbFullName); + tDeleteSchemaWrapper(pTask->notifyInfo.pSchemaWrapper); + taosMemoryFree(pTask); stDebug("s-task:0x%x free task completed", taskId); } @@ -1318,6 +1323,78 @@ void streamTaskFreeRefId(int64_t* pRefId) { 
metaRefMgtRemove(pRefId); } +static int32_t tEncodeStreamNotifyInfo(SEncoder* pEncoder, const SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pEncoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = taosArrayGetSize(info->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char* url = taosArrayGetP(info->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, url)); + } + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyErrorHandle)); + if (addrSize > 0) { + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->streamName)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->stbFullName)); + TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tDecodeStreamNotifyInfo(SDecoder* pDecoder, SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pDecoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &addrSize)); + info->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES); + QUERY_CHECK_NULL(info->pNotifyAddrUrls, code, lino, _exit, terrno); + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + QUERY_CHECK_NULL(url, code, lino, _exit, terrno); + if (taosArrayPush(info->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyEventTypes)); + 
TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyErrorHandle)); + if (addrSize > 0) { + char* name = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->streamName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->streamName, code, lino, _exit, terrno); + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->stbFullName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->stbFullName, code, lino, _exit, terrno); + info->pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); + if (info->pSchemaWrapper == NULL) { + TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { int32_t code = 0; @@ -1388,6 +1465,10 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5)); TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tEncodeStreamNotifyInfo(pEncoder, &pTask->notifyInfo)); + } + tEndEncode(pEncoder); _exit: return code; @@ -1486,8 +1567,12 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { } TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tDecodeStreamNotifyInfo(pDecoder, &pTask->notifyInfo)); + } + tEndDecode(pDecoder); _exit: return code; -} \ No newline at end of file +} diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 4f5ca8d789..03ef00a0c0 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -1490,3 +1490,32 @@ bool taosAssertRelease(bool condition) { return true; } #endif + +char* u64toaFastLut(uint64_t val, char* buf) { 
+ static const char* lut = + "0001020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455" + "5657585960616263646566676869707172737475767778798081828384858687888990919293949596979899"; + + char temp[24]; + char* p = temp; + + while (val >= 100) { + strncpy(p, lut + (val % 100) * 2, 2); + val /= 100; + p += 2; + } + + if (val >= 10) { + strncpy(p, lut + val * 2, 2); + p += 2; + } else if (val > 0 || p == temp) { + *(p++) = val + '0'; + } + + while (p != temp) { + *buf++ = *--p; + } + + *buf = '\0'; + return buf; +} From dd2a8ecf22328e4be38ccaf4144909882bf0e4b9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 22 Jan 2025 10:59:31 +0800 Subject: [PATCH 118/120] fix:[TD-33556] tmq close elegantly to avoid invalid read in TD-32585 --- source/client/src/clientTmq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 4d24548a31..007a23720c 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1649,7 +1649,6 @@ void tmqMgmtClose(void) { tmqMgmt.rsetId = -1; } (void)taosThreadMutexUnlock(&tmqMgmt.lock); - (void)taosThreadMutexDestroy(&tmqMgmt.lock); } tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { From 90879f97d4b9a063f1b16415c66c4c07a157cf9c Mon Sep 17 00:00:00 2001 From: xiao-77 Date: Wed, 22 Jan 2025 13:48:09 +0800 Subject: [PATCH 119/120] Fix ci problems. 
--- source/common/src/tglobal.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index f1fca5769b..83b1845fd4 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -865,9 +865,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncSnapReplMaxWaitN", tsSnapReplMaxWaitN, 16, (TSDB_SYNC_SNAP_BUFFER_SIZE >> 2), CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); 
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "syncLogBufferMemoryAllowed", tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); From dfe8aa076ddf88661485426f1b1fcd35fb46ca09 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Wed, 22 Jan 2025 15:09:17 +0800 Subject: [PATCH 120/120] docs: fix typos of description of maximum/minimum --- docs/en/14-reference/02-tools/10-taosbenchmark.md | 2 +- docs/zh/14-reference/02-tools/10-taosbenchmark.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index d1a18b5d1c..1b9e148d6f 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -403,7 +403,7 @@ Specify the configuration parameters for tag and data columns in `super_tables` - **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value. -- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value. +- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value. - **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on. 
diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 44dab0ad5f..2b60288385 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -319,7 +319,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。 -- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最大值。 - **scalingFactor** : 浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。