diff --git a/.gitignore b/.gitignore
index 08e3d57717..f8b42f9176 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-build/
+*build/
 compile_commands.json
 CMakeSettings.json
 .cache
@@ -132,3 +132,5 @@ tools/taos-tools
 tools/taosws-rs
 tags
 .clangd
+*CMakeCache*
+*CMakeFiles*
diff --git a/README-CN.md b/README-CN.md
index 2b1790f4bb..4931c0177e 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -45,7 +45,7 @@ TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。
 TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
-为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.0.2 或者更高版本。
+为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。
 ## 安装工具
@@ -124,7 +124,7 @@ brew install argp-standalone gflags pkgconfig
 TDengine 包含数个使用 Go 语言开发的组件,比如taosAdapter, 请参考 golang.org 官方文档设置 go 开发环境。
-请使用 1.14 及以上版本。对于中国用户,我们建议使用代理来加速软件包下载。
+请使用 1.20 及以上版本。对于中国用户,我们建议使用代理来加速软件包下载。
 ```
 go env -w GO111MODULE=on
diff --git a/README.md b/README.md
index a8c20ea3f6..31d3a8bf67 100644
--- a/README.md
+++ b/README.md
@@ -53,7 +53,7 @@ You can choose to install through source code, [container](https://docs.tdengine
 TDengine provides a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to compile them together with TDengine.
-To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in the project directory.
+To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory.
 ## Install build tools
@@ -131,7 +131,7 @@ brew install argp-standalone gflags pkgconfig
 TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.
-Please use version 1.14+. For the user in China, we recommend using a proxy to accelerate package downloading.
+Please use version 1.20+. For the user in China, we recommend using a proxy to accelerate package downloading.
 ```
 go env -w GO111MODULE=on
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 73f9497809..12e1b50539 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE TRUE)
+set(CMAKE_VERBOSE_MAKEFILE FALSE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)
 #set output directory
diff --git a/cmake/cmake.version b/cmake/cmake.version
index e9cf31e75b..0e35fa316f 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.2.2.0.alpha")
+  SET(TD_VER_NUMBER "3.2.3.0.alpha")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in
index af7f005dda..197d978fb7 100644
--- a/cmake/curl_CMakeLists.txt.in
+++ b/cmake/curl_CMakeLists.txt.in
@@ -1,6 +1,7 @@
 # curl
 ExternalProject_Add(curl2
-    URL https://curl.se/download/curl-8.2.1.tar.gz
+    URL https://github.com/curl/curl/releases/download/curl-8_2_1/curl-8.2.1.tar.gz
+    #URL https://curl.se/download/curl-8.2.1.tar.gz
     URL_HASH MD5=b25588a43556068be05e1624e0e74d41
     DOWNLOAD_NO_PROGRESS 1
     DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index ef6ed4af1d..13826a1a74 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
     GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-    GIT_TAG 3.0
+    GIT_TAG main
     SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
     BINARY_DIR ""
     #BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 9fe5e7800a..54271659ec 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -19,7 +19,9 @@ TDengine uses ubiquitous SQL as its query language, which greatly reduces learni
 If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read, the [Administration](./operation) section.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
+If you want to know more about TDengine tools and the REST API, please see the [Reference](./reference) chapter.
+
+For information about connecting to TDengine with different programming languages, see [Client Libraries](./client-libraries/).
 If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index f9fe68b47a..28b94a5236 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -37,27 +37,26 @@ The major features are listed below:
 - Provides an interactive [Command Line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
 - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
 10. Programming
-  - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+  - Provides [client libraries](../client-libraries/) for [C/C++](../client-libraries/cpp), [Java](../client-libraries/java), [Python](../client-libraries/python), [Go](../client-libraries/go), [Rust](../client-libraries/rust), [Node.js](../client-libraries/node) and other programming languages.
   - Provides a [REST API](../reference/rest-api/).
 For more details on features, please read through the entire documentation.
 ## Competitive Advantages
-By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
+By making full use of [characteristics of time series data](https://tdengine.com/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases with the following advantages.
-- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database that solves the high-cardinality issue, supporting billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.
-- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/comprehensive-industrial-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/cloud-native/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud-native time-series database and can be deployed on public, private or hybrid clouds.
-- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
-  ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/easy-to-use/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/simplifying-time-series-analysis-for-data-scientists/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
-- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including the cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
+- **[Open Source](https://tdengine.com/open-source/)**: TDengine's core modules, including the cluster feature, are all available under open source licenses. It has gathered over 22k stars on GitHub. There is an active developer community, and over 400k running instances worldwide.
 With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
@@ -125,16 +124,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 - [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
 - [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
-- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
-- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
-
-## More readings
-- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
-- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
-
 ## Products
-There are two products offered by TDengine: TDengine Enterprise and TDengine Cloud, for details please refer to
-- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
-- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
+For information about our paid offerings, see:
+- [TDengine Enterprise](https://tdengine.com/enterprise/)
+- [TDengine Cloud](https://cloud.tdengine.com)
\ No newline at end of file
diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md
index 723194a325..761faf4a05 100644
--- a/docs/en/05-get-started/01-docker.md
+++ b/docs/en/05-get-started/01-docker.md
@@ -30,7 +30,7 @@ And then run the following command:
 docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
 ```
-Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
+Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connections. You can open these ports as needed.
 If you need to persist data to a specific directory on your local machine, please run the following command:
 ```shell
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index d34df2c970..c6f3e60932 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -14,9 +14,9 @@ This document describes how to install TDengine on Linux/Windows/macOS and perfo
 - To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
 - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to client libraries for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
-The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
+The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ client library.
 TDengine OSS is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
diff --git a/docs/en/05-get-started/_pkg_install.mdx b/docs/en/05-get-started/_pkg_install.mdx
index 2372d2ff26..768cc7cd74 100644
--- a/docs/en/05-get-started/_pkg_install.mdx
+++ b/docs/en/05-get-started/_pkg_install.mdx
@@ -2,7 +2,7 @@ import PkgList from "/components/PkgList";
 TDengine is easy to download and install.
-The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector.
+The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ client library.
 You can download the TDengine installation package in .rpm, .deb, or .tar.gz format. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately.
@@ -10,7 +10,7 @@ Between official releases, beta versions may be released that contain new featur
-For information about installing TDengine, see [Install and Uninstall](../../operation/pkg-install).
+For information about installing TDengine, see [Install and Uninstall](../operation/pkg-install).
 For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads)
diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md
index 5c4ab59f75..697f98af15 100644
--- a/docs/en/05-get-started/index.md
+++ b/docs/en/05-get-started/index.md
@@ -12,7 +12,7 @@ import StackOverflowSVG from './stackoverflow.svg'
 You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to client libraries for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter).
 ```mdx-code-block
 import DocCardList from '@theme/DocCardList';
diff --git a/docs/en/07-develop/01-connect/_connect_java.mdx b/docs/en/07-develop/01-connect/_connect_java.mdx
index fda86f2221..4d29e24911 100644
--- a/docs/en/07-develop/01-connect/_connect_java.mdx
+++ b/docs/en/07-develop/01-connect/_connect_java.mdx
@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
 {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
 ```
-More configuration about connection, please refer to [Java Connector](../../reference/connector/java)
+For more information about connection configuration, see the [Java Client Library](../../client-libraries/java)
diff --git a/docs/en/07-develop/01-connect/_connect_rust.mdx b/docs/en/07-develop/01-connect/_connect_rust.mdx
index 80ac1f4ff4..5746968263 100644
--- a/docs/en/07-develop/01-connect/_connect_rust.mdx
+++ b/docs/en/07-develop/01-connect/_connect_rust.mdx
@@ -3,6 +3,6 @@
 ```
 :::note
-For Rust connector, the connection depends on the feature being used. If "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged.
+For the Rust client library, the connection depends on the feature being used. If the "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged.
 :::
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 11375bd060..2f826920ad 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,7 +1,7 @@
 ---
 title: Connect to TDengine
 sidebar_label: Connect
-description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors.
+description: This document describes how to establish connections to TDengine and how to install and use TDengine client libraries.
 ---
 import Tabs from "@theme/Tabs";
@@ -15,28 +15,28 @@ import ConnCSNative from "./_connect_cs.mdx";
 import ConnC from "./_connect_c.mdx";
 import ConnR from "./_connect_r.mdx";
 import ConnPHP from "./_connect_php.mdx";
-import InstallOnLinux from "../../14-reference/03-connector/_linux_install.mdx";
-import InstallOnWindows from "../../14-reference/03-connector/_windows_install.mdx";
-import InstallOnMacOS from "../../14-reference/03-connector/_macos_install.mdx";
-import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
-import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
-import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
+import InstallOnLinux from "../../08-client-libraries/_linux_install.mdx";
+import InstallOnWindows from "../../08-client-libraries/_windows_install.mdx";
+import InstallOnMacOS from "../../08-client-libraries/_macos_install.mdx";
+import VerifyLinux from "../../08-client-libraries/_verify_linux.mdx";
+import VerifyWindows from "../../08-client-libraries/_verify_windows.mdx";
+import VerifyMacOS from "../../08-client-libraries/_verify_macos.mdx";
-Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
+Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the client libraries for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These client libraries support connecting to TDengine clusters through the native interface (taosc). Some client libraries also support connecting over a REST interface. Community developers have also contributed several unofficial client libraries, such as the ADO.NET, Lua, and PHP libraries.
 ## Establish Connection
-There are two ways for a connector to establish connections to TDengine:
+There are two ways for a client library to establish connections to TDengine:
 1. REST connection through the REST API provided by the taosAdapter component.
 2. Native connection through the TDengine client driver (taosc).
-For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
+For REST and native connections, client libraries provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
 Key differences:
 1. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
-1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../reference/connector/cpp#parameter-binding-api), [Subscription](../../reference/connector/cpp#subscription-and-consumption-api), etc.
+2. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../client-libraries/cpp#parameter-binding-api), [Subscription](../../client-libraries/cpp#subscription-and-consumption-api), etc.
 ## Install Client Driver taosc
@@ -72,7 +72,7 @@ After the above installation and configuration are done and making sure TDengine
-## Install Connectors
+## Install Client Library
@@ -131,7 +131,7 @@ libtaos = { version = "0.4.2"}
 ```
 :::info
-Rust connector uses different features to distinguish the way to establish connection. To establish REST connection, please enable `rest` feature.
+The Rust client library uses different features to distinguish the way to establish connection. To establish a REST connection, please enable the `rest` feature.
 ```toml
 libtaos = { version = "*", features = ["rest"] }
 ```
-Node.js connector provides different ways of establishing connections by providing different packages.
+The Node.js client library provides different ways of establishing connections by providing different packages.
-1. Install Node.js Native Connector
+1. Install Node.js Native Client Library
 ```
 npm install @tdengine/client
 ```
 :::note
 It's recommended to use a Node version between `node-v12.8.0` and `node-v13.0.0`.
 :::
-2. Install Node.js REST Connector
+2. Install Node.js REST Client Library
 ```
 npm install @tdengine/rest
 ```
@@ -207,7 +207,7 @@ install.packages("RJDBC")
-If the client driver (taosc) is already installed, then the C connector is already available.
+If the client driver (taosc) is already installed, then the C client library is already available.
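For readers following the client-library rename, the two connection styles described in the hunks above look roughly like this in Python with the official `taospy` package. This is a minimal sketch, assuming a local server and the default `root`/`taosdata` credentials; host names and ports are illustrative, not part of this diff.

```python
import taos      # native connection, requires the taosc client driver
import taosrest  # REST connection, only needs HTTP access to taosAdapter

# Native connection over TCP port 6030.
conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
print(conn.server_info)  # server version string
conn.close()

# REST connection over port 6041 (served by taosAdapter).
rest_conn = taosrest.connect(url="http://localhost:6041", user="root", password="taosdata")
result = rest_conn.query("SELECT server_version()")
print(result.data)  # rows come back as a list of lists
rest_conn.close()
```

Once connected, both objects accept the same SQL-execution calls, which is the point made above: the method of establishing the connection is the main difference and is not visible to users afterward.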
diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
index 1e719283cc..b9ec36e3ac 100644
--- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -24,7 +24,7 @@ import PhpStmt from "./_php_stmt.mdx";
 ## Introduction
-Application programs can execute `INSERT` statement through connectors to insert rows. The TDengine CLI can also be used to manually insert data.
+Application programs can execute `INSERT` statements through client libraries to insert rows. The TDengine CLI can also be used to manually insert data.
 ### Insert Single Row
 The below SQL statement is used to insert one row into table "d1001".
diff --git a/docs/en/07-develop/03-insert-data/60-high-volume.md b/docs/en/07-develop/03-insert-data/60-high-volume.md
index c15b3213bb..8e9a788d22 100644
--- a/docs/en/07-develop/03-insert-data/60-high-volume.md
+++ b/docs/en/07-develop/03-insert-data/60-high-volume.md
@@ -377,7 +377,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
    - TDengine client driver has been installed
    - Python3 has been installed, the version >= 3.8
-   - TDengine Python connector `taospy` has been installed
+   - TDengine Python client library `taospy` has been installed
 2. Install faster-fifo to replace python builtin multiprocessing.Queue
@@ -434,7 +434,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
 :::note
-Don't establish connection to TDengine in the parent process if using Python connector in multi-process way, otherwise all the connections in child processes are blocked always. This is a known issue.
+Don't establish a connection to TDengine in the parent process when using the Python client library in multi-process mode; otherwise, all connections in the child processes will be permanently blocked. This is a known issue.
 :::
diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx
index 70b1140748..e44161d397 100644
--- a/docs/en/07-develop/04-query-data/index.mdx
+++ b/docs/en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
 ---
 title: Query Data
-description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors.
+description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using client libraries.
 ---
 import Tabs from "@theme/Tabs";
@@ -19,7 +19,7 @@ import CAsync from "./_c_async.mdx";
 ## Introduction
-SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
+SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through the REST API or client libraries. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
 - Query on single column or multiple columns
 - Filter on tags or data columns: >, <, =, <\>, like
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index b920d61a59..281e9e6750 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
 To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
-The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the use of various language linker interfaces.
+The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the use of the client library interfaces in various languages (you can learn these as you need them).
 - A consumption group consumes all data under the same topic, and different consumption groups are independent of each other;
 - A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
 - On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups;
@@ -45,12 +45,13 @@ The following are some explanations about data subscription, which require some
 This document does not provide any further introduction to the knowledge of message queues themselves. If you need to know more, please search for it yourself.
+Note: Starting from version 3.2.0.0, data subscription supports vnode migration and splitting. Because data subscription depends on WAL files, and the WAL is not synchronized during vnode migration and splitting, any WAL data that has not yet been consumed cannot be consumed after a migration or split. Please ensure that all data has been consumed before migrating or splitting a vnode; otherwise, data loss may occur during consumption.
 ## Data Schema and API
-The related schemas and APIs in various languages are described as follows:
+The related schemas and APIs in various languages are described as follows (note that the consumer structure is not thread-safe: when using a consumer on one thread, do not close the consumer on another thread):
diff --git a/docs/en/07-develop/index.md b/docs/en/07-develop/index.md
index 4ed5e8c19f..da020d53d5 100644
--- a/docs/en/07-develop/index.md
+++ b/docs/en/07-develop/index.md
@@ -5,7 +5,7 @@ description: This document describes how to use the various components of TDengi
 Before creating an application to process time-series data with TDengine, consider the following:
-1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
+1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has client libraries for a variety of languages.
 2. Design the data model based on your own use cases. Consider the main [concepts](../concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fits your data.
 3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
 4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL.
@@ -14,7 +14,7 @@ Before creating an application to process time-series data with TDengine, consid
 7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
 8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
-This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](../reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/).
+This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each client library, please read the [Client Library Reference Guide](../client-libraries/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/).
 If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.
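The subscription semantics spelled out in the `07-tmq.mdx` hunk above — consumer groups, vgroup-level offsets, and the new thread-safety caveat on the consumer — look roughly as follows in practice. This is a hedged sketch using the `taospy` TMQ consumer; the topic name, group ID, and connection settings are invented for illustration.

```python
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "demo_group",        # all consumers in a group share the topic's vgroups
    "td.connect.ip": "localhost",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
    "auto.offset.reset": "earliest", # server default changed in 3.2.0.0, per the note above
})
consumer.subscribe(["demo_topic"])

try:
    while True:
        message = consumer.poll(1)   # seconds; returns None when no data is pending
        if message is None:
            continue
        err = message.error()
        if err is not None:
            raise err
        for block in message.value():  # one message may carry several data blocks
            print(block.fetchall())
finally:
    # Per the thread-safety note: close the consumer on the same thread that polls it.
    consumer.unsubscribe()
    consumer.close()
```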
diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/08-client-libraries/03-cpp.mdx similarity index 97% rename from docs/en/14-reference/03-connector/03-cpp.mdx rename to docs/en/08-client-libraries/03-cpp.mdx index 27adb58c12..80014ef3bf 100644 --- a/docs/en/14-reference/03-connector/03-cpp.mdx +++ b/docs/en/08-client-libraries/03-cpp.mdx @@ -1,10 +1,10 @@ --- -title: C/C++ Connector +title: C/C++ Client Library sidebar_label: C/C++ -description: This document describes the TDengine C/C++ connector. +description: This document describes the TDengine C/C++ client library. --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ client library, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ client library you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include @@ -263,7 +263,7 @@ typedef struct taosField { Get the reason for the last API call failure. The return value is the error code. :::note -TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. +TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C client library can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. ::: @@ -394,7 +394,7 @@ The specific functions related to the interface are as follows (see also the [pr ### Schemaless Writing API -In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. 
The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../schemaless/), and the C/C++ API used with it is described here. +In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../reference/schemaless/), and the C/C++ API used with it is described here. - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/08-client-libraries/04-java.mdx similarity index 96% rename from docs/en/14-reference/03-connector/04-java.mdx rename to docs/en/08-client-libraries/04-java.mdx index 039395cc30..9feab378e0 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/08-client-libraries/04-java.mdx @@ -1,16 +1,16 @@ --- -title: TDengine Java Connector +title: TDengine Java Client Library sidebar_label: Java -description: This document describes the TDengine Java Connector. +description: This document describes the TDengine Java client library. toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -`taos-jdbcdriver` is the official Java connector for TDengine. Java developers can use it to develop applications that access data in TDengine. `taos-jdbcdriver` implements standard JDBC driver interfaces and two connection methods: One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The second is **REST connection** which is implemented through taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. +`taos-jdbcdriver` is the official Java client library for TDengine. Java developers can use it to develop applications that access data in TDengine. `taos-jdbcdriver` implements standard JDBC driver interfaces and two connection methods: One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The second is **REST connection** which is implemented through taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. -![TDengine Database Connector Java](tdengine-jdbc-connector.webp) +![TDengine Java client library](tdengine-jdbc-connector.webp) The preceding figure shows the two ways in which a Java application can access TDengine. 
@@ -72,7 +72,7 @@ try (Statement statement = connection.createStatement()) { } ``` -There are four types of error codes that the JDBC connector can report: +There are four types of error codes that the JDBC client library can report: - Error code of the JDBC driver itself (error code between 0x2301 and 0x2350), - Error code of the native connection method (error code between 0x2351 and 0x2360) @@ -123,7 +123,7 @@ For specific error codes, please refer to. | 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter | | 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism | -- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) +- [TDengine Java client library](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) ## TDengine DataType vs. Java DataType @@ -148,21 +148,21 @@ TDengine currently supports timestamp, number, character, Boolean type, and the **Note**: Only TAG supports JSON types Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead. -GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../../taos-sql/data-type/) -For WKB specifications, please refer to [Well Known Binary (WKB)]( https://libgeos.org/specifications/wkb/ ) -For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../taos-sql/data-type/) +For WKB specifications, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/) +For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) ## Installation Steps ### Pre-installation preparation -Before using Java Connector to connect to the database, the following conditions are required. +Before using Java client library to connect to the database, the following conditions are required. 
- Java 1.8 or above runtime environment and Maven 3.6 or above installed - TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](../#Install-Client-Driver) -### Install the connectors +### Install the client library @@ -186,7 +186,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after cloning the TDengine project: +You can build Java client library from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/taos-connector-jdbc.git @@ -269,7 +269,7 @@ In the above example, JDBC uses the client's configuration file to establish a c In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally. -The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS. +The configuration file here refers to the configuration file on the machine where the application that calls the JDBC client library is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS. @@ -368,7 +368,7 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. - For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../config/#Client-Only). + For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../reference/config/#Client-Only). ### Priority of configuration parameters @@ -977,7 +977,7 @@ public void setTagGeometry(int index, byte[] value) ### Schemaless Writing -TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/). @@ -1057,7 +1057,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO ### Data Subscription -The TDengine Java Connector supports subscription functionality with the following application API. +The TDengine Java client library supports subscription functionality with the following application API. 
#### Create a Topic @@ -1094,7 +1094,7 @@ TaosConsumer consumer = new TaosConsumer<>(config); - httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type. - messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type. - httpPoolSize: Maximum number of concurrent requests on the a connection。It only takes effect when using WebSocket type. -- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0. +- For more information, see [Consumer Parameters](../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0. #### Subscribe to consume data @@ -1172,14 +1172,14 @@ consumer.unsubscribe(); consumer.close() ``` -For more information, see [Data Subscription](../../../develop/tmq). +For more information, see [Data Subscription](../../develop/tmq). #### Full Sample Code -In addition to the native connection, the Java Connector also supports subscribing via websocket. +In addition to the native connection, the Java client library also supports subscribing via websocket. ```java public abstract class ConsumerLoop { @@ -1461,7 +1461,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`: **Solution**: Use taos-jdbcdriver 3.0.2. -For additional troubleshooting, see [FAQ](../../../train-faq/faq). +For additional troubleshooting, see [FAQ](../../train-faq/faq). ## API Reference diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/08-client-libraries/05-go.mdx similarity index 97% rename from docs/en/14-reference/03-connector/05-go.mdx rename to docs/en/08-client-libraries/05-go.mdx index 33f7a93439..11930e2429 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/08-client-libraries/05-go.mdx @@ -1,20 +1,20 @@ --- -title: TDengine Go Connector +title: TDengine Go Client Library sidebar_label: Go -description: This document describes the TDengine Go connector. +description: This document describes the TDengine Go client library. toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../07-develop/04-query-data/_go.mdx" +import GoInsert from "../07-develop/03-insert-data/_go_sql.mdx" +import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx" +import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx" +import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx" +import GoQuery from "../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language client library for TDengine. 
It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. `driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. @@ -81,7 +81,7 @@ Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Install the connectors +### Install the client library 1. Initialize the project with the `go mod` command. @@ -222,11 +222,11 @@ func main() { ### Specify the URL and Properties to get the connection -The Go connector does not support this feature +The Go client library does not support this feature ### Priority of configuration parameters -The Go connector does not support this feature +The Go client library does not support this feature ## Usage examples @@ -769,7 +769,7 @@ You can get the unique id by `common.GetReqID()`. ### Data Subscription -The TDengine Go Connector supports subscription functionality with the following application API. +The TDengine Go client library supports subscription functionality with the following application API. #### Create a Topic diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/08-client-libraries/06-rust.mdx similarity index 93% rename from docs/en/14-reference/03-connector/06-rust.mdx rename to docs/en/08-client-libraries/06-rust.mdx index 0981df6724..8fa5c946aa 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/08-client-libraries/06-rust.mdx @@ -1,7 +1,7 @@ --- -title: TDengine Rust Connector +title: TDengine Rust Client Library sidebar_label: Rust -description: This document describes the TDengine Rust connector. +description: This document describes the TDengine Rust client library. toc_max_heading_level: 4 --- @@ -9,18 +9,18 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparation.mdx" -import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" -import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx" -import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx" -import RustQuery from "../../07-develop/04-query-data/_rust.mdx" +import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx" +import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx" +import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx" +import RustQuery from "../07-develop/04-query-data/_rust.mdx" [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) -`taos` is the official Rust connector for TDengine. Rust developers can develop applications to access the TDengine instance data. +`taos` is the official Rust client library for TDengine. Rust developers can develop applications to access the TDengine instance data. `taos` provides two ways to establish connections. 
One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to TDengine instances via the WebSocket interface provided by taosAdapter. You can specify a connection type with Cargo features. By default, both types are supported. The Websocket connection can be used on any platform. The native connection can be used on any platform that the TDengine Client supports. -The source code for the Rust connectors is located on [GitHub](https://github.com/taosdata/taos-connector-rust). +The source code for the Rust client library is located on [GitHub](https://github.com/taosdata/taos-connector-rust). ## Supported platforms @@ -37,7 +37,7 @@ Websocket connections are supported on all platforms that can run Go. | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. +The Rust client library is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. ## Handling exceptions @@ -82,7 +82,7 @@ Note: Only TAG supports JSON types * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](../#install-client-driver) -### Install the connectors +### Install the client library Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -154,8 +154,8 @@ The structure of the DSN description string is as follows: The parameters are described as follows: -- **driver**: Specify a driver name so that the connector can choose which method to use to establish the connection. Supported driver names are as follows: - - **taos**: Table names use the TDengine connector driver. +- **driver**: Specify a driver name so that the client library can choose which method to use to establish the connection. Supported driver names are as follows: + - **taos**: Table names use the TDengine native connection driver. - **tmq**: Use the TMQ to subscribe to data. - **http/ws**: Use Websocket to establish connections. - **https/wss**: Use Websocket to establish connections, and enable SSL/TLS. @@ -327,7 +327,7 @@ Parameter binding details see [API Reference](#stmt-api) ### Schemaless Writing -TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/). @@ -347,7 +347,7 @@ client.put(&sml_data)? ### Data Subscription -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). +TDengine starts subscriptions through [TMQ](../../taos-sql/tmq/). #### Create a Topic @@ -361,7 +361,7 @@ taos.exec_many([ #### Create a Consumer -You create a TMQ connector by using a DSN. +You create a TMQ connection by using a DSN. ```rust let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; @@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. 
Only `group.id` is m - `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. - `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](../../develop/tmq/). Note: This parameter is set per consumer group. - `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. - `auto.commit.interval.ms`: Interval for automatic commits. @@ -488,7 +488,7 @@ The source code of the sample application is under `TDengine/examples/rust` : ## Frequently Asked Questions -For additional troubleshooting, see [FAQ](../../../train-faq/faq). +For additional troubleshooting, see [FAQ](../../train-faq/faq). ## API Reference diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/08-client-libraries/07-python.mdx similarity index 91% rename from docs/en/14-reference/03-connector/07-python.mdx rename to docs/en/08-client-libraries/07-python.mdx index ccc270d3be..4a06c42c12 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/08-client-libraries/07-python.mdx @@ -1,20 +1,20 @@ --- -title: TDengine Python Connector +title: TDengine Python Client Library sidebar_label: Python -description: This document describes taospy, the TDengine Python connector. +description: This document describes taospy, the TDengine Python client library. --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. +`taospy` is the official Python client library for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). `taos-ws-py` is an optional package to enable using WebSocket to connect TDengine. The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST or WebSocket interface provided by taosAdapter is referred to hereinafter as a "REST connection" or "WebSocket connection". 
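As a minimal sketch of the three connection styles just described, the snippet below opens one connection of each kind. The host, the ports (6030 for the native interface, 6041 for taosAdapter's REST and WebSocket interfaces), and the root/taosdata credentials are placeholder assumptions for a default local deployment; adjust them to your environment.

```python
# One connection per style; host, ports, and credentials are assumptions
# for a default local install.
import taos      # native connection via the client driver
import taosrest  # REST connection via taosAdapter
import taosws    # WebSocket connection (from the optional taos-ws-py package)

native_conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
rest_conn = taosrest.connect(url="http://localhost:6041", user="root", password="taosdata")
ws_conn = taosws.connect("taosws://root:taosdata@localhost:6041")
```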
-The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). +The source code for the Python client library is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Supported platforms - The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. @@ -29,13 +29,13 @@ The source code for the Python connector is hosted on [GitHub](https://github.co We recommend using the latest version of `taospy`, regardless of the version of TDengine. -|Python Connector Version|major changes| +|Python Client Library Version|major changes| |:-------------------:|:----:| |2.7.12|1. added support for `varbinary` type (STMT does not yet support)
2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))| |2.7.9|support for getting assignment and seek function on subscription| |2.7.8|add `execute_many` method| -|Python Websocket Connector Version|major changes| +|Python Websocket Connection Version|major changes| |:----------------------------:|:-----:| |0.2.9|bugs fixes| |0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT|

@@ -43,9 +43,9 @@ We recommend using the latest version of `taospy`, regardless of the version of

## Handling Exceptions

-There are 4 types of exception in python connector.
+There are 4 types of exceptions in the Python client library.

-- The exception of Python Connector itself.
+- The exception of the Python client library itself.
- The exception of the native library.
- The exception of websocket.
- The exception of subscription.

@@ -55,7 +55,7 @@ There are 4 types of exception in python client library.

|:--------:|:---------:|:---------------:|
|InterfaceError|the native library is too old to support the function|please check the TDengine client version|
|ConnectionError|connection error|please check TDengine's status and the connection params|
-|DatabaseError|database error|please upgrade Python connector to latest|
+|DatabaseError|database error|please upgrade the Python client library to the latest version|
|OperationalError|operation error||
|ProgrammingError|||
|StatementError|the exception of stmt||

It usually uses try-except to handle exceptions in Python. For exception handling, please refer to [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html).

-All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example:
+All exceptions from the Python client library are thrown directly. Applications should handle these exceptions. For example:

```python
{{#include docs/examples/python/handle_exception.py}}
```

@@ -101,14 +101,14 @@ If you use a native connection, you will also need to [Install Client Driver](..

#### Uninstalling an older version

-If you have installed an older version of the Python Connector, please uninstall it beforehand.
+If you have installed an older version of the Python client library, please uninstall it beforehand.

```
pip3 uninstall taos taospy
```

:::note

-Earlier TDengine client software includes the Python connector. If the Python connector is installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it doesn't matter if it doesn't exist.
+Earlier TDengine client software includes the Python client library. If the Python client library is installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it does no harm if that package does not exist.

:::

@@ -160,7 +160,7 @@ pip3 install taos-ws-py

-For native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type.
+For a native connection, you need to verify that both the client driver and the Python client library itself are installed correctly. The client driver and the Python client library have been installed properly if you can successfully import the `taos` module. In the Python interactive shell, you can type:

```python
import taos
```

@@ -202,7 +202,7 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program

### Connectivity testing

-Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
+Before establishing a connection with the client library, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.

@@ -444,7 +444,7 @@ The best practice for TaosCursor is to create a cursor at the beginning of a que

##### Use of the RestClient class

-The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_example.py}}
```

@@ -501,7 +501,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al

-The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python
{{#include docs/examples/python/rest_client_example.py}}
```

@@ -561,7 +561,7 @@ The `TaosConnection` class and the `TaosResult` class already implement all the

##### Use of the RestClient class

-The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
```

@@ -624,7 +624,7 @@ As the way to connect introduced above but add `req_id` argument.

### Writing data via parameter binding

-The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
+The Python client library provides a parameter binding API for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.

@@ -755,7 +755,7 @@ stmt.close()

### Schemaless Writing

-Connector support schemaless insert.
+The client library supports schemaless insert.

@@ -817,11 +817,11 @@ There is a optional parameter called `req_id` in `schemaless_insert` and `schema

### Data Subscription

-Connector support data subscription. For more information about subscroption, please refer to [Data Subscription](../../../develop/tmq/).
+The client library supports data subscription. For more information about subscription, please refer to [Data Subscription](../../develop/tmq/).

#### Create a Topic

-To create topic, please refer to [Data Subscription](../../../develop/tmq/#create-a-topic).
+To create a topic, please refer to [Data Subscription](../../develop/tmq/#create-a-topic).

#### Create a Consumer

@@ -829,7 +829,7 @@ To create topic, please refer to [Data Subscription](../../../develop/tmq/#creat

-The consumer in the connector contains the subscription api. The syntax for creating a consumer is consumer = Consumer(configs). For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+The consumer in the client library contains the subscription API. The syntax for creating a consumer is consumer = Consumer(configs). For more subscription API parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).

```python
from taos.tmq import Consumer

consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```

-In addition to native connections, the connector also supports subscriptions via websockets.
+In addition to native connections, the client library also supports subscriptions via WebSocket.

-The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription API parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).

```python

import taosws

@@ -1025,7 +1025,7 @@ consumer.close()

### About nanoseconds

-Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full.
+Due to the current imperfection of Python's nanosecond support (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. We recommend using pandas' to_datetime(). The Python client library may modify the interface in the future if Python officially supports nanoseconds in full.

1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/

diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/08-client-libraries/08-node.mdx
similarity index 83%
rename from docs/en/14-reference/03-connector/08-node.mdx
rename to docs/en/08-client-libraries/08-node.mdx
index bed06477f1..71f360c6d1 100644
--- a/docs/en/14-reference/03-connector/08-node.mdx
+++ b/docs/en/08-client-libraries/08-node.mdx
@@ -1,7 +1,7 @@
---
-title: TDengine Node.js Connector
+title: TDengine Node.js Client Library
sidebar_label: Node.js
-description: This document describes the TDengine Node.js connector.
+description: This document describes the TDengine Node.js client library.
toc_max_heading_level: 4 --- @@ -9,22 +9,22 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; import Preparition from "./_preparation.mdx"; -import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; +import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../07-develop/04-query-data/_js.mdx"; -`@tdengine/client` and `@tdengine/rest` are the official Node.js connectors. Node.js developers can develop applications to access TDengine instance data. Note: The connectors for TDengine 3.0 are different than those for TDengine 2.x. The new connectors do not support TDengine 2.x. +`@tdengine/client` and `@tdengine/rest` are the official Node.js client libraries. Node.js developers can develop applications to access TDengine instance data. Note: The client libraries for TDengine 3.0 are different than those for TDengine 2.x. The new client libraries do not support TDengine 2.x. -`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST connector can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. +`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST client library can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. -The source code for the Node.js connectors is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). +The source code for the Node.js client libraries is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). ## Supported platforms -The platforms supported by the native connector are the same as those supported by the TDengine client driver. -The REST connector supports all platforms that can run Node.js. +The platforms supported by the native client library are the same as those supported by the TDengine client driver. +The REST client library supports all platforms that can run Node.js. ## Version support @@ -34,7 +34,7 @@ Please refer to [version support list](../#version-support) - + 1. Connection Management 2. General Query @@ -44,7 +44,7 @@ Please refer to [version support list](../#version-support) 6. Schemaless - + 1. Connection Management 2. 
General Query @@ -58,7 +58,7 @@ Please refer to [version support list](../#version-support) ### Pre-installation preparation - Install the Node.js development environment -- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. +- If you are using the REST client library, skip this step. However, if you use the native client library, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. @@ -103,14 +103,14 @@ If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and ### Install via npm - + ```bash npm install @tdengine/client ``` - + ```bash npm install @tdengine/rest @@ -122,7 +122,7 @@ npm install @tdengine/rest ### Verify - + After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine. @@ -141,7 +141,7 @@ node nodejsChecker.js host=localhost - After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query. - + After installing the TDengine client, use the `restChecker.js` program to verify that the current environment supports Node.js access to TDengine. @@ -164,7 +164,7 @@ node restChecker.js ## Establishing a connection -Please choose to use one of the connectors. +Please choose to use one of the client libraries. @@ -288,7 +288,7 @@ let cursor = conn.cursor(); | [schemless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | Schemaless insert | | [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | Using data subscription | | [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | Using asynchronous queries | -| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST connector | +| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST client library | ## Usage limitations @@ -316,7 +316,7 @@ let cursor = conn.cursor(); ## Important update records -### Native connectors +### Native client library | package name | version | TDengine version | Description | |------------------|---------|---------------------|------------------------------------------------------------------| @@ -324,7 +324,7 @@ let cursor = conn.cursor(); | td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. 
|
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |

-### REST Connector
+### REST client library

| package name | version | TDengine version | Description |
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
@@ -334,6 +334,3 @@ let cursor = conn.cursor();
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |

-## API Reference
-
-[API Reference](https://docs.taosdata.com/api/td2.0-connector/)
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/08-client-libraries/09-csharp.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/09-csharp.mdx
rename to docs/en/08-client-libraries/09-csharp.mdx
index 25bef1cf51..71cbacb515 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/08-client-libraries/09-csharp.mdx
@@ -1,7 +1,7 @@
---
-title: TDengine C# Connector
+title: TDengine C# Client Library
sidebar_label: C#
-description: This document describes the TDengine C# connector.
+description: This document describes the TDengine C# client library.
toc_max_heading_level: 4
---

@@ -509,7 +509,7 @@ namespace NativeStmt
stmt.AddBatch();
stmt.Exec();
var affected = stmt.Affected();
-                        Console.WriteLine($"affected rows: {affected}");
+                        Console.WriteLine($"affected rows: {affected}");
}
}
catch (Exception e)
@@ -554,7 +554,7 @@ namespace WSStmt
stmt.AddBatch();
stmt.Exec();
var affected = stmt.Affected();
-                        Console.WriteLine($"affected rows: {affected}");
+                        Console.WriteLine($"affected rows: {affected}");
}
}
catch (Exception e)
@@ -950,7 +950,7 @@ namespace NativeSubscription
}
}
}
-
+
static void InsertData()
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
@@ -1033,7 +1033,7 @@ namespace WSSubscription
}
}
}
-
+
static void InsertData()
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
diff --git a/docs/en/08-client-libraries/50-odbc.mdx b/docs/en/08-client-libraries/50-odbc.mdx
new file mode 100644
index 0000000000..08b2c031c6
--- /dev/null
+++ b/docs/en/08-client-libraries/50-odbc.mdx
@@ -0,0 +1,75 @@
+---
+sidebar_label: ODBC
+title: TDengine ODBC
+---
+
+
+## Introduction
+
+The TDengine ODBC driver is a driver specifically designed for TDengine based on the ODBC standard. It can be used by ODBC-based applications on Windows, like [PowerBI](https://powerbi.microsoft.com), to access a local or remote TDengine cluster or TDengine cloud service.
+
+TDengine ODBC provides two kinds of connections, native connection and WebSocket connection. You can choose either one for your convenience; WebSocket is the recommended choice, and you must use WebSocket if you are trying to access TDengine cloud service.
+
+Note: The TDengine ODBC driver can only run on 64-bit systems, and can only be invoked by 64-bit applications.
+
+
+## Install
+
+1. The TDengine ODBC driver supports only the Windows platform. To run on Windows, the VisualStudio C Runtime library is required.
If the VisualStudio C Runtime Library is missing on your platform, you can download and install it from [VC Runtime Library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+2. Install the TDengine client package for Windows; the version should be above 3.2.1.0. The client package includes both the TDengine ODBC driver and some other necessary libraries that are used by both the native connection and the WebSocket connection.
+
+## Configure Data Source
+
+### Connection Types
+
+The TDengine ODBC driver supports two kinds of connections to a TDengine cluster, native connection and WebSocket connection. The major differences between them are:
+
+1. Only WebSocket can connect to TDengine cloud service.
+
+2. A WebSocket connection is more compatible across TDengine server versions; normally you don't need to upgrade the client package along with the server side.
+
+3. A native connection normally has better performance, but you need to keep its version aligned with the server side.
+
+4. For most users, the **WebSocket** connection is recommended; it has much better compatibility and almost the same performance as a native connection.
+
+### WebSocket Connection
+
+1. Click the "Start" menu, search for "ODBC", and choose "ODBC Data Source (64-bit)" (Note: don't choose 32-bit)
+
+2. Select the "User DSN" tab, and click "Add" to enter the "Create Data Source" page
+
+3. Choose the data source to be added; here we choose "TDengine"
+
+4. Click "Finish", enter the configuration page for "TDengine ODBC Data Source", and fill in the required fields
+
+   ![ODBC websocket connection config](./assets/odbc-ws-config-en.webp)
+
+    4.1 [DSN]: Data Source Name, required field, to name the new ODBC data source
+
+    4.2 [Connection Type]: required field, we choose "WebSocket"
+
+    4.3 [URL]: required field, the URL for the ODBC data source; for example, `http://localhost:6041` is the URL for a local TDengine cluster, and `https://gw.cloud.taosdata.com?token=your_token` is the URL for a TDengine cloud service.
+
+    4.4 [Database]: optional field, the default database to access
+
+    4.5 [User]: optional field, only used for connection testing in step 5; if left blank, the "root" user is used by default.
+
+    4.6 [Password]: optional field, only used for connection testing in step 5.
+
+5. Click "Test Connecting" to test whether the data source can be connected; if successful, it prompts "connecting success"
+
+6. Click "OK" to save the configuration and exit.
+
+7. You can also select an already configured data source name in step 2 to change its existing configuration.
+
+### Native Connection
+
+Please note that a native connection cannot be used to access a TDengine cloud service.
+
+The steps are exactly the same as for the "WebSocket" connection, except that you choose "Native" in step 4.2.
+
+
+## PowerBI
+
+As an example, you can use PowerBI, which invokes the TDengine ODBC driver, to access TDengine; please refer to [Power BI](../../third-party/powerbi) for more details.
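To verify a configured DSN from application code, here is a minimal sketch using the third-party `pyodbc` package; the DSN name "TDengine" and the root/taosdata credentials are assumptions based on the example configuration above, so match them to your own data source.

```python
# Minimal ODBC smoke test (pip3 install pyodbc). The DSN name and
# credentials are assumptions; adjust them to your own data source.
import pyodbc

conn = pyodbc.connect("DSN=TDengine;UID=root;PWD=taosdata", autocommit=True)
cursor = conn.cursor()
cursor.execute("SELECT SERVER_VERSION()")  # simple query that needs no database
print("server version:", cursor.fetchone()[0])
cursor.close()
conn.close()
```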
diff --git a/docs/en/14-reference/03-connector/60-r-lang.mdx b/docs/en/08-client-libraries/60-r-lang.mdx
similarity index 97%
rename from docs/en/14-reference/03-connector/60-r-lang.mdx
rename to docs/en/08-client-libraries/60-r-lang.mdx
index f1cbb89f7b..881644c7f4 100644
--- a/docs/en/14-reference/03-connector/60-r-lang.mdx
+++ b/docs/en/08-client-libraries/60-r-lang.mdx
@@ -1,13 +1,13 @@
---
toc_max_heading_level: 4
sidebar_label: R
-title: R Language Connector
+title: R Language Client Library
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

-import Rdemo from "../../07-develop/01-connect/_connect_r.mdx"
+import Rdemo from "../07-develop/01-connect/_connect_r.mdx"

By using the RJDBC library in R, you can enable R programs to access TDengine data. Here are the installation process, configuration steps, and example code in R.

diff --git a/docs/en/14-reference/03-connector/80-php.mdx b/docs/en/08-client-libraries/80-php.mdx
similarity index 89%
rename from docs/en/14-reference/03-connector/80-php.mdx
rename to docs/en/08-client-libraries/80-php.mdx
index bff9e8e5d5..ccaa2f8d55 100644
--- a/docs/en/14-reference/03-connector/80-php.mdx
+++ b/docs/en/08-client-libraries/80-php.mdx
@@ -1,12 +1,12 @@
---
-title: PHP Connector
+title: PHP Client Library
sidebar_label: PHP
-description: This document describes the TDengine PHP connector.
+description: This document describes the TDengine PHP client library.
---

-`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine.
+`php-tdengine` is the TDengine PHP client library provided by the TDengine community. In particular, it supports the Swoole coroutine.

-PHP Connector relies on TDengine client driver.
+The PHP client library relies on the TDengine client driver.

Project Repository:

@@ -81,7 +81,7 @@ Option Two: Use CLI `php -dextension=tdengine test.php`.

## Sample Programs

-In this section a few sample programs which use TDengine PHP connector to access TDengine cluster are demonstrated.
+This section demonstrates a few sample programs that use the TDengine PHP client library to access a TDengine cluster.
> Any error throws an exception: `TDengine\Exception\TDengineException`

diff --git a/docs/en/08-client-libraries/_category_.yml b/docs/en/08-client-libraries/_category_.yml
new file mode 100644
index 0000000000..a70a33caa6
--- /dev/null
+++ b/docs/en/08-client-libraries/_category_.yml
@@ -0,0 +1 @@
+label: "Client Libraries"
diff --git a/docs/en/14-reference/03-connector/_linux_install.mdx b/docs/en/08-client-libraries/_linux_install.mdx
similarity index 100%
rename from docs/en/14-reference/03-connector/_linux_install.mdx
rename to docs/en/08-client-libraries/_linux_install.mdx
diff --git a/docs/en/14-reference/03-connector/_macos_install.mdx b/docs/en/08-client-libraries/_macos_install.mdx
similarity index 100%
rename from docs/en/14-reference/03-connector/_macos_install.mdx
rename to docs/en/08-client-libraries/_macos_install.mdx
diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/08-client-libraries/_preparation.mdx
similarity index 85%
rename from docs/en/14-reference/03-connector/_preparation.mdx
rename to docs/en/08-client-libraries/_preparation.mdx
index 99887ac36b..ba282b0bfb 100644
--- a/docs/en/14-reference/03-connector/_preparation.mdx
+++ b/docs/en/08-client-libraries/_preparation.mdx
@@ -2,7 +2,7 @@

:::info

-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](../../get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either the standard TDengine server installation package or the [TDengine client installation package](../get-started/). For Windows development, you need to install the corresponding Windows client; please refer to [Install TDengine](../../get-started/package).

- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
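As a quick way to confirm that the client driver shared library described above is actually loadable, the following is a minimal sketch that locates it through Python's `ctypes`. The library file names follow the default install locations listed above, and it assumes the `taos_get_client_info()` symbol exported by the C driver; adjust the path if your driver is installed elsewhere.

```python
# Load the client driver directly and print its version; raises OSError
# if the shared library cannot be found on the default search path.
import ctypes
import platform

libname = "taos.dll" if platform.system() == "Windows" else "libtaos.so"
driver = ctypes.CDLL(libname)
driver.taos_get_client_info.restype = ctypes.c_char_p
print("client driver version:", driver.taos_get_client_info().decode())
```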
diff --git a/docs/en/14-reference/03-connector/_verify_linux.mdx b/docs/en/08-client-libraries/_verify_linux.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_linux.mdx rename to docs/en/08-client-libraries/_verify_linux.mdx diff --git a/docs/en/14-reference/03-connector/_verify_macos.mdx b/docs/en/08-client-libraries/_verify_macos.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_macos.mdx rename to docs/en/08-client-libraries/_verify_macos.mdx diff --git a/docs/en/14-reference/03-connector/_verify_windows.mdx b/docs/en/08-client-libraries/_verify_windows.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_windows.mdx rename to docs/en/08-client-libraries/_verify_windows.mdx diff --git a/docs/en/14-reference/03-connector/_windows_install.mdx b/docs/en/08-client-libraries/_windows_install.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_windows_install.mdx rename to docs/en/08-client-libraries/_windows_install.mdx diff --git a/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp b/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp new file mode 100644 index 0000000000..aaca2e99b4 Binary files /dev/null and b/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp differ diff --git a/docs/en/14-reference/03-connector/connector.webp b/docs/en/08-client-libraries/connector.webp similarity index 100% rename from docs/en/14-reference/03-connector/connector.webp rename to docs/en/08-client-libraries/connector.webp diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/08-client-libraries/index.mdx similarity index 78% rename from docs/en/14-reference/03-connector/index.mdx rename to docs/en/08-client-libraries/index.mdx index 4a3e9195d6..7cf2839609 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/08-client-libraries/index.mdx @@ -1,15 +1,15 @@ --- -title: Connector -description: This document describes the connectors that TDengine provides to interface with various programming languages. +title: Client Libraries +description: This document describes the client libraries that TDengine provides to interface with various programming languages. --- -TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. +TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports client libraries for multiple programming languages, including official libraries for C/C++, Java, Python, Go, Node.js, C#, and Rust. These client libraries support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial client libraries, such as the ADO.NET, Lua, and PHP libraries. 
-![TDengine Database image-connector](./connector.webp)
+![TDengine client library connections](./connector.webp)

## Supported platforms

-Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows.
+Currently, TDengine's native interface client libraries support x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows.

| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
@@ -25,7 +25,7 @@ Using REST connection can support a broader range of operating systems as it doe

## Version support

-TDengine version updates often add new features, and the connector versions in the list are the best-fit versions of the connector.
+TDengine version updates often add new features, and the client library versions in the list are the best-fit versions of the library.

| **TDengine Versions** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
| --------------------------- | -------------- | -------------- | -------------- | ------------- | --------------- | --------------- |
@@ -37,7 +37,7 @@

## Functional Features

-Comparing the connector support for TDengine functional features as follows.
+The following tables compare client library support for TDengine functional features.

### Using the native interface (taosc)

@@ -66,7 +66,7 @@ The different database framework specifications for various programming language

:::warning

-- Regardless of the programming language chosen for the connector, TDengine versions 2.0 and above recommend that each thread of a database application create a separate connection. Or create a connection pool based on threads to avoid interference between threads with the "USE statement" state within a connection (but the connection's query and write operations are thread-safe).
+- Regardless of the programming language chosen for the client library, TDengine versions 2.0 and above recommend that each thread of a database application create a separate connection, or create a thread-based connection pool, to avoid interference between threads through the "USE statement" state within a connection (the connection's query and write operations are thread-safe).

:::

@@ -82,7 +82,7 @@ import VerifyMacOS from "./_verify_macos.mdx";

## Install Client Driver

:::info
-The client driver needs to be installed if you use the native interface connector on a system that does not have the TDengine server software installed.
+The client driver needs to be installed if you use the native interface connection on a system that does not have the TDengine server software installed.
:::

diff --git a/docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/en/08-client-libraries/tdengine-jdbc-connector.webp
similarity index 100%
rename from docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp
rename to docs/en/08-client-libraries/tdengine-jdbc-connector.webp
diff --git a/docs/en/10-deployment/02-docker.md b/docs/en/10-deployment/02-docker.md
index 63153f3033..2a4511c7b1 100644
--- a/docs/en/10-deployment/02-docker.md
+++ b/docs/en/10-deployment/02-docker.md
@@ -21,6 +21,7 @@ docker run -d --name tdengine \
* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. You can also point ~/data/taos/dnode/data to any other local empty data directory
* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. You can also point ~/data/taos/dnode/log to any other local empty log directory
+
:::

The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.

@@ -42,7 +43,7 @@ taos> show databases;
Query OK, 2 row(s) in set (0.002843s)
```

-The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
+The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various client libraries (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various client libraries for complex scenarios.

## Start TDengine on the host network

@@ -83,7 +84,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.

```shell
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```

-Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
+Finally, the TDengine service can be accessed from the TDengine CLI or any client library with "tdengine" as the server address.

```shell
taos -h tdengine -P 6030
```

@@ -324,6 +325,7 @@ services:

- The `VERSION` environment variable is used to set the tdengine image tag
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
+
:::

2. Start the cluster

diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index a0e036664a..ccf340b511 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -46,6 +46,7 @@ database_option: {

- last_row: The last row of each subtable is cached.
This option significantly improves the performance of the LAST_ROW function.
- last_value: The last non-null value of each column in each subtable is cached. This option significantly improves the performance of the LAST function under normal circumstances, such as statements including the WHERE, ORDER BY, GROUP BY, and INTERVAL keywords.
- both: The last row of each subtable and the last non-null value of each column in each subtable are cached.
+  Note: If you turn cachemodel on, then off, and then on again, the results of last/last_row may be wrong. Do not toggle the option in this way; it is strongly recommended to keep the cache enabled by always using "both".
- CACHESIZE: specifies the amount (in MB) of memory used for subtable caching on each vnode. Enter a value between 1 and 65536. The default value is 1.
- COMP: specifies how databases are compressed. The default value is 2.
  - 0: Compression is disabled.

@@ -137,6 +138,10 @@ You can use `show <db_name>.vgroups;` to check the value of cacheload.

If the value of `cacheload` is very close to the value of `cachesize`, then it is very likely that `cachesize` is too small. If the value of `cacheload` is much smaller than the value of `cachesize`, then `cachesize` is big enough. You can use this simple principle to determine a suitable value. Depending on how much memory is available in your system, you can choose to double `cachesize` or increase it by even 5 or more times.

+4. stt_trigger
+
+Please make sure to stop data writing before trying to alter the stt_trigger parameter.
+
:::note
Other parameters cannot be modified after the database has been created.

diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md
index f6e39a9734..462e7fc0ae 100644
--- a/docs/en/12-taos-sql/05-insert.md
+++ b/docs/en/12-taos-sql/05-insert.md
@@ -157,9 +157,9 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c

Automatically creating a table, with the table name specified through the `tbname` column

```sql
-INSERT INTO meters(tbname, location, groupId, ts, current, phase)
-    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 0.32)
-    ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 0.33)
-    ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 0.33)
+INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
+    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
+    ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+    ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
```

diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index f1e19a5449..1fc6fb7e67 100755
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -9,7 +9,7 @@ description: This document describes how to query data in TDengine.
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

-SELECT [hints] [DISTINCT] select_list
+SELECT [hints] [DISTINCT] [TAGS] select_list
    from_clause
    [WHERE condition]
    [partition_by_clause]
@@ -91,13 +91,15 @@ The list of currently supported Hints is as follows:

| :-----------: | -------------- | -------------------------- | -----------------------------------|
| BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
-| SORT_FOR_GROUP| None | Use sort for partition | With normal column in partition by list |
+| SORT_FOR_GROUP| None | Use sort for partition; conflicts with PARTITION_FIRST | With normal column in partition by list |
+| PARTITION_FIRST| None | Use partitioning before aggregation; conflicts with SORT_FOR_GROUP | With normal column in partition by list |

For example:

```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
+SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1;
```

## Lists

@@ -182,7 +184,7 @@ The TBNAME pseudocolumn in a supertable contains the names of subtables within t

The following SQL statement returns all unique subtable names and locations within the meters supertable:

```mysql
-SELECT DISTINCT TBNAME, location FROM meters;
+SELECT TAGS TBNAME, location FROM meters;
```

Use the `INS_TAGS` system table in `INFORMATION_SCHEMA` to query the information for subtables in a supertable. For example, the following statement returns the name and tag values for each subtable in the `meters` supertable.

@@ -227,6 +229,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu

```sql
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```

+### TAGS Query
+
+When only tag columns are specified after the TAGS keyword, the query returns only tag columns: one row of tag values is returned for each child table.
+
+```sql
+SELECT TAGS tag_name [, tag_name ...] FROM stb_name
+```
+
## Query Objects

`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.

@@ -259,7 +269,11 @@ The GROUP BY clause does not guarantee that the results are ordered. If you want

## PARTITION BY

-The PARTITION BY clause is a TDengine-specific extension to standard SQL. This clause partitions data based on the part_list and performs computations per partition.
+The PARTITION BY clause is a TDengine-specific extension to standard SQL introduced in TDengine 3.0. This clause partitions data based on the part_list and performs computations per partition.
+
+PARTITION BY and GROUP BY have similar meanings: both group data according to a specified list and then perform calculations. The difference is that PARTITION BY does not impose the restrictions that GROUP BY places on the SELECT list; any operation can be performed within a group (constants, aggregations, scalars, expressions, and so on). PARTITION BY is therefore fully compatible with GROUP BY in terms of usage, and every place that uses a GROUP BY clause can be replaced with PARTITION BY.
+
+Because PARTITION BY does not require returning a row of aggregated data, it can also support various window operations on the grouped slices.
All window operations that require grouping can only use the PARTITION BY clause. For more information, see TDengine Extensions.

diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index 4f8ccc418b..851ef86b67 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -626,9 +626,9 @@ TIMEDIFF(expr1, expr2 [, time_unit])

#### TIMETRUNCATE

```sql
-TIMETRUNCATE(expr, time_unit [, ignore_timezone])
+TIMETRUNCATE(expr, time_unit [, use_current_timezone])

-ignore_timezone: {
+use_current_timezone: {
    0
  | 1
}
```

@@ -647,10 +647,11 @@ ignore_timezone: {

1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- The precision of the returned timestamp is the same as the precision set for the current database in use
- If the input data is not formatted as a timestamp, the returned value is null.
-- If `1d` is used as `time_unit` to truncate the timestamp, `ignore_timezone` option can be set to indicate if the returned result is affected by client timezone or not.
  For example, if client timezone is set to UTC+0800, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) will return '2020-01-01 08:00:00'.
  Otherwise, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) will return '2020-01-01 00:00:00'.
  If `ignore_timezone` option is omitted, the default value is set to 1.
+- When using 1d/1w as the time unit to truncate a timestamp, you can specify whether to truncate based on the current time zone by setting the use_current_timezone parameter.
+  A value of 0 truncates in the UTC time zone; a value of 1 truncates in the current time zone.
+  For example, if the time zone configured on the client is UTC+0800, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) returns '2020-01-01 08:00:00', while TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) returns '2020-01-01 00:00:00'.
+  When use_current_timezone is not specified, it defaults to 1.

#### TIMEZONE

diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md
index 502b7562f3..90237a54f5 100644
--- a/docs/en/12-taos-sql/12-distinguished.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -16,7 +16,10 @@ When you query a supertable, you may need to partition the supertable by some di

PARTITION BY part_list
```

-part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items.
+part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items. For example, the following groups data by the location tag and takes the average voltage within each group:
+```sql
+select location, avg(voltage) from meters partition by location
+```

A PARTITION BY clause is processed as follows:

@@ -25,10 +28,13 @@

- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:

```sql
-select max(current) from meters partition by location interval(10m)
+select _wstart, location, max(current) from meters partition by location interval(10m)
```

-The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable.
More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data.
+The most common usage of PARTITION BY is partitioning the data in subtables by tags and then performing computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data. For example, to calculate the average voltage of each meter every 10 minutes:
+```sql
+select _wstart, tbname, avg(voltage) from meters partition by tbname interval(10m)
+```

## Windowed Queries

diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md
index 1a805c76fb..d14b6da2d3 100644
--- a/docs/en/12-taos-sql/13-tmq.md
+++ b/docs/en/12-taos-sql/13-tmq.md
@@ -41,7 +41,7 @@ The preceding command displays all topics in the current database.

## Create Consumer Group

-You can create consumer groups only through the TDengine Client driver or the API provided by a connector.
+You can create consumer groups only through the TDengine Client driver or the API provided by a client library.

## Delete Consumer Group

diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 5cb680e34b..76dc3b6b58 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -6,7 +6,7 @@ description: This document describes the TDengine REST API.

To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.

:::note
-One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used.
+One difference from the native connection is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and supertable names need to specify the database name as a prefix. TDengine supports specifying the db_name in the RESTful URL; if the database name prefix is not specified in the SQL command, the `db_name` specified in the URL is used.
::: ## Installation diff --git a/docs/en/14-reference/03-connector/_category_.yml b/docs/en/14-reference/03-connector/_category_.yml deleted file mode 100644 index 6a766e9657..0000000000 --- a/docs/en/14-reference/03-connector/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Connector" diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md index 8512f5b59d..d69f3876d4 100644 --- a/docs/en/14-reference/08-taos-shell.md +++ b/docs/en/14-reference/08-taos-shell.md @@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](../connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Install Client Driver](../../client-libraries/#install-client-driver). ## Execution diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index 779882f582..bc1133b9a7 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -1,6 +1,6 @@ --- title: List of supported platforms -description: This document describes the supported platforms for the TDengine server, client, and connectors. +description: This document describes the supported platforms for the TDengine server, client, and client libraries. --- ## List of supported platforms for TDengine server @@ -12,9 +12,9 @@ description: This document describes the supported platforms for the TDengine se Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by the enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RetHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition. -## List of supported platforms for TDengine clients and connectors +## List of supported platforms for TDengine clients and client libraries -TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha/LoongArch64 hardware platforms and Linux/Win64/Win32/macOS development environments. +TDengine's client libraries can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha/LoongArch64 hardware platforms and Linux/Win64/Win32/macOS development environments. The comparison matrix is as follows. diff --git a/docs/en/14-reference/index.md b/docs/en/14-reference/index.md index bc8ec69965..d57eee512a 100644 --- a/docs/en/14-reference/index.md +++ b/docs/en/14-reference/index.md @@ -1,9 +1,9 @@ --- title: Reference -description: This document describes TDengine connectors and utilities. +description: This document describes TDengine utilities. --- -This section describes the TDengine connectors and utilities. 
+This section describes the TDengine utilities. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md index cac4f5f604..42266d232c 100644 --- a/docs/en/20-third-party/11-kafka.md +++ b/docs/en/20-third-party/11-kafka.md @@ -94,7 +94,7 @@ The output as bellow: The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix). -TDengine Sink Connector internally uses TDengine [modeless write interface](../../reference/connector/cpp#modeless write-api) to write data to TDengine, currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json). +TDengine Sink Connector internally uses the TDengine [modeless write interface](../../client-libraries/cpp#modeless-write-api) to write data to TDengine; it currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json). The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
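+Once the connector is running, a quick way to confirm that rows are arriving is to query the target database from the TDengine CLI. A minimal sketch (assuming the example's target database `power` and a supertable named `meters` created by the connector's schemaless writes): +```sql +-- count the rows synchronized from the Kafka topic into TDengine +SELECT COUNT(*) FROM power.meters; +```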
diff --git a/docs/en/20-third-party/13-Jupyter.md b/docs/en/20-third-party/13-Jupyter.md index 1ac9df1da4..79636cd504 100644 --- a/docs/en/20-third-party/13-Jupyter.md +++ b/docs/en/20-third-party/13-Jupyter.md @@ -4,7 +4,7 @@ sidebar_label: JupyterLab description: This document describes how to integrate TDengine with JupyterLab. --- -JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. +JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python client library to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. ## Install JupyterLab Installing JupyterLab is very easy. Installation instructions can be found at: @@ -36,8 +36,8 @@ jupyter lab ```` This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888. -## Install the TDengine Python connector -You can now install the TDengine Python connector as follows. +## Install the TDengine Python client library +You can now install the TDengine Python client library as follows. Start a new Python kernel in JupyterLab. @@ -55,8 +55,8 @@ import sys ```` ## Connect to TDengine -You can find detailed examples to use the Python connector, in the TDengine documentation here. -Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab. +You can find detailed examples of using the Python client library in the TDengine documentation. +Once you have installed the TDengine Python client library in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab. Each TDengine instance has a database called "log" which has monitoring information about the TDengine instance. In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info". @@ -96,4 +96,4 @@ result = sqlQuery(conn) print(result) ```` -TDengine has connectors for various languages including Node.js, Go, PHP and there are kernels for these languages which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels). +TDengine has client libraries for various languages, including Node.js, Go, and PHP, and there are kernels for these languages, which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels).
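+The queries issued from the JupyterLab kernel, whatever the language, are plain SQL. A minimal sketch of the kind of statement the `sqlQuery(conn)` helper above might run (assuming the monitoring database `log` and its `disks_info` supertable named earlier, and that its first column is the usual `ts` timestamp): +```sql +-- most recent disk metrics recorded in the monitoring database +SELECT * FROM log.disks_info ORDER BY ts DESC LIMIT 10; +```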
diff --git a/docs/en/20-third-party/14-dbeaver.md b/docs/en/20-third-party/14-dbeaver.md index fd0a0672f2..c949987705 100644 --- a/docs/en/20-third-party/14-dbeaver.md +++ b/docs/en/20-third-party/14-dbeaver.md @@ -19,7 +19,7 @@ To use DBeaver to manage TDengine, you need to prepare the following: ![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp) -2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. +2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java client library installed on the local machine, DBeaver will prompt you to download and install it. ![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp) diff --git a/docs/en/20-third-party/50-qstudio.md b/docs/en/20-third-party/50-qstudio.md index 8339678a0e..4140f87775 100644 --- a/docs/en/20-third-party/50-qstudio.md +++ b/docs/en/20-third-party/50-qstudio.md @@ -19,7 +19,7 @@ To connect TDengine using qStudio, you need to complete the following preparatio ![qConnecting TDengine with qStudio](./qstudio/qstudio-connect-tdengine.webp) -2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root," and the default password is "taosdata." Click "Test" to test the connection's availability. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it. +2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root," and the default password is "taosdata." Click "Test" to test the connection's availability. If the TDengine Java client library is not installed on the local machine, qStudio will prompt you to download and install it. ![Download Java Connector](./qstudio/qstudio-jdbc-connector-download.webp) diff --git a/docs/en/20-third-party/70-seeq.md b/docs/en/20-third-party/70-seeq.md index e42204dd5d..43c3e79b72 100644 --- a/docs/en/20-third-party/70-seeq.md +++ b/docs/en/20-third-party/70-seeq.md @@ -10,20 +10,20 @@ description: How to use Seeq and TDengine to perform time series data analysis Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers. -TDengine can be added as a data source into Seeq via JDBC connector. Once data source is configured, Seeq can read data from TDengine and offers functionalities such as data visualization, analysis, and forecasting. +TDengine can be added as a data source in Seeq via the JDBC client library. Once the data source is configured, Seeq can read data from TDengine and offer functionality such as data visualization, analysis, and forecasting. ## Prerequisite 1. Install Seeq Server and Seeq Data Lab software 2. Install TDengine or register TDengine Cloud service -## Install TDengine JDBC connector +## Install TDengine JDBC client library 1. Get Seeq data location configuration ``` sudo seeq config get Folders/Data ``` -2. Download the latest TDengine Java connector from maven.org (current is version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/ +2. Download the latest TDengine Java client library from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/ 3. Restart Seeq server ``` sudo seeq restart diff --git a/docs/en/20-third-party/75-powerbi.md b/docs/en/20-third-party/75-powerbi.md new file mode 100644 index 0000000000..4744467f30 --- /dev/null +++ b/docs/en/20-third-party/75-powerbi.md @@ -0,0 +1,65 @@ +--- +sidebar_label: Power BI +title: Power BI +description: Use Power BI and TDengine to analyze time series data +--- + +## Introduction + +With the TDengine ODBC driver, Power BI can access time series data stored in TDengine. You can import tag data, raw time series data, or aggregated data from TDengine into Power BI to create reports or dashboards without any coding effort. + +## Steps +![Power BI use step](./powerbi-step-en.webp) + +### Prerequisites + +1. The TDengine server has been installed and is running. +2. Power BI Desktop has been installed and is running. (If not, please download and install the latest Windows X64 version from [Power BI](https://www.microsoft.com/download/details.aspx?id=58494).)
+ + +## Install Driver + +Depending on your TDengine server version, download the appropriate version of the TDengine client package from the TDengine website [Download Link](https://docs.taosdata.com/get-started/package/), or from TDengine explorer if you are using a local TDengine cluster. Install the TDengine client package on the same Windows machine where Power BI is running. + +### Configure Data Source + +Please refer to [ODBC](../../client-libraries/odbc) to configure the TDengine ODBC driver with a WebSocket connection. + +### Import Data from TDengine to Power BI + +1. Open Power BI, log in, and add the data source by following "Home Page" -> "Get Data" -> "Others" -> "ODBC" -> "Connect" + +2. Choose the data source name, connect to the configured data source, go to the navigator, browse the tables of the selected database, and load the data + +3. If you want to run specific SQL, click "Advanced Options", input your SQL in the dialog box that opens, and load the data. + + +To make better use of Power BI for analyzing data stored in TDengine, you need to understand the concepts of dimension, metric, time series, and correlation, and then import data using your own SQL, as shown in the sketch after this list. + +1. Dimension: normally categorical (text) data that describes information such as device, collection point, and model. In a TDengine supertable template, tag columns store the dimension information. You can use SQL like `select distinct tbname, tag1, tag2 from supertable` to get dimensions. + +2. Metric: quantitative (numeric) fields that can be calculated, for example with SUM, AVERAGE, or MINIMUM. If the collection frequency is one record per second, there are 31,536,000 records in one year, and importing that much data into Power BI would be too inefficient. In TDengine, you can use data partition queries and window partition queries, in combination with window-related pseudo columns, to import downsampled data into Power BI. For more details, please refer to [TDengine Specialized Queries](https://docs.taosdata.com/taos-sql/distinguished/). + + - Window partition query: for example, thermal meters collect one data point per second, but you need to query the average temperature every 10 minutes. You can use a window clause to get the downsampled data you need. The corresponding SQL is like `select tbname, _wstart date,avg(temperature) temp from table interval(10m)`, in which _wstart is a pseudo column indicating the start time of a window, 10m is the duration of the window, and `avg(temperature)` is the aggregate value inside a window. + + - Data partition query: if you want to get the aggregate values of many thermal meters, you can first partition the data and then perform a series of calculations inside the partitioned data spaces. The clause you need is `partition by part_list`. The most common use of data partitioning is that, when querying a supertable, you can partition the data into subtables by tags so that the data of each subtable forms a single time series, which facilitates analytical processing of time series data. + +3. Time series: when plotting curves or aggregating data along a timeline, a date or time column is normally required. Dates or times can be imported from Excel, or retrieved from TDengine using an SQL statement like `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`, in which the fill() clause indicates the fill mode to use when data is missing, and the pseudo column _wstart indicates the date to retrieve. + +4. Correlation: indicates how data is correlated. Dimensions and metrics can be correlated by tbname; dates and metrics can be correlated by date. Together these allow you to build visual reports.
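+A minimal sketch that combines the window partition and data partition queries described in the list above into a single downsampling statement suitable for import (hypothetical column values; `test.meters` is the sample database used later in this document): +```sql +-- per-device 10-minute averages over a bounded time range; +-- empty windows are filled with 0 so each timeline stays continuous +SELECT tbname, _wstart, AVG(voltage) AS avg_voltage +FROM test.meters +WHERE ts >= '2021-07-13 00:00:00' AND ts < '2021-07-14 00:00:00' +PARTITION BY tbname +INTERVAL(10m) +FILL(VALUE, 0); +```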
+ +### Example - Meters + +TDengine uses its own specific data model: a supertable serves as a template, and a separate table is created for each device. Each table can have a maximum of 4,096 data columns and 128 tags. In the meters example, assume each meter generates one record per second; there will then be 86,400 records each day and 31,536,000 records every year, so 1,000 meters alone will occupy about 500GB of disk space. The common usage in Power BI is therefore to map tags to dimension columns and map aggregations of data columns to metric columns, providing indicators for decision makers. + +1. Import Dimensions + +Import the tags of the tables into Power BI and name the query "tags". The SQL is like `select distinct tbname, groupid, location from test.meters;`. + +2. Import Metrics + +In Power BI, import the average current, average voltage, and average phase with a 1-hour window, and name the query "data". The SQL is like `select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)`. + +3. Correlate Dimensions and Metrics + +In Power BI, open the model view, correlate "tags" and "data" by setting "tbname" as the correlation column, and you can then use the data in histograms, pie charts, and so on. For more information about building visual reports in Power BI, please refer to [Power BI](https://learn.microsoft.com/power-bi/). \ No newline at end of file diff --git a/docs/en/20-third-party/powerbi-step-en.webp b/docs/en/20-third-party/powerbi-step-en.webp new file mode 100644 index 0000000000..5d6eff1ac2 Binary files /dev/null and b/docs/en/20-third-party/powerbi-step-en.webp differ diff --git a/docs/en/25-application/_03-immigrate.md b/docs/en/25-application/_03-immigrate.md index cdb3d5591c..4dc7ee711c 100644 --- a/docs/en/25-application/_03-immigrate.md +++ b/docs/en/25-application/_03-immigrate.md @@ -176,7 +176,7 @@ After completing the query, if the data written does not differ from what is exp TDengine does not support querying or fetching data using the OpenTSDB query syntax but does provide a counterpart for each of the OpenTSDB queries. The corresponding query processing can be adapted and applied in a manner obtained by examining Appendix 1. To fully understand the types of queries supported by TDengine, refer to the TDengine user manual. -TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. Please read the user manual for specific operations and usage. +TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language client libraries for querying and reading data to suit your application. Please read the user manual for specific operations and usage. ## Historical Data Migration diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index 715704a0c3..e8e1386197 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -54,7 +54,7 @@ This error indicates that the client could not connect to the server. Perform the following troubleshooting steps: 6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration. -7.
If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. +7. If you are using the Python, Java, Go, Rust, C#, or Node.js client library on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. 8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable. @@ -142,7 +142,7 @@ Timestamps are processed as follows: 1. The client uses its system timezone unless it has been configured otherwise. 2. A timezone configured in `taos.cfg` takes precedence over the system timezone. -3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL. +3. A timezone explicitly specified when establishing a connection to TDengine through a client library takes precedence over `taos.cfg` and the system timezone. For example, the Java client library allows you to specify a timezone in the JDBC URL. 4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timezone configured using any other method. ### 11. Which network ports are required by TDengine? diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index f846ede52f..f5a4789976 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t import Release from "/components/ReleaseV3"; +## 3.2.2.0 + + + ## 3.2.1.0 diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index f945f53b5c..50913e87c8 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -23,7 +23,7 @@ import CDemo from "./_sub_c.mdx"; 为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。 -下面为关于数据订阅的一些说明,需要对TDengine的架构有一些了解,结合各个语言链接器的接口使用。 +下面为关于数据订阅的一些说明,需要对TDengine的架构有一些了解,并结合各个语言连接器的接口使用(可在使用时再了解)。 - 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立; - 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据; - 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联; @@ -45,12 +45,13 @@ import CDemo from "./_sub_c.mdx"; 本文档不对消息队列本身的知识做更多的介绍,如果需要了解,请自行搜索。 +说明: 从3.2.0.0版本开始,数据订阅支持vnode迁移和分裂。 由于数据订阅依赖wal文件,而在vnode迁移和分裂的过程中,wal并不会同步过去,所以迁移或分裂后,之前没消费完的wal数据将无法再被消费。请确保先将数据全部消费完,再进行vnode迁移或分裂,否则消费时会丢失数据。
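+结合上述说明,下面给出一个创建 topic 的最小 SQL 示意(其中数据库 power、超级表 meters 均为假设的示例名称): +```sql +-- 基于查询创建 topic,消费者只能消费到 SELECT 列表中的列 +CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage FROM power.meters; +``` +创建后,消费者即可按消费组订阅该 topic,消费进度(offset)按 vgroup 保存在服务端。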
## 主要数据结构和 API -不同语言下, TMQ 订阅相关的 API 及数据结构如下: +不同语言下, TMQ 订阅相关的 API 及数据结构如下(注意consumer结构不是线程安全的,在一个线程使用consumer时,不要在另一个线程close这个consumer): diff --git a/docs/zh/08-connector/50-odbc.mdx b/docs/zh/08-connector/50-odbc.mdx new file mode 100644 index 0000000000..0668aed7df --- /dev/null +++ b/docs/zh/08-connector/50-odbc.mdx @@ -0,0 +1,99 @@ +--- +sidebar_label: ODBC +title: TDengine ODBC +--- + + +## 简介 + +TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 + +TDengine ODBC 提供基于 WebSocket(推荐)和原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 + +注意:TDengine ODBC 只支持 64 位系统,调用 TDengine ODBC 必须通过 64 位的 ODBC 驱动管理器进行。因此调用 ODBC 的程序不能使用 32 位版本。 + +想更多了解 TDengine 时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 + +## 安装 + +1. 仅支持 Windows 平台。Windows 上需要安装 VC 运行时库,可在此下载安装 [VC运行时库](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170)。如果已经安装 VS 开发工具可忽略。 + +2. 安装 TDengine Windows 客户端,版本在 3.2.1.0 或以上,都会包含 TDengine 的 ODBC 驱动。 + +## 配置数据源 + +### 数据源连接类型与区别 + +TDengine ODBC 支持两种连接 TDengine 数据库方式:WebSocket 连接与 Native 连接,其区别如下: + +1. 只有 WebSocket 支持连接云服务 + +2. WebSocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 + +3. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 + +4. 对于一般用户,建议使用 **WebSocket** 连接方式,性能与 Native 差别不大,兼容性更好。 + +### WebSocket 连接 + +1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) + +2. 选中【用户 DSN】标签页,通过【添加(D)】按钮进入"创建数据源"界面 + +3. 选择想要添加的数据源,这里我们选择【TDengine】 + +4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 + + ![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp) + + 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 + + 4.2【Connection Type】 : 必选,选择连接类型,这里选择 【Websocket】 + + 4.3【URL】必填,ODBC 数据源 URL,example: `http://localhost:6041`, 云服务的 url 示例: `https://gw.cloud.taosdata.com?token=your_token` + + 4.4【Database】选填,需要连接的默认数据库 + + 4.5【User】仅供第5步测试连接使用,选填,数据库用户名,如果不填,TDengine 默认 root + + 4.6【Password】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + +5. 点【Test Connecting...】测试连接情况,如果成功,提示"connecting success" + +6. 点【确定】,即可保存配置并退出 + +7. 也可以在第2步选择已经配置好的数据源名通过【配置】按钮进入配置页面,修改已有配置 + +### 原生连接(不支持云服务) + +1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) + +2. 选中【用户 DSN】标签页,通过【添加(D)】按钮进入"创建数据源"界面 + +3. 选择想要添加的数据源,这里我们选择【TDengine】 + +4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 + + ![ODBC native connection config](./assets/odbc-native-config-zh.webp) + + 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 + + 4.2 【Connection Type】 : 必选,选择连接类型,这里选择 【Native】 原生连接; + + 4.3 【Server】必填,ODBC 数据源 Server 地址,example: `localhost:6030` + + 4.4 【Database】选填,需要连接的默认数据库 + + 4.5 【User】仅供第5步测试连接使用,选填,数据库用户名,如果不填,TDengine 默认 root + + 4.6 【Password】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + +5. 点【Test Connecting...】测试连接情况,如果成功,提示"connecting success" + +6. 点【确定】,即可保存配置并退出 + +7.
也可以在第2步选择已经配置好的数据源名通过【配置】按钮进入配置页面,修改已有配置 + +## 与第三方集成 + +作为使用 TDengine ODBC driver 的一个示例,你可以使用 Power BI 与 TDengine 分析时序数据。更多细节请参考 [Power BI](../../third-party/powerbi) diff --git a/docs/zh/08-connector/assets/odbc-check-data.webp b/docs/zh/08-connector/assets/odbc-check-data.webp new file mode 100644 index 0000000000..bc7867c089 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-check-data.webp differ diff --git a/docs/zh/08-connector/assets/odbc-native-config-zh.webp b/docs/zh/08-connector/assets/odbc-native-config-zh.webp new file mode 100644 index 0000000000..ed9005f2a1 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-native-config-zh.webp differ diff --git a/docs/zh/08-connector/assets/odbc-ws-config-zh.webp b/docs/zh/08-connector/assets/odbc-ws-config-zh.webp new file mode 100644 index 0000000000..c8a6e11011 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-ws-config-zh.webp differ diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index c3e0e9a07a..58625d22e2 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -46,6 +46,7 @@ database_option: { - last_row:表示缓存子表最近一行数据。这将显著改善 LAST_ROW 函数的性能表现。 - last_value:表示缓存子表每一列的最近的非 NULL 值。这将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。 - both:表示同时打开缓存最近行和列功能。 + Note:CacheModel 值来回切换有可能导致 last/last_row 的查询结果不准确,请谨慎操作。推荐保持打开。 - CACHESIZE:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。 - COMP:表示数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。 - 0:表示不压缩。 @@ -137,6 +138,10 @@ alter_database_option: { 如果 cacheload 非常接近 cachesize,则 cachesize 可能过小。 如果 cacheload 明显小于 cachesize 则 cachesize 是够用的。可以根据这个原则判断是否需要修改 cachesize 。具体修改值可以根据系统可用内存情况来决定是加倍或者是提高几倍。 +4. stt_trigger + +在修改 stt_trigger 参数之前请先停止数据库写入。 + :::note 其它参数在 3.0.0.0 中暂不支持修改 diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md index 583d047c43..fa330cabae 100644 --- a/docs/zh/12-taos-sql/05-insert.md +++ b/docs/zh/12-taos-sql/05-insert.md @@ -157,8 +157,8 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c 自动建表, 表名通过tbname列指定 ```sql -INSERT INTO meters(tbname, location, groupId, ts, current, phase) - values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 0.32) - ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 0.33) - ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 0.33) +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) + ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) + ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) ``` diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 04508ceede..2a7dff6f7d 100755 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -9,7 +9,7 @@ description: 查询数据的详细语法 ```sql SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() } -SELECT [hints] [DISTINCT] select_list +SELECT [hints] [DISTINCT] [TAGS] select_list from_clause [WHERE condition] [partition_by_clause] @@ -91,13 +91,15 @@ Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适 | :-----------: | -------------- | -------------------------- | -----------------------------| | BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 | | NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 | -| SORT_FOR_GROUP| 无 | 采用sort方式进行分组 
| partition by 列表有普通列时 | +| SORT_FOR_GROUP| 无 | 采用sort方式进行分组, 与PARTITION_FIRST冲突 | partition by 列表有普通列时 | +| PARTITION_FIRST| 无 | 在聚合之前使用PARTITION计算分组, 与SORT_FOR_GROUP冲突 | partition by 列表有普通列时 | 举例: ```sql SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts; SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1; +SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1; ``` ## 列表 @@ -162,6 +164,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ::: +### 标签查询 + +当查询的列只有标签列时,`TAGS` 关键字可以指定返回所有子表的标签列。每个子表只返回一行标签列。 + +返回所有子表的标签列: + +```sql +SELECT TAGS tag_name [, tag_name ...] FROM stb_name +``` + ### 结果集列名 `SELECT`子句中,如果不指定返回结果集合的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集合中列的名称。例如: @@ -182,7 +194,7 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001; 获取一个超级表所有的子表名及相关的标签信息: ```mysql -SELECT DISTINCT TBNAME, location FROM meters; +SELECT TAGS TBNAME, location FROM meters; ``` 建议用户使用 INFORMATION_SCHEMA 下的 INS_TAGS 系统表来查询超级表的子表标签信息,例如获取超级表 meters 所有的子表名和标签值: @@ -259,7 +271,12 @@ GROUP BY 子句中的表达式可以包含表或视图中的任何列,这些 ## PARTITION BY -PARTITION BY 子句是 TDengine 特色语法,按 part_list 对数据进行切分,在每个切分的分片中进行计算。 +PARTITION BY 子句是 TDengine 3.0版本引入的特色语法,用于根据 part_list 对数据进行切分,在每个切分的分片中可以进行各种计算。 + +PARTITION BY 与 GROUP BY 基本含义相似,都是按照指定列表进行数据分组然后进行计算,不同点在于 PARTITION BY 没有 GROUP BY 子句的 SELECT 列表的各种限制,组内可以进行任意运算(常量、聚合、标量、表达式等),因此在使用上 PARTITION BY 完全兼容 GROUP BY,所有使用 GROUP BY 子句的地方都可以替换为 PARTITION BY。 + +因为 PARTITION BY 没有返回一行聚合数据的要求,因此还可以支持在分组切片后的各种窗口运算,所有需要分组进行的窗口运算都只能使用 PARTITION BY 子句。 + 详见 [TDengine 特色查询](../distinguished) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 26313390a6..66322d55f1 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -1,1499 +1,1500 @@ ---- -sidebar_label: 函数 -title: 函数 -description: TDengine 支持的函数列表 -toc_max_heading_level: 4 ---- - -## 单行函数 - -单行函数为查询结果中的每一行返回一个结果行。 - -### 数学函数 - -#### ABS - -```sql -ABS(expr) -``` - -**功能说明**:获得指定字段的绝对值。 - -**返回结果类型**:与指定字段的原始数据类型一致。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### ACOS - -```sql -ACOS(expr) -``` - -**功能说明**:获得指定字段的反余弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### ASIN - -```sql -ASIN(expr) -``` - -**功能说明**:获得指定字段的反正弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### ATAN - -```sql -ATAN(expr) -``` - -**功能说明**:获得指定字段的反正切结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### CEIL - -```sql -CEIL(expr) -``` - -**功能说明**:获得指定字段的向上取整数的结果。 - -**返回结果类型**:与指定字段的原始数据类型一致。 - -**适用数据类型**:数值类型。 - -**适用于**: 表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### COS - -```sql -COS(expr) -``` - -**功能说明**:获得指定字段的余弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### FLOOR - -```sql -FLOOR(expr) -``` - -**功能说明**:获得指定字段的向下取整数的结果。 - 其他使用说明参见 CEIL 函数描述。 - -#### LOG - -```sql 
-LOG(expr1[, expr2]) -``` - -**功能说明**:获得 expr1 对于底数 expr2 的对数。如果 expr2 参数省略,则返回指定字段的自然对数值。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### POW - -```sql -POW(expr1, expr2) -``` - -**功能说明**:获得 expr1 的指数为 expr2 的幂。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### ROUND - -```sql -ROUND(expr) -``` - -**功能说明**:获得指定字段的四舍五入的结果。 - 其他使用说明参见 CEIL 函数描述。 - - -#### SIN - -```sql -SIN(expr) -``` - -**功能说明**:获得指定字段的正弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### SQRT - -```sql -SQRT(expr) -``` - -**功能说明**:获得指定字段的平方根。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### TAN - -```sql -TAN(expr) -``` - -**功能说明**:获得指定字段的正切结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -### 字符串函数 - -字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。 - -#### CHAR_LENGTH - -```sql -CHAR_LENGTH(expr) -``` - -**功能说明**:以字符计数的字符串长度。 - -**返回结果类型**:BIGINT。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -#### CONCAT - -```sql -CONCAT(expr1, expr2 [, expr] ... ) -``` - -**功能说明**:字符串连接函数。 - -**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 - -**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### CONCAT_WS - -```sql -CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...) 
-``` - -**功能说明**:带分隔符的字符串连接函数。 - -**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 - -**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LENGTH - -```sql -LENGTH(expr) -``` - -**功能说明**:以字节计数的长度。 - -**返回结果类型**:BIGINT。 - -**适用数据类型**:VARCHAR, NCHAR, VARBINARY。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LOWER - -```sql -LOWER(expr) -``` - -**功能说明**:将字符串参数值转换为全小写字母。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LTRIM - -```sql -LTRIM(expr) -``` - -**功能说明**:返回清除左边空格后的字符串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### RTRIM - -```sql -RTRIM(expr) -``` - -**功能说明**:返回清除右边空格后的字符串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### SUBSTR - -```sql -SUBSTR(expr, pos [,len]) -``` - -**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### UPPER - -```sql -UPPER(expr) -``` - -**功能说明**:将字符串参数值转换为全大写字母。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -### 转换函数 - -转换函数将值从一种数据类型转换为另一种数据类型。 - -#### CAST - -```sql -CAST(expr AS type_name) -``` - -**功能说明**:数据类型转换函数,返回 expr 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。 - -**返回结果类型**:CAST 中指定的类型(type_name)。 - -**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**: - -- 对于不能支持的类型转换会直接报错。 -- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: - 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 - 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 - 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 - -#### TO_ISO8601 - -```sql -TO_ISO8601(expr [, timezone]) -``` - -**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 - -**返回结果数据类型**:VARCHAR 类型。 - -**适用数据类型**:INTEGER, TIMESTAMP。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**: - -- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 -- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; -- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -#### TO_JSON - -```sql -TO_JSON(str_literal) -``` - -**功能说明**: 将字符串常量转换为 JSON 类型。 - -**返回结果数据类型**: JSON。 - -**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### TO_UNIXTIMESTAMP - -```sql -TO_UNIXTIMESTAMP(expr [, return_timestamp]) - -return_timestamp: { - 0 - | 1 -} -``` - -**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 - -**返回结果数据类型**:BIGINT, TIMESTAMP。 - -**应用字段**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -- return_timestamp 指定函数返回值是否为时间戳类型,设置为1时返回 TIMESTAMP 类型,设置为0时返回 BIGINT 类型。如不指定缺省返回 BIGINT 类型。 - -#### TO_CHAR - -```sql -TO_CHAR(ts, format_str_literal) -``` - -**功能说明**: 将timestamp类型按照指定格式转换为字符串 - -**返回结果数据类型**: VARCHAR - -**应用字段**: TIMESTAMP - -**嵌套子查询支持**: 适用于内层查询和外层查询 - -**适用于**: 表和超级表 - -**支持的格式** - -| **格式** | **说明**| **例子** | -| --- | --- | --- | 
-|AM,am,PM,pm| 无点分隔的上午下午 | 07:00:00am| -|A.M.,a.m.,P.M.,p.m.| 有点分隔的上午下午| 07:00:00a.m.| -|YYYY,yyyy|年, 4个及以上数字| 2023-10-10| -|YYY,yyy| 年, 最后3位数字| 023-10-10| -|YY,yy| 年, 最后2位数字| 23-10-10| -|Y,y|年, 最后一位数字| 3-10-10| -|MONTH|月, 全大写| 2023-JANUARY-01| -|Month|月, 首字母大写| 2023-January-01| -|month|月, 全小写| 2023-january-01| -|MON| 月, 缩写, 全大写(三个字符)| JAN, SEP| -|Mon| 月, 缩写, 首字母大写| Jan, Sep| -|mon|月, 缩写, 全小写| jan, sep| -|MM,mm|月, 数字 01-12|2023-01-01| -|DD,dd|月日, 01-31|| -|DAY|周日, 全大写|MONDAY| -|Day|周日, 首字符大写|Monday| -|day|周日, 全小写|monday| -|DY|周日, 缩写, 全大写|MON| -|Dy|周日, 缩写, 首字符大写|Mon| -|dy|周日, 缩写, 全小写|mon| -|DDD|年日, 001-366|| -|D,d|周日, 数字, 1-7, Sunday(1) to Saturday(7)|| -|HH24,hh24|小时, 00-23|2023-01-30 23:59:59| -|hh12,HH12, hh, HH| 小时, 01-12|2023-01-30 12:59:59PM| -|MI,mi|分钟, 00-59|| -|SS,ss|秒, 00-59|| -|MS,ms|毫秒, 000-999|| -|US,us|微秒, 000000-999999|| -|NS,ns|纳秒, 000000000-999999999|| -|TZH,tzh|时区小时|2023-01-30 11:59:59PM +08| - -**使用说明**: -- `Month`, `Day`等的输出格式是左对齐的, 右侧添加空格, 如`2023-OCTOBER -01`, `2023-SEPTEMBER-01`, 9月是月份中英文字母数最长的, 因此9月没有空格. 星期类似. -- 使用`ms`, `us`, `ns`时, 以上三种格式的输出只在精度上不同, 比如ts为 `1697182085123`, `ms` 的输出为 `123`, `us` 的输出为 `123000`, `ns` 的输出为 `123000000`. -- 时间格式中无法匹配规则的内容会直接输出. 如果想要在格式串中指定某些能够匹配规则的部分不做转换, 可以使用双引号, 如`to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. 如果想要输出双引号, 那么在双引号之前加一个反斜杠, 如 `to_char(ts, '\"yyyy-mm-dd\"')` 将会输出 `"2023-10-10"`. -- 那些输出是数字的格式, 如`YYYY`, `DD`, 大写与小写意义相同, 即`yyyy` 和 `YYYY` 可以互换. -- 推荐在时间格式中带时区信息,如果不带则默认输出的时区为服务端或客户端所配置的时区. -- 输入时间戳的精度由所查询表的精度确定, 若未指定表, 则精度为毫秒. - -#### TO_TIMESTAMP - -```sql -TO_TIMESTAMP(ts_str_literal, format_str_literal) -``` - -**功能说明**: 将字符串按照指定格式转化为时间戳. - -**返回结果数据类型**: TIMESTAMP - -**应用字段**: VARCHAR - -**嵌套子查询支持**: 适用于内层查询和外层查询 - -**适用于**: 表和超级表 - -**支持的格式**: 与`to_char`相同 - -**使用说明**: -- 若`ms`, `us`, `ns`同时指定, 那么结果时间戳包含上述三个字段的和. 如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳. -- `MONTH`, `MON`, `DAY`, `DY` 以及其他输出为数字的格式的大小写意义相同, 如 `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month`可以被替换为`MONTH` 或者`Month`. -- 如果同一字段被指定了多次, 那么前面的指定将会被覆盖. 如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, 输出年份是`2022`. -- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如'2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。 -- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`, 未指定部分使用该默认值中的对应部分. 暂不支持只指定年日而不指定月日的格式, 如'yyyy-mm-DDD', 支持'yyyy-mm-DD'. -- 如果格式串中有`AM`, `PM`等, 那么小时必须是12小时制, 范围必须是01-12. -- `to_timestamp`转换具有一定的容错机制, 在格式串和时间戳串不完全对应时, 有时也可转换, 如: `to_timestamp('200101/2', 'yyyyMM1/dd')`, 格式串中多出来的1会被丢弃. 格式串与时间戳串中多余的空格字符(空格, tab等)也会被 自动忽略. 如`to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换. 虽然`MM`等字段需要两个数字对应(只有一位时前面补0), 在`to_timestamp`时, 一个数字也可以成功转换. -- 输出时间戳的精度与查询表的精度相同, 若查询未指定表, 则输出精度为毫秒. 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')`的输出将会把微妙和纳秒进行截断. 如果指定一张纳秒表, 那么就不会发生截断, 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`. 
- - -### 时间和日期函数 - -时间和日期函数对时间戳类型进行操作。 - -所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。 - -#### NOW - -```sql -NOW() -``` - -**功能说明**:返回客户端当前系统时间。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 - -**适用于**:表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: - -- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: - b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -#### TIMEDIFF - -```sql -TIMEDIFF(expr1, expr2 [, time_unit]) -``` - -**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 - -**返回结果数据类型**:BIGINT。 - -**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 - -**适用于**:表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 -- 输入包含不符合时间日期格式的字符串则返回 NULL。 - - -#### TIMETRUNCATE - -```sql -TIMETRUNCATE(expr, time_unit [, ignore_timezone]) - -ignore_timezone: { - 0 - | 1 -} -``` - -**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 - -**适用于**:表和超级表。 - -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -- 输入包含不符合时间日期格式的字符串则返回 NULL。 -- 当使用 1d 作为时间单位对时间戳进行截断时, 可通过设置 ignore_timezone 参数指定返回结果的显示是否忽略客户端时区的影响。 - 例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为 '2020-01-01 08:00:00'。 - 而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 设置忽略时区时,返回结果为 '2020-01-01 00:00:00' - ignore_timezone 如果忽略的话,则默认值为 1 。 - - - -#### TIMEZONE - -```sql -TIMEZONE() -``` - -**功能说明**:返回客户端当前时区信息。 - -**返回结果数据类型**:VARCHAR。 - -**应用字段**:无 - -**适用于**:表和超级表。 - - -#### TODAY - -```sql -TODAY() -``` - -**功能说明**:返回客户端当日零时的系统时间。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: - b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -## 聚合函数 - -聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。 - -TDengine 支持针对数据的聚合查询。提供如下聚合函数。 - -### APERCENTILE - -```sql -APERCENTILE(expr, p [, algo_type]) - -algo_type: { - "default" - | "t-digest" -} -``` - -**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。 - -**返回数据类型**: DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**说明**: -- p值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。 -- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。 -- "t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。 - -### AVG - -```sql -AVG(expr) -``` - -**功能说明**:统计指定字段的平均值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### COUNT - -```sql -COUNT({* | expr}) -``` - -**功能说明**:统计指定字段的记录行数。 - -**返回数据类型**:BIGINT。 - -**适用数据类型**:全部类型字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 -- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。 - - -### ELAPSED - -```sql -ELAPSED(ts_primary_key [, time_unit]) -``` - -**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:TIMESTAMP。 - -**适用于**: 表,超级表,嵌套查询的外层查询 - -**说明**: -- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 -- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: - 1b(纳秒), 
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 -- order by asc/desc不影响差值的计算结果。 -- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 -- 对于普通表,不支持和group by子句组合使用。 -- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 -- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 - - -### LEASTSQUARES - -```sql -LEASTSQUARES(expr, start_val, step_val) -``` - -**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 - -**返回数据类型**:字符串表达式(斜率, 截距)。 - -**适用数据类型**:expr 必须是数值类型。 - -**适用于**:表。 - - -### SPREAD - -```sql -SPREAD(expr) -``` - -**功能说明**:统计表中某列的最大值和最小值之差。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:INTEGER, TIMESTAMP。 - -**适用于**:表和超级表。 - - -### STDDEV - -```sql -STDDEV(expr) -``` - -**功能说明**:统计表中某列的均方差。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### SUM - -```sql -SUM(expr) -``` - -**功能说明**:统计表/超级表中某列的和。 - -**返回数据类型**:DOUBLE, BIGINT。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### HYPERLOGLOG - -```sql -HYPERLOGLOG(expr) -``` - -**功能说明**: - - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 - - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:任何类型。 - -**适用于**:表和超级表。 - - -### HISTOGRAM - -```sql -HISTOGRAM(expr,bin_type, bin_description, normalized) -``` - -**功能说明**:统计数据按照用户指定区间的分布。 - -**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。 - -**适用数据类型**:数值型字段。 - -**适用于**: 表和超级表。 - -**详细说明**: -- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - - "user_input": "[1, 3, 5, 7]" - 用户指定 bin 的具体数值。 - - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, - 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, - 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 -- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 - - -### PERCENTILE - -```sql -PERCENTILE(expr, p [, p1] ... 
) -``` - -**功能说明**:统计表中某列的值百分比分位数。 - -**返回数据类型**: 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时,返回类型为VARCHAR, 格式为包含多个返回值的JSON数组。 - -**应用字段**:数值类型。 - -**适用于**:表。 - -**使用说明**: - -- *P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX; -- 同时计算针对同一列的多个分位数时,建议使用一个PERCENTILE函数和多个参数的方式,能很大程度上降低查询的响应时间。 - 比如,使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。 - - -## 选择函数 - -选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。 - -### BOTTOM - -```sql -BOTTOM(expr, k) -``` - -**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:BOTTOM 函数不支持 FILL 子句。 - -### FIRST - -```sql -FIRST(expr) -``` - -**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; -- 如果结果集中所有列全部为 NULL 值,则不返回结果。 - -### INTERP - -```sql -INTERP(expr [, ignore_null_values]) - -ignore_null_values: { - 0 - | 1 -} -``` - -**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 - -**返回数据类型**:同字段类型。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明** - -- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 -- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 -- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 -- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 -- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. -- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) -- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). 
-- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 -- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 -- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 - -### LAST - -```sql -LAST(expr) -``` - -**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 -- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 - - -### LAST_ROW - -```sql -LAST_ROW(expr) -``` - -**功能说明**:返回表/超级表的最后一条记录。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 -- 不能与 INTERVAL 一起使用。 - -### MAX - -```sql -MAX(expr) -``` - -**功能说明**:统计表/超级表中某列的值最大值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MIN - -```sql -MIN(expr) -``` - -**功能说明**:统计表/超级表中某列的值最小值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MODE - -```sql -MODE(expr) -``` - -**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,则随机输出其中某个值。 - -**返回数据类型**:与输入数据类型一致。 - -**适用数据类型**:全部类型字段。 - -**适用于**:表和超级表。 - - -### SAMPLE - -```sql -SAMPLE(expr, k) -``` - -**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - -**返回结果类型**: 同原始数据类型。 - -**适用数据类型**: 全部类型字段。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - - -### TAIL - -```sql -TAIL(expr, k [, offset_rows]) -``` - -**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 - -**参数范围**:k: [1,100] offset_val: [0,100]。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:适合于除时间主键列外的任何类型。 - -**适用于**:表、超级表。 - - -### TOP - -```sql -TOP(expr, k) -``` - -**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:TOP 函数不支持 FILL 子句。 - -### UNIQUE - -```sql -UNIQUE(expr) -``` - -**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:全部类型字段。 - -**适用于**: 表和超级表。 - - -## 时序数据特有函数 - -时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。 - -### CSUM - -```sql -CSUM(expr) -``` - -**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 - -**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 -- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - - -### DERIVATIVE - -```sql -DERIVATIVE(expr, time_interval, ignore_negative) - -ignore_negative: { - 0 - | 1 -} -``` - -**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 - -### DIFF - -```sql -DIFF(expr [, ignore_negative]) - -ignore_negative: { - 0 - | 1 -} -``` - -**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 
不忽略负值。ignore_negative 为 1 时表示忽略负数。 - -**返回数据类型**:同应用字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 输出结果行数是范围内总行数减一,第一行没有结果输出。 -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 - - -### IRATE - -```sql -IRATE(expr) -``` - -**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MAVG - -```sql -MAVG(expr, k) -``` - -**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - -**返回结果类型**: DOUBLE。 - -**适用数据类型**: 数值类型。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - - -### STATECOUNT - -```sql -STATECOUNT(expr, oper, val) -``` - -**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 - -**参数范围**: - -- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。 -- val : 数值型 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:不支持应用在子查询上。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - - -### STATEDURATION - -```sql -STATEDURATION(expr, oper, val, unit) -``` - -**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 - -**参数范围**: - -- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。 -- val : 数值型 -- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:不支持应用在子查询上。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - - -### TWA - -```sql -TWA(expr) -``` - -**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -## 系统信息函数 - -### DATABASE - -```sql -SELECT DATABASE(); -``` - -**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。 - - -### CLIENT_VERSION - -```sql -SELECT CLIENT_VERSION(); -``` - -**说明**:返回客户端版本。 - -### SERVER_VERSION - -```sql -SELECT SERVER_VERSION(); -``` - -**说明**:返回服务端版本。 - -### SERVER_STATUS - -```sql -SELECT SERVER_STATUS(); -``` - -**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。 - -### CURRENT_USER - -```sql -SELECT CURRENT_USER(); -``` - -**说明**:获取当前用户。 - - -## Geometry 函数 - -### Geometry 输入函数: - -#### ST_GeomFromText - -```sql -ST_GeomFromText(VARCHAR WKT expr) -``` - -**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。 - -**返回值类型**:GEOMETRY - -**适用数据类型**:VARCHAR - -**适用表类型**:标准表和超表 - -**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。 - -### Geometry 输出函数: - -#### ST_AsText - -```sql -ST_AsText(GEOMETRY geom) -``` - -**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。 - -**返回值类型**:VARCHAR - -**适用数据类型**:GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:输出可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。 - -### Geometry 关系函数: - -#### ST_Intersects - -```sql -ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) -``` - -##功能说明**:比较两个几何对象,并在它们相交时返回 true。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。 - -#### ST_Equals - -```sql -ST_Equals(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。 - -**返回值类型**:BOOL - 
-**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。 - -#### ST_Touches - -```sql -ST_Touches(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。 - -#### ST_Covers - -```sql -ST_Covers(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 包含 B 意味着 B 中的没有点位于 A 的外部(在外部)。 - -#### ST_Contains - -```sql -ST_Contains(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 A 包含 B,描述:如果几何形状 A 包含几何形状 B,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上)(或等效地,B 的没有点位于 A 的外部),并且 A 和 B 的内部至少有一个公共点。 - -#### ST_ContainsProperly - -```sql -ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:B 的没有点位于 A 的边界或外部。 +--- +sidebar_label: 函数 +title: 函数 +description: TDengine 支持的函数列表 +toc_max_heading_level: 4 +--- + +## 单行函数 + +单行函数为查询结果中的每一行返回一个结果行。 + +### 数学函数 + +#### ABS + +```sql +ABS(expr) +``` + +**功能说明**:获得指定字段的绝对值。 + +**返回结果类型**:与指定字段的原始数据类型一致。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### ACOS + +```sql +ACOS(expr) +``` + +**功能说明**:获得指定字段的反余弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### ASIN + +```sql +ASIN(expr) +``` + +**功能说明**:获得指定字段的反正弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### ATAN + +```sql +ATAN(expr) +``` + +**功能说明**:获得指定字段的反正切结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### CEIL + +```sql +CEIL(expr) +``` + +**功能说明**:获得指定字段的向上取整数的结果。 + +**返回结果类型**:与指定字段的原始数据类型一致。 + +**适用数据类型**:数值类型。 + +**适用于**: 表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### COS + +```sql +COS(expr) +``` + +**功能说明**:获得指定字段的余弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### FLOOR + +```sql +FLOOR(expr) +``` + +**功能说明**:获得指定字段的向下取整数的结果。 + 其他使用说明参见 CEIL 函数描述。 + +#### LOG + +```sql +LOG(expr1[, expr2]) +``` + +**功能说明**:获得 expr1 对于底数 expr2 的对数。如果 expr2 参数省略,则返回指定字段的自然对数值。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### POW + +```sql +POW(expr1, expr2) +``` + +**功能说明**:获得 expr1 的指数为 expr2 的幂。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### ROUND + +```sql +ROUND(expr) +``` + +**功能说明**:获得指定字段的四舍五入的结果。 + 其他使用说明参见 CEIL 函数描述。 + + +#### SIN + +```sql +SIN(expr) +``` + 
+**功能说明**:获得指定字段的正弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### SQRT + +```sql +SQRT(expr) +``` + +**功能说明**:获得指定字段的平方根。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### TAN + +```sql +TAN(expr) +``` + +**功能说明**:获得指定字段的正切结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +### 字符串函数 + +字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。 + +#### CHAR_LENGTH + +```sql +CHAR_LENGTH(expr) +``` + +**功能说明**:以字符计数的字符串长度。 + +**返回结果类型**:BIGINT。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +#### CONCAT + +```sql +CONCAT(expr1, expr2 [, expr] ... ) +``` + +**功能说明**:字符串连接函数。 + +**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 + +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### CONCAT_WS + +```sql +CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...) +``` + +**功能说明**:带分隔符的字符串连接函数。 + +**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 + +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LENGTH + +```sql +LENGTH(expr) +``` + +**功能说明**:以字节计数的长度。 + +**返回结果类型**:BIGINT。 + +**适用数据类型**:VARCHAR, NCHAR, VARBINARY。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LOWER + +```sql +LOWER(expr) +``` + +**功能说明**:将字符串参数值转换为全小写字母。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LTRIM + +```sql +LTRIM(expr) +``` + +**功能说明**:返回清除左边空格后的字符串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### RTRIM + +```sql +RTRIM(expr) +``` + +**功能说明**:返回清除右边空格后的字符串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### SUBSTR + +```sql +SUBSTR(expr, pos [,len]) +``` + +**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### UPPER + +```sql +UPPER(expr) +``` + +**功能说明**:将字符串参数值转换为全大写字母。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +### 转换函数 + +转换函数将值从一种数据类型转换为另一种数据类型。 + +#### CAST + +```sql +CAST(expr AS type_name) +``` + +**功能说明**:数据类型转换函数,返回 expr 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。 + +**返回结果类型**:CAST 中指定的类型(type_name)。 + +**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**: + +- 对于不能支持的类型转换会直接报错。 +- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: + 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 + 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 + 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 + +#### TO_ISO8601 + +```sql +TO_ISO8601(expr [, timezone]) +``` + +**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 + +**返回结果数据类型**:VARCHAR 类型。 + +**适用数据类型**:INTEGER, TIMESTAMP。 + 
+**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**: + +- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 +- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; +- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +#### TO_JSON + +```sql +TO_JSON(str_literal) +``` + +**功能说明**: 将字符串常量转换为 JSON 类型。 + +**返回结果数据类型**: JSON。 + +**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### TO_UNIXTIMESTAMP + +```sql +TO_UNIXTIMESTAMP(expr [, return_timestamp]) + +return_timestamp: { + 0 + | 1 +} +``` + +**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 + +**返回结果数据类型**:BIGINT, TIMESTAMP。 + +**应用字段**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- return_timestamp 指定函数返回值是否为时间戳类型,设置为1时返回 TIMESTAMP 类型,设置为0时返回 BIGINT 类型。如不指定缺省返回 BIGINT 类型。 + +#### TO_CHAR + +```sql +TO_CHAR(ts, format_str_literal) +``` + +**功能说明**: 将timestamp类型按照指定格式转换为字符串 + +**返回结果数据类型**: VARCHAR + +**应用字段**: TIMESTAMP + +**嵌套子查询支持**: 适用于内层查询和外层查询 + +**适用于**: 表和超级表 + +**支持的格式** + +| **格式** | **说明**| **例子** | +| --- | --- | --- | +|AM,am,PM,pm| 无点分隔的上午下午 | 07:00:00am| +|A.M.,a.m.,P.M.,p.m.| 有点分隔的上午下午| 07:00:00a.m.| +|YYYY,yyyy|年, 4个及以上数字| 2023-10-10| +|YYY,yyy| 年, 最后3位数字| 023-10-10| +|YY,yy| 年, 最后2位数字| 23-10-10| +|Y,y|年, 最后一位数字| 3-10-10| +|MONTH|月, 全大写| 2023-JANUARY-01| +|Month|月, 首字母大写| 2023-January-01| +|month|月, 全小写| 2023-january-01| +|MON| 月, 缩写, 全大写(三个字符)| JAN, SEP| +|Mon| 月, 缩写, 首字母大写| Jan, Sep| +|mon|月, 缩写, 全小写| jan, sep| +|MM,mm|月, 数字 01-12|2023-01-01| +|DD,dd|月日, 01-31|| +|DAY|周日, 全大写|MONDAY| +|Day|周日, 首字符大写|Monday| +|day|周日, 全小写|monday| +|DY|周日, 缩写, 全大写|MON| +|Dy|周日, 缩写, 首字符大写|Mon| +|dy|周日, 缩写, 全小写|mon| +|DDD|年日, 001-366|| +|D,d|周日, 数字, 1-7, Sunday(1) to Saturday(7)|| +|HH24,hh24|小时, 00-23|2023-01-30 23:59:59| +|hh12,HH12, hh, HH| 小时, 01-12|2023-01-30 12:59:59PM| +|MI,mi|分钟, 00-59|| +|SS,ss|秒, 00-59|| +|MS,ms|毫秒, 000-999|| +|US,us|微秒, 000000-999999|| +|NS,ns|纳秒, 000000000-999999999|| +|TZH,tzh|时区小时|2023-01-30 11:59:59PM +08| + +**使用说明**: +- `Month`, `Day`等的输出格式是左对齐的, 右侧添加空格, 如`2023-OCTOBER -01`, `2023-SEPTEMBER-01`, 9月是月份中英文字母数最长的, 因此9月没有空格. 星期类似. +- 使用`ms`, `us`, `ns`时, 以上三种格式的输出只在精度上不同, 比如ts为 `1697182085123`, `ms` 的输出为 `123`, `us` 的输出为 `123000`, `ns` 的输出为 `123000000`. +- 时间格式中无法匹配规则的内容会直接输出. 如果想要在格式串中指定某些能够匹配规则的部分不做转换, 可以使用双引号, 如`to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. 如果想要输出双引号, 那么在双引号之前加一个反斜杠, 如 `to_char(ts, '\"yyyy-mm-dd\"')` 将会输出 `"2023-10-10"`. +- 那些输出是数字的格式, 如`YYYY`, `DD`, 大写与小写意义相同, 即`yyyy` 和 `YYYY` 可以互换. +- 推荐在时间格式中带时区信息,如果不带则默认输出的时区为服务端或客户端所配置的时区. +- 输入时间戳的精度由所查询表的精度确定, 若未指定表, 则精度为毫秒. + +#### TO_TIMESTAMP + +```sql +TO_TIMESTAMP(ts_str_literal, format_str_literal) +``` + +**功能说明**: 将字符串按照指定格式转化为时间戳. + +**返回结果数据类型**: TIMESTAMP + +**应用字段**: VARCHAR + +**嵌套子查询支持**: 适用于内层查询和外层查询 + +**适用于**: 表和超级表 + +**支持的格式**: 与`to_char`相同 + +**使用说明**: +- 若`ms`, `us`, `ns`同时指定, 那么结果时间戳包含上述三个字段的和. 如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳. +- `MONTH`, `MON`, `DAY`, `DY` 以及其他输出为数字的格式的大小写意义相同, 如 `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month`可以被替换为`MONTH` 或者`Month`. +- 如果同一字段被指定了多次, 那么前面的指定将会被覆盖. 如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, 输出年份是`2022`. 
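For illustration, a minimal sketch of the TO_CHAR / TO_TIMESTAMP round-trip described in the notes above — the literal values are assumptions for the example, and it presumes these functions can be evaluated without a FROM clause, as with NOW():

```sql
-- Sketch: format the current time as a string, then parse a string of the
-- same shape back into a timestamp with the identical format string.
-- (Assumes SELECT without FROM is permitted for these functions.)
SELECT TO_CHAR(NOW(), 'yyyy-mm-dd hh24:mi:ss');
SELECT TO_TIMESTAMP('2023-10-10 10:10:10', 'yyyy-mm-dd hh24:mi:ss');
```

Parsing the string produced by the first statement with the same format string should recover the original timestamp, up to the precision the format expresses.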
+- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如'2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。 +- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`, 未指定部分使用该默认值中的对应部分. 暂不支持只指定年日而不指定月日的格式, 如'yyyy-mm-DDD', 支持'yyyy-mm-DD'. +- 如果格式串中有`AM`, `PM`等, 那么小时必须是12小时制, 范围必须是01-12. +- `to_timestamp`转换具有一定的容错机制, 在格式串和时间戳串不完全对应时, 有时也可转换, 如: `to_timestamp('200101/2', 'yyyyMM1/dd')`, 格式串中多出来的1会被丢弃. 格式串与时间戳串中多余的空格字符(空格, tab等)也会被 自动忽略. 如`to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换. 虽然`MM`等字段需要两个数字对应(只有一位时前面补0), 在`to_timestamp`时, 一个数字也可以成功转换. +- 输出时间戳的精度与查询表的精度相同, 若查询未指定表, 则输出精度为毫秒. 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')`的输出将会把微妙和纳秒进行截断. 如果指定一张纳秒表, 那么就不会发生截断, 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`. + + +### 时间和日期函数 + +时间和日期函数对时间戳类型进行操作。 + +所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。 + +#### NOW + +```sql +NOW() +``` + +**功能说明**:返回客户端当前系统时间。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 + +**适用于**:表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: + +- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: + b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +#### TIMEDIFF + +```sql +TIMEDIFF(expr1, expr2 [, time_unit]) +``` + +**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 + +**返回结果数据类型**:BIGINT。 + +**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 + +**适用于**:表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 + + +#### TIMETRUNCATE + +```sql +TIMETRUNCATE(expr, time_unit [, use_current_timezone]) + +use_current_timezone: { + 0 + | 1 +} +``` + +**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 + +**适用于**:表和超级表。 + +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 +- 当使用 1d/1w 作为时间单位对时间戳进行截断时, 可通过设置 use_current_timezone 参数指定是否根据当前时区进行截断处理。 + 值 0 表示使用 UTC 时区进行截断,值 1 表示使用当前时区进行截断。 + 例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为东八区时间 '2020-01-01 08:00:00'。 + 而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 时,返回结果为东八区时间 '2020-01-01 00:00:00'。 + 当不指定 use_current_timezone 时,use_current_timezone 默认值为 1 。 + + + +#### TIMEZONE + +```sql +TIMEZONE() +``` + +**功能说明**:返回客户端当前时区信息。 + +**返回结果数据类型**:VARCHAR。 + +**应用字段**:无 + +**适用于**:表和超级表。 + + +#### TODAY + +```sql +TODAY() +``` + +**功能说明**:返回客户端当日零时的系统时间。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: + b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +## 聚合函数 + +聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。 + +TDengine 支持针对数据的聚合查询。提供如下聚合函数。 + +### APERCENTILE + +```sql +APERCENTILE(expr, p [, algo_type]) + +algo_type: { + "default" + | "t-digest" +} +``` + +**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。 + +**返回数据类型**: DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**说明**: +- p值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。 +- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。 +- 
"t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。 + +### AVG + +```sql +AVG(expr) +``` + +**功能说明**:统计指定字段的平均值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### COUNT + +```sql +COUNT({* | expr}) +``` + +**功能说明**:统计指定字段的记录行数。 + +**返回数据类型**:BIGINT。 + +**适用数据类型**:全部类型字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 +- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。 + + +### ELAPSED + +```sql +ELAPSED(ts_primary_key [, time_unit]) +``` + +**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:TIMESTAMP。 + +**适用于**: 表,超级表,嵌套查询的外层查询 + +**说明**: +- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 +- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 +- order by asc/desc不影响差值的计算结果。 +- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 +- 对于普通表,不支持和group by子句组合使用。 +- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 +- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 + + +### LEASTSQUARES + +```sql +LEASTSQUARES(expr, start_val, step_val) +``` + +**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 + +**返回数据类型**:字符串表达式(斜率, 截距)。 + +**适用数据类型**:expr 必须是数值类型。 + +**适用于**:表。 + + +### SPREAD + +```sql +SPREAD(expr) +``` + +**功能说明**:统计表中某列的最大值和最小值之差。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:INTEGER, TIMESTAMP。 + +**适用于**:表和超级表。 + + +### STDDEV + +```sql +STDDEV(expr) +``` + +**功能说明**:统计表中某列的均方差。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### SUM + +```sql +SUM(expr) +``` + +**功能说明**:统计表/超级表中某列的和。 + +**返回数据类型**:DOUBLE, BIGINT。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### HYPERLOGLOG + +```sql +HYPERLOGLOG(expr) +``` + +**功能说明**: + - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 + - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:任何类型。 + +**适用于**:表和超级表。 + + +### HISTOGRAM + +```sql +HISTOGRAM(expr,bin_type, bin_description, normalized) +``` + +**功能说明**:统计数据按照用户指定区间的分布。 + +**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。 + +**适用数据类型**:数值型字段。 + +**适用于**: 表和超级表。 + +**详细说明**: +- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): + - "user_input": "[1, 3, 5, 7]" + 用户指定 bin 的具体数值。 + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, + 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, + 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 +- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 + + +### PERCENTILE + +```sql +PERCENTILE(expr, p [, p1] ... 
) +``` + +**功能说明**:统计表中某列的值百分比分位数。 + +**返回数据类型**: 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时,返回类型为VARCHAR, 格式为包含多个返回值的JSON数组。 + +**应用字段**:数值类型。 + +**适用于**:表。 + +**使用说明**: + +- *P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX; +- 同时计算针对同一列的多个分位数时,建议使用一个PERCENTILE函数和多个参数的方式,能很大程度上降低查询的响应时间。 + 比如,使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。 + + +## 选择函数 + +选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。 + +### BOTTOM + +```sql +BOTTOM(expr, k) +``` + +**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:BOTTOM 函数不支持 FILL 子句。 + +### FIRST + +```sql +FIRST(expr) +``` + +**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; +- 如果结果集中所有列全部为 NULL 值,则不返回结果。 + +### INTERP + +```sql +INTERP(expr [, ignore_null_values]) + +ignore_null_values: { + 0 + | 1 +} +``` + +**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 + +**返回数据类型**:同字段类型。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明** + +- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 +- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 +- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 +- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 +- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. +- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) +- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). 
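As a concrete sketch of the RANGE / EVERY / FILL combination described above — the table d1001 and its column current are assumptions for illustration:

```sql
-- Interpolate `current` at 10-minute steps across a one-hour window;
-- slots with no original row are filled from the previous value.
SELECT INTERP(current)
FROM d1001
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(10m)
FILL(PREV);
```

This query always produces seven output rows (one per 10-minute slot from timestamp1 through timestamp2), regardless of how many raw rows fall inside the window.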
+- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 +- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 +- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 + +### LAST + +```sql +LAST(expr) +``` + +**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 + + +### LAST_ROW + +```sql +LAST_ROW(expr) +``` + +**功能说明**:返回表/超级表的最后一条记录。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 +- 不能与 INTERVAL 一起使用。 + +### MAX + +```sql +MAX(expr) +``` + +**功能说明**:统计表/超级表中某列的值最大值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MIN + +```sql +MIN(expr) +``` + +**功能说明**:统计表/超级表中某列的值最小值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MODE + +```sql +MODE(expr) +``` + +**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,则随机输出其中某个值。 + +**返回数据类型**:与输入数据类型一致。 + +**适用数据类型**:全部类型字段。 + +**适用于**:表和超级表。 + + +### SAMPLE + +```sql +SAMPLE(expr, k) +``` + +**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 + +**返回结果类型**: 同原始数据类型。 + +**适用数据类型**: 全部类型字段。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + + +### TAIL + +```sql +TAIL(expr, k [, offset_rows]) +``` + +**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 + +**参数范围**:k: [1,100] offset_val: [0,100]。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:适合于除时间主键列外的任何类型。 + +**适用于**:表、超级表。 + + +### TOP + +```sql +TOP(expr, k) +``` + +**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:TOP 函数不支持 FILL 子句。 + +### UNIQUE + +```sql +UNIQUE(expr) +``` + +**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:全部类型字段。 + +**适用于**: 表和超级表。 + + +## 时序数据特有函数 + +时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。 + +### CSUM + +```sql +CSUM(expr) +``` + +**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 + +**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 + + +### DERIVATIVE + +```sql +DERIVATIVE(expr, time_interval, ignore_negative) + +ignore_negative: { + 0 + | 1 +} +``` + +**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 + +### DIFF + +```sql +DIFF(expr [, ignore_negative]) + +ignore_negative: { + 0 + | 1 +} +``` + +**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 
不忽略负值。ignore_negative 为 1 时表示忽略负数。 + +**返回数据类型**:同应用字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 输出结果行数是范围内总行数减一,第一行没有结果输出。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 + + +### IRATE + +```sql +IRATE(expr) +``` + +**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MAVG + +```sql +MAVG(expr, k) +``` + +**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 + +**返回结果类型**: DOUBLE。 + +**适用数据类型**: 数值类型。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; + + +### STATECOUNT + +```sql +STATECOUNT(expr, oper, val) +``` + +**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 + +**参数范围**: + +- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。 +- val : 数值型 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:不支持应用在子查询上。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 + + +### STATEDURATION + +```sql +STATEDURATION(expr, oper, val, unit) +``` + +**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 + +**参数范围**: + +- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。 +- val : 数值型 +- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:不支持应用在子查询上。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 + + +### TWA + +```sql +TWA(expr) +``` + +**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +## 系统信息函数 + +### DATABASE + +```sql +SELECT DATABASE(); +``` + +**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。 + + +### CLIENT_VERSION + +```sql +SELECT CLIENT_VERSION(); +``` + +**说明**:返回客户端版本。 + +### SERVER_VERSION + +```sql +SELECT SERVER_VERSION(); +``` + +**说明**:返回服务端版本。 + +### SERVER_STATUS + +```sql +SELECT SERVER_STATUS(); +``` + +**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。 + +### CURRENT_USER + +```sql +SELECT CURRENT_USER(); +``` + +**说明**:获取当前用户。 + + +## Geometry 函数 + +### Geometry 输入函数: + +#### ST_GeomFromText + +```sql +ST_GeomFromText(VARCHAR WKT expr) +``` + +**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。 + +**返回值类型**:GEOMETRY + +**适用数据类型**:VARCHAR + +**适用表类型**:标准表和超表 + +**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。 + +### Geometry 输出函数: + +#### ST_AsText + +```sql +ST_AsText(GEOMETRY geom) +``` + +**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。 + +**返回值类型**:VARCHAR + +**适用数据类型**:GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:输出可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。 + +### Geometry 关系函数: + +#### ST_Intersects + +```sql +ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) +``` + +##功能说明**:比较两个几何对象,并在它们相交时返回 true。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。 + +#### ST_Equals + +```sql +ST_Equals(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。 + +**返回值类型**:BOOL + 
+**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。 + +#### ST_Touches + +```sql +ST_Touches(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。 + +#### ST_Covers + +```sql +ST_Covers(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 包含 B 意味着 B 中的没有点位于 A 的外部(在外部)。 + +#### ST_Contains + +```sql +ST_Contains(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 A 包含 B,描述:如果几何形状 A 包含几何形状 B,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上)(或等效地,B 的没有点位于 A 的外部),并且 A 和 B 的内部至少有一个公共点。 + +#### ST_ContainsProperly + +```sql +ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:B 的没有点位于 A 的边界或外部。 diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md index 98bfd3567a..8ae3f900f4 100755 --- a/docs/zh/12-taos-sql/12-distinguished.md +++ b/docs/zh/12-taos-sql/12-distinguished.md @@ -16,7 +16,11 @@ TDengine 提供的特色查询包括数据切分查询和时间窗口切分查 PARTITION BY part_list ``` -part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。 +part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。例如,将数据按标签 location 进行分组,取每个分组内的电压平均值: +```sql +select location, avg(voltage) from meters partition by location +``` + TDengine 按如下方式处理数据切分子句: @@ -25,9 +29,13 @@ TDengine 按如下方式处理数据切分子句: - 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。 ```sql -select max(current) from meters partition by location interval(10m) +select _wstart, location, max(current) from meters partition by location interval(10m) ``` -数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。 +数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。例如,统计每个电表每 10 分钟内的电压平均值: +```sql +select _wstart, tbname, avg(voltage) from meters partition by tbname interval(10m) +``` + ## 窗口切分查询 diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index 597c188c11..bb88c2dede 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -81,13 +81,13 @@ taosBenchmark -f -#### 订阅场景 JSON 配置文件示例 +#### 订阅场景 JSON 配置文件示例
-subscribe.json +tmq.json ```json -{{#include /taos-tools/example/subscribe.json}} +{{#include /taos-tools/example/tmq.json}} ```
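For context, the subscription scenario exercises TDengine's TMQ interface, which consumes from a pre-created topic. A sketch of creating such a topic over taosBenchmark's default data set — the topic name is an assumption; test.meters is the database and super table that taosBenchmark writes by default:

```sql
-- Topic over the benchmark's default super table; subscription-scenario
-- consumers would poll data published to this topic.
CREATE TOPIC IF NOT EXISTS topic_meters AS
  SELECT ts, current, voltage, phase FROM test.meters;
```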
diff --git a/docs/zh/20-third-party/75-powerbi.md b/docs/zh/20-third-party/75-powerbi.md new file mode 100644 index 0000000000..255b23402b --- /dev/null +++ b/docs/zh/20-third-party/75-powerbi.md @@ -0,0 +1,50 @@ +--- +sidebar_label: Power BI +title: Power BI +description: 如何使用 Power BI 和 TDengine 进行时序数据分析 +--- + +# 如何使用 Power BI 和 TDengine 进行时序数据分析 + +## 方案介绍 + +使用 ODBC 连接器,Power BI 可以快速的访问 TDengine。您可以将标签数据、原始时序数据或按时间聚合后的时序数据从 TDengine 导入到 Power BI,制作报表或仪表盘,整个过程不需要任何的代码编写过程。 + +### 整体步骤 +![Power BI use step](./powerbi-step-zh.webp) + +### 前置要求 +1. TDengine 服务端软件已经安装并运行 +2. Power BI Desktop 软件已经安装并运行(如未安装,请从[官方地址](https://www.microsoft.com/zh-cn/download/details.aspx?id=58494)下载最新的 Windows X64 版本)。 + + +### 安装驱动 +从 TDengine 官网下载最新的 Windows X64 客户端驱动程序 [下载地址](https://docs.taosdata.com/get-started/package/),并安装在 Power BI 运行的机器上 + +### 配置数据源 +请参考 [ODBC](../../connector/odbc) 配置Websocket数据源。 + +### 导入 TDengine 数据到 Power BI +1. 打开 Power BI 并登录后,通过如下步骤添加数据源,“主页” -> “获取数据” -> “其他” -> “ODBC” -> “连接” +2. 选择数据源名称后,连接到配置好的数据源,进入导航器,浏览对应数据库的数据表并加载 +3. 如果需要输入 SQL 语句,可以点击“高级选项”,在展开的对话框中输入并加载数据 + + +为了更好的使用 Power BI 分析 TDengine 中的数据,您需要理解维度、度量、时序、相关性的概念,然后通过自定义的 SQL 语句导入数据。 +1. 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在 TDengine 的超级表中,使用标签列存储数据的维度信息,可以通过形如 `select distinct tbname, tag1, tag2 from supertable` 的 SQL 语法快速获得维度信息。 +2. 度量:可以用于进行计算的定量(数值)字段, 常见计算有求和、平均值和最小值等。如果测点的采集频率为秒,那么一年就有 31,536,000 条记录,把这些数据全部导入 Power BI 会严重影响其执行效率。在 TDengine 中,您可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入到 Power BI 中,具体语法参考 [TDengine 特色查询功能介绍](https://docs.taosdata.com/taos-sql/distinguished/)。 + - 窗口切分查询:比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值,这种场景下可以使用窗口子句来获得需要的降采样查询结果,对应的 SQL 语句形如 `select tbname, _wstart date,avg(temperature) temp from table interval(10m)` ,其中 _wstart 是伪列,表示时间窗口起始时间,10m 表示时间窗口的持续时间,`avg(temperature)` 表示时间窗口内的聚合值。 + - 数据切分查询:如果需要同时获取很多温度传感器的聚合数值,可对数据进行切分然后在切分出的数据空间内再进行一系列的计算,对应的 SQL 语法参考 `partition by part_list`。数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,将每个子表的数据独立出来,形成一条条独立的时间序列,方便各种时序场景的统计分析。 +3. 时序:在绘制曲线或者按照时间聚合数据时,通常需要引入日期表。日期表可以从 Excel 表格中导入,也可以在 TDengine 中执行 SQL 语句获取,例如 `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`,其中 fill 字句表示数据缺失情况下的填充模式,伪列_wstart 则为要获取的日期列。 +4. 相关性:告诉数据之间如何关联,度量和维度可以通过 tbname 列关联在一起,日期表和度量则可以通过 date 列关联,配合形成可视化报表。 + +### 智能电表样例 +TDengine 有自己独特的数据模型,它使用超级表作为模板,为每个设备创建一个表,每个表最多可创建 4096 个数据列和 128 个标签列。在智能电表样例中,假如一个电表每秒产生一条记录,一天就有 86,400 条记录,一年就有 31,536,000 条记录,1000 个电表将占用 600 GB 原始磁盘空间。因此,Power BI 更多的应用方式是将标签列映射为维度列,数据列的聚合结果导入为度量列,最终为关键决策制定者提供所需的指标。 +1. 导入维度数据 +在 Power BI 中导入表的标签列,取名为 tags,SQL 如下 +select distinct tbname, groupid, location from test.meters; +2. 导入度量数据 +在 Power BI 中,按照 1 小时的时间窗口,导入每个电表的电流均值、电压均值、相位均值,取名为 data,SQL 如下 +`select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)` ; +3. 
建立维度和度量的关联关系 +在 Power BI 中,打开模型视图,建立表 tags 和 data 的关联关系,将 tbname 设置为关联数据列。之后,就可以在柱状图、饼图等控件中使用这些数据。更多有关 Power BI 构建视觉效果的信息,请查询 [Power BI 文档](https://learn.microsoft.com/zh-cn/power-bi/)。 \ No newline at end of file diff --git a/docs/zh/20-third-party/powerbi-step-zh.webp b/docs/zh/20-third-party/powerbi-step-zh.webp new file mode 100644 index 0000000000..16e48e8aaa Binary files /dev/null and b/docs/zh/20-third-party/powerbi-step-zh.webp differ diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index cfc4b92eee..b0a81e01a1 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.2.2.0 + + + ## 3.2.1.0 diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 85c4108299..a77f44eae9 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -74,6 +74,7 @@ extern int64_t tsRpcQueueMemoryAllowed; extern int32_t tsElectInterval; extern int32_t tsHeartbeatInterval; extern int32_t tsHeartbeatTimeout; +extern int32_t tsSnapReplMaxWaitN; // vnode extern int64_t tsVndCommitMaxIntervalMs; @@ -229,8 +230,8 @@ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer); struct SConfig *taosGetCfg(); -void taosSetAllDebugFlag(int32_t flag, bool rewrite); -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite); +void taosSetAllDebugFlag(int32_t flag); +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal); void taosLocalCfgForbiddenToChange(char *name, bool *forbidden); int8_t taosGranted(); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 424bdf088f..20f64c1f79 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -32,6 +32,7 @@ extern "C" { #endif /* ------------------------ MESSAGE DEFINITIONS ------------------------ */ + #define TD_MSG_NUMBER_ #undef TD_MSG_DICT_ #undef TD_MSG_INFO_ @@ -357,7 +358,7 @@ typedef enum ENodeType { QUERY_NODE_SHOW_USER_PRIVILEGES_STMT, QUERY_NODE_SHOW_VIEWS_STMT, QUERY_NODE_SHOW_COMPACTS_STMT, - QUERY_NODE_SHOW_COMPACT_DETAILS_STMT, + QUERY_NODE_SHOW_COMPACT_DETAILS_STMT, // logic plan node QUERY_NODE_LOGIC_PLAN_SCAN = 1000, @@ -1700,7 +1701,7 @@ typedef struct { int32_t vgId; int32_t dnodeId; int32_t numberFileset; - int32_t finished; + int32_t finished; } SQueryCompactProgressRsp; int32_t tSerializeSQueryCompactProgressRsp(void* buf, int32_t bufLen, SQueryCompactProgressRsp* pReq); @@ -2544,12 +2545,6 @@ _err: return NULL; } -// this message is sent from mnode to mnode(read thread to write thread), -// so there is no need for serialization or deserialization -typedef struct { - SHashObj* rebSubHash; // SHashObj -} SMqDoRebalanceMsg; - typedef struct { int64_t streamId; int64_t checkpointId; diff --git a/include/common/tmsgcb.h b/include/common/tmsgcb.h index 311bffb7da..dd1e54318e 100644 --- a/include/common/tmsgcb.h +++ b/include/common/tmsgcb.h @@ -53,9 +53,12 @@ typedef struct { void* mgmt; void* clientRpc; void* serverRpc; + void* statusRpc; + void* syncRpc; PutToQueueFp putToQueueFp; GetQueueSizeFp qsizeFp; SendReqFp sendReqFp; + SendReqFp sendSyncReqFp; SendRspFp sendRspFp; RegisterBrokenLinkArgFp registerBrokenLinkArgFp; ReleaseHandleFp releaseHandleFp; @@ -67,6 +70,7 @@ void tmsgSetDefault(const SMsgCb* msgcb); int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg); int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t 
vgId, EQueueType qtype); int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg); +int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg); void tmsgSendRsp(SRpcMsg* pMsg); void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg); void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type); diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 5ad66c64e7..24ad5abded 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -38,7 +38,7 @@ #undef TD_NEW_MSG_SEG #undef TD_DEF_MSG_TYPE #undef TD_CLOSE_MSG_SEG - #define TD_NEW_MSG_SEG(TYPE) + #define TD_NEW_MSG_SEG(TYPE) #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) #define TD_CLOSE_MSG_SEG(TYPE) TYPE, int32_t tMsgRangeDict[] = { @@ -76,9 +76,7 @@ #define TD_CLOSE_MSG_SEG(TYPE) enum { - - -#else +#else #undef TD_NEW_MSG_SEG #undef TD_DEF_MSG_TYPE @@ -109,7 +107,7 @@ TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_DND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_DND_MSG) TD_NEW_MSG_SEG(TDMT_MND_MSG) // 1<<8 TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL) @@ -220,7 +218,7 @@ TD_DEF_MSG_TYPE(TDMT_MND_KILL_COMPACT, "kill-compact", SKillCompactReq, NULL) TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_TIMER, "compact-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp) @@ -272,7 +270,7 @@ TD_DEF_MSG_TYPE(TDMT_VND_QUERY_COMPACT_PROGRESS, "vnode-query-compact-progress", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_KILL_COMPACT, "kill-compact", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_VND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_VND_MSG) TD_NEW_MSG_SEG(TDMT_SCH_MSG) // 3<<8 TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL) @@ -287,7 +285,7 @@ TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_SCH_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_SCH_MSG) TD_NEW_MSG_SEG(TDMT_STREAM_MSG) //4 << 8 @@ -305,11 +303,11 @@ TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG) TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8 TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_MON_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_MON_MSG) TD_NEW_MSG_SEG(TDMT_SYNC_MSG) //6 << 8 TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL) @@ -341,8 +339,7 @@ TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_SYNC_MSG) - + TD_CLOSE_MSG_SEG(TDMT_END_SYNC_MSG) TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8 TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL) @@ -352,7 +349,7 @@ TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL) 
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_VND_STREAM_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_VND_STREAM_MSG) TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG) //8 << 8 TD_DEF_MSG_TYPE(TDMT_VND_TMQ_SUBSCRIBE, "vnode-tmq-subscribe", SMqRebVgReq, SMqRebVgRsp) @@ -366,13 +363,10 @@ TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_COMMITTEDINFO, "vnode-tmq-committedinfo", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_TMQ_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_TMQ_MSG) TD_NEW_MSG_SEG(TDMT_MAX_MSG) // msg end mark - TD_CLOSE_MSG_SEG(TDMT_END_MAX_MSG) - - - + TD_CLOSE_MSG_SEG(TDMT_END_MAX_MSG) #if defined(TD_MSG_NUMBER_) TDMT_MAX diff --git a/include/common/ttime.h b/include/common/ttime.h index 1dfa609064..4db25d2f71 100644 --- a/include/common/ttime.h +++ b/include/common/ttime.h @@ -75,6 +75,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) { int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision); int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval); +int64_t taosTimeGetIntervalEnd(int64_t ts, const SInterval* pInterval); int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order); int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision); @@ -105,7 +106,7 @@ int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision); /// formats array; If not NULL, [formats] will be used instead of [format] to skip parse formats again. /// @param out output buffer, should be initialized by memset /// @notes remember to free the generated formats -void taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen); +int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen); /// @brief convert a formatted timestamp string to a timestamp /// @param format must null terminated /// @param [in, out] formats, see taosTs2Char diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index bdee3934fe..445fe0737b 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -373,6 +373,7 @@ #define TK_BATCH_SCAN 606 #define TK_NO_BATCH_SCAN 607 #define TK_SORT_FOR_GROUP 608 +#define TK_PARTITION_FIRST 609 #define TK_NK_NIL 65535 diff --git a/include/dnode/vnode/tqCommon.h b/include/dnode/vnode/tqCommon.h index 75dafcdbff..39e0c07406 100644 --- a/include/dnode/vnode/tqCommon.h +++ b/include/dnode/vnode/tqCommon.h @@ -18,6 +18,7 @@ // message process int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart); +int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId); int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored); int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg); int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg); @@ -27,10 +28,14 @@ int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMs int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg); int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader); int32_t 
tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg); -int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, bool restored); +int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen, + bool isLeader, bool restored); int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen); int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader); -int32_t startStreamTasks(SStreamMeta* pMeta); -int32_t resetStreamTaskStatus(SStreamMeta* pMeta); +int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks); +int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta); +int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg); +int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg); +int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* pMsg, bool fromVnode); #endif // TDENGINE_TQ_COMMON_H diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index f78b7a3126..be11d04ff8 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -210,6 +210,7 @@ void* qExtractReaderFromStreamScanner(void* scanner); int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner); int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo); +int32_t qResetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo); int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow); int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow); int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 003e9b900a..ffaad69373 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -237,9 +237,9 @@ struct SScalarParam { int32_t numOfQualified; // number of qualified elements in the final results }; -void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell); -bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry); -bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry); +#define cleanupResultRowEntry(p) p->initialized = false +#define isRowEntryCompleted(p) (p->complete) +#define isRowEntryInitialized(p) (p->initialized) typedef struct SPoint { int64_t key; diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 878990425b..f9b795c570 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -259,9 +259,13 @@ EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow* int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet); int32_t fmGetScalarFuncExecFuncs(int32_t funcId, SScalarFuncExecFuncs* pFpSet); int32_t fmGetUdafExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet); + +#ifdef BUILD_NO_CALL int32_t fmSetInvertFunc(int32_t funcId, SFuncExecFuncs* pFpSet); int32_t fmSetNormalFunc(int32_t funcId, SFuncExecFuncs* pFpSet); bool fmIsInvertible(int32_t funcId); +#endif + char* fmGetFuncName(int32_t funcId); #ifdef __cplusplus diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 06a6fd8752..5b32ec9529 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -119,6 +119,7 @@ typedef struct SScanLogicNode { bool groupOrderScan; bool 
onlyMetaCtbIdx; // for tag scan with no tbname bool filesetDelimited; // returned blocks delimited by fileset + bool isCountByTag; // true if selectstmt hasCountFunc & part by tag/tbname } SScanLogicNode; typedef struct SJoinLogicNode { @@ -435,6 +436,7 @@ typedef struct STableScanPhysiNode { bool assignBlockUid; int8_t igCheckUpdate; bool filesetDelimited; + bool needCountEmptyTable; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 5c5172b9cd..331679a3d3 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -126,6 +126,7 @@ typedef enum EHintOption { HINT_NO_BATCH_SCAN = 1, HINT_BATCH_SCAN, HINT_SORT_FOR_GROUP, + HINT_PARTITION_FIRST, } EHintOption; typedef struct SHintNode { @@ -363,6 +364,7 @@ typedef struct SSelectStmt { bool hasLastRowFunc; bool hasLastFunc; bool hasTimeLineFunc; + bool hasCountFunc; bool hasUdaf; bool hasStateKey; bool onlyHasKeepOrderFunc; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 090a43fdd8..993bb0a2a0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -50,9 +50,13 @@ extern "C" { (_t)->hTaskInfo.id.streamId = 0; \ } while (0) -#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1) -#define STREAM_EXEC_START_ALL_TASKS_ID (-2) -#define STREAM_EXEC_RESTART_ALL_TASKS_ID (-3) +#define STREAM_EXEC_T_EXTRACT_WAL_DATA (-1) +#define STREAM_EXEC_T_START_ALL_TASKS (-2) +#define STREAM_EXEC_T_START_ONE_TASK (-3) +#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4) +#define STREAM_EXEC_T_STOP_ALL_TASKS (-5) +#define STREAM_EXEC_T_RESUME_TASK (-6) +#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7) typedef struct SStreamTask SStreamTask; typedef struct SStreamQueue SStreamQueue; @@ -80,14 +84,12 @@ typedef enum ETaskStatus { TASK_STATUS__HALT, // pause, but not be manipulated by user command TASK_STATUS__PAUSE, // pause TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to put into inputQ anymore - TASK_STATUS__STREAM_SCAN_HISTORY, } ETaskStatus; enum { TASK_SCHED_STATUS__INACTIVE = 1, TASK_SCHED_STATUS__WAITING, TASK_SCHED_STATUS__ACTIVE, - TASK_SCHED_STATUS__FAILED, TASK_SCHED_STATUS__DROPPING, }; @@ -137,15 +139,14 @@ enum { typedef enum EStreamTaskEvent { TASK_EVENT_INIT = 0x1, TASK_EVENT_INIT_SCANHIST = 0x2, - TASK_EVENT_INIT_STREAM_SCANHIST = 0x3, - TASK_EVENT_SCANHIST_DONE = 0x4, - TASK_EVENT_STOP = 0x5, - TASK_EVENT_GEN_CHECKPOINT = 0x6, - TASK_EVENT_CHECKPOINT_DONE = 0x7, - TASK_EVENT_PAUSE = 0x8, - TASK_EVENT_RESUME = 0x9, - TASK_EVENT_HALT = 0xA, - TASK_EVENT_DROPPING = 0xB, + TASK_EVENT_SCANHIST_DONE = 0x3, + TASK_EVENT_STOP = 0x4, + TASK_EVENT_GEN_CHECKPOINT = 0x5, + TASK_EVENT_CHECKPOINT_DONE = 0x6, + TASK_EVENT_PAUSE = 0x7, + TASK_EVENT_RESUME = 0x8, + TASK_EVENT_HALT = 0x9, + TASK_EVENT_DROPPING = 0xA, } EStreamTaskEvent; typedef struct { @@ -314,6 +315,7 @@ typedef struct SCheckpointInfo { int32_t checkpointNotReadyTasks; bool dispatchCheckpointTrigger; int64_t msgVer; + int32_t transId; } SCheckpointInfo; typedef struct SStreamStatus { @@ -321,9 +323,10 @@ typedef struct SStreamStatus { int8_t taskStatus; int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set int8_t schedStatus; - int8_t keepTaskStatus; - bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it - int8_t pauseAllowed; // allowed task status to be set to be paused + int32_t schedIdleTime; // idle time before invoke 
again + int64_t lastExecTs; // last exec time stamp + int8_t statusBackup; + bool appendTranstateBlock; // has append the transfer state data block already int32_t timerActive; // timer is active int32_t inScanHistorySentinel; } SStreamStatus; @@ -365,7 +368,8 @@ typedef struct STaskQueue { typedef struct STaskSchedInfo { int8_t status; - void* pTimer; + tmr_h pDelayTimer; + tmr_h pIdleTimer; } STaskSchedInfo; typedef struct SSinkRecorder { @@ -461,14 +465,18 @@ struct SStreamTask { char reserve[256]; }; +typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*); + typedef struct STaskStartInfo { int64_t startTs; int64_t readyTs; int32_t tasksWillRestart; - int32_t taskStarting; // restart flag, sentinel to guard the restart procedure. - SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing - SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed + int32_t taskStarting; // restart flag, sentinel to guard the restart procedure. + SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing + SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed int64_t elapsedTime; + int32_t restartCount; // restart task counter + startComplete_fn_t completeFn; // complete callback function } STaskStartInfo; typedef struct STaskUpdateInfo { @@ -476,6 +484,11 @@ typedef struct STaskUpdateInfo { int32_t transId; } STaskUpdateInfo; +typedef struct SScanWalInfo { + int32_t scanCounter; + tmr_h scanTimer; +} SScanWalInfo; + // meta typedef struct SStreamMeta { char* path; @@ -490,9 +503,10 @@ typedef struct SStreamMeta { int32_t vgId; int64_t stage; int32_t role; + bool sendMsgBeforeClosing; // send hb to mnode before close all tasks when switch to follower. 
STaskStartInfo startInfo; - SRWLatch lock; - int32_t walScanCounter; + TdThreadRwlock lock; + SScanWalInfo scanInfo; void* streamBackend; int64_t streamBackendRid; SHashObj* pTaskDbUnique; @@ -535,6 +549,7 @@ typedef struct { SMsgHead head; int64_t streamId; int32_t taskId; + int32_t reqType; } SStreamTaskRunReq; struct SStreamDispatchReq { @@ -547,6 +562,7 @@ struct SStreamDispatchReq { int32_t upstreamTaskId; int32_t upstreamChildId; int32_t upstreamNodeId; + int32_t upstreamRelTaskId; int32_t blockNum; int64_t totalLen; SArray* dataLen; // SArray @@ -630,6 +646,7 @@ typedef struct { int32_t nodeId; SEpSet mgmtEps; int32_t mnodeId; + int32_t transId; int64_t expireTime; } SStreamCheckpointSourceReq; @@ -669,18 +686,18 @@ typedef struct STaskStatusEntry { int32_t statusLastDuration; // to record the last duration of current status int64_t stage; int32_t nodeId; - int64_t verStart; // start version in WAL, only valid for source task - int64_t verEnd; // end version in WAL, only valid for source task - int64_t processedVer; // only valid for source task - int32_t relatedHTask; // has related fill-history task + int64_t verStart; // start version in WAL, only valid for source task + int64_t verEnd; // end version in WAL, only valid for source task + int64_t processedVer; // only valid for source task int64_t activeCheckpointId; // current active checkpoint id - bool checkpointFailed; // denote if the checkpoint is failed or not - bool inputQChanging; // inputQ is changing or not + int32_t chkpointTransId; // checkpoint trans id + bool checkpointFailed; // denote if the checkpoint is failed or not + bool inputQChanging; // inputQ is changing or not int64_t inputQUnchangeCounter; - double inputQUsed; // in MiB + double inputQUsed; // in MiB double inputRate; - double sinkQuota; // existed quota size for sink task - double sinkDataSize; // sink to dst data size + double sinkQuota; // existed quota size for sink task + double sinkDataSize; // sink to dst data size } STaskStatusEntry; typedef struct SStreamHbMsg { @@ -692,6 +709,7 @@ typedef struct SStreamHbMsg { int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp); int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp); +void streamMetaClearHbMsg(SStreamHbMsg* pMsg); typedef struct { int64_t streamId; @@ -720,6 +738,11 @@ typedef struct SStreamTaskNodeUpdateMsg { int32_t tEncodeStreamTaskUpdateMsg(SEncoder* pEncoder, const SStreamTaskNodeUpdateMsg* pMsg); int32_t tDecodeStreamTaskUpdateMsg(SDecoder* pDecoder, SStreamTaskNodeUpdateMsg* pMsg); +typedef struct SStreamTaskState { + ETaskStatus state; + char* name; +} SStreamTaskState; + typedef struct { int64_t streamId; int32_t downstreamTaskId; @@ -755,17 +778,18 @@ SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t void streamTaskInputFail(SStreamTask* pTask); int32_t streamExecTask(SStreamTask* pTask); +int32_t streamResumeTask(SStreamTask* pTask); int32_t streamSchedExec(SStreamTask* pTask); bool streamTaskShouldStop(const SStreamTask* pStatus); bool streamTaskShouldPause(const SStreamTask* pStatus); bool streamTaskIsIdle(const SStreamTask* pTask); bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus); -char* createStreamTaskIdStr(int64_t streamId, int32_t taskId); -ETaskStatus streamTaskGetStatus(const SStreamTask* pTask, char** pStr); -const char* streamTaskGetStatusStr(ETaskStatus status); -void streamTaskResetStatus(SStreamTask* pTask); -void streamTaskSetStatusReady(SStreamTask* pTask); +char* 
createStreamTaskIdStr(int64_t streamId, int32_t taskId); +SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask); +const char* streamTaskGetStatusStr(ETaskStatus status); +void streamTaskResetStatus(SStreamTask* pTask); +void streamTaskSetStatusReady(SStreamTask* pTask); void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen); @@ -783,6 +807,7 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask); int32_t streamTaskClearHTaskAttr(SStreamTask* pTask); int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event); +int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, void* pFn); int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event); void streamTaskRestoreStatus(SStreamTask* pTask); @@ -800,6 +825,7 @@ int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue); // common int32_t streamRestoreParam(SStreamTask* pTask); +int32_t streamResetParamForScanHistory(SStreamTask* pTask); void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta); void streamTaskResume(SStreamTask* pTask); int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask); @@ -810,6 +836,7 @@ int32_t streamTaskReleaseState(SStreamTask* pTask); int32_t streamTaskReloadState(SStreamTask* pTask); void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId); void streamTaskOpenAllUpstreamInput(SStreamTask* pTask); +int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key); void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); @@ -827,31 +854,36 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask); // stream task meta void streamMetaInit(); void streamMetaCleanup(); -SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage); +SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage, startComplete_fn_t fn); void streamMetaClose(SStreamMeta* streamMeta); int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey); int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded); int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); +SStreamTask* streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); -int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta); int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta); -int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key); void streamMetaStartHb(SStreamMeta* pMeta); bool streamMetaTaskInTimer(SStreamMeta* pMeta); -int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, +int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready); void streamMetaRLock(SStreamMeta* pMeta); void 
streamMetaRUnLock(SStreamMeta* pMeta); void streamMetaWLock(SStreamMeta* pMeta); void streamMetaWUnLock(SStreamMeta* pMeta); void streamMetaResetStartInfo(STaskStartInfo* pMeta); +SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta); +void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader); +int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta); +int32_t streamMetaStartAllTasks(SStreamMeta* pMeta); +int32_t streamMetaStopAllTasks(SStreamMeta* pMeta); +int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); // checkpoint int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq); diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index e54237fe8b..cb053d2548 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -46,8 +46,8 @@ extern "C" { #define SYNC_HEARTBEAT_SLOW_MS 1500 #define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500 -#define SYNC_SNAP_RESEND_MS 1000 * 300 -#define SYNC_SNAP_TIMEOUT_MS 1000 * 1800 +#define SYNC_SNAP_RESEND_MS 1000 * 60 +#define SYNC_SNAP_TIMEOUT_MS 1000 * 300 #define SYNC_VND_COMMIT_MIN_MS 3000 diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index accb7e6f24..e7b0b25653 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -163,7 +163,8 @@ int rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc in // These functions will not be called in the child process int rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx); int rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp); -int rpcSendRecvWithTimeout(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp, int32_t timeoutMs); +int rpcSendRecvWithTimeout(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp, int8_t *epUpdated, + int32_t timeoutMs); int rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn); void *rpcAllocHandle(); void rpcSetIpWhite(void *thandl, void *arg); @@ -171,6 +172,7 @@ void rpcSetIpWhite(void *thandl, void *arg); int32_t rpcUtilSIpRangeToStr(SIpV4Range *pRange, char *buf); int32_t rpcUtilSWhiteListToStr(SIpWhiteList *pWhiteList, char **ppBuf); +int32_t rpcCvtErrCode(int32_t code); #ifdef __cplusplus } diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 06e6b8d041..b41078dfbf 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -68,6 +68,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" #define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // #define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // +#define TSDB_CODE_RPC_NETWORK_ERROR TAOS_DEF_ERROR_CODE(0, 0x0023) @@ -355,6 +356,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL TAOS_DEF_ERROR_CODE(0, 0x03D5) #define TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x03D6) //internal #define TSDB_CODE_MND_TRANS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7) +#define TSDB_CODE_MND_TRANS_CTX_SWITCH TAOS_DEF_ERROR_CODE(0, 0x03D8) #define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03DF) // mnode-mq @@ -760,6 +762,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_FORMAT_ERR TAOS_DEF_ERROR_CODE(0, 0x2806) #define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_TS_ERR TAOS_DEF_ERROR_CODE(0, 0x2807) #define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_NOT_SUPPORTED 
TAOS_DEF_ERROR_CODE(0, 0x2808) +#define TSDB_CODE_FUNC_TO_CHAR_NOT_SUPPORTED TAOS_DEF_ERROR_CODE(0, 0x2809) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) diff --git a/include/util/tconfig.h b/include/util/tconfig.h index 07abdd223d..f2a9446700 100644 --- a/include/util/tconfig.h +++ b/include/util/tconfig.h @@ -94,7 +94,7 @@ typedef struct SConfigItem { int64_t imax; double fmax; }; - SArray *array; // SDiskCfg + SArray *array; // SDiskCfg/SLogVar } SConfigItem; typedef struct { diff --git a/include/util/tdef.h b/include/util/tdef.h index 51b0b63da2..aee20514ad 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -109,12 +109,12 @@ extern const int32_t TYPE_BYTES[21]; #define TSDB_INS_USER_STABLES_DBNAME_COLID 2 static const int64_t TICK_PER_SECOND[] = { - 1000LL, // MILLISECOND - 1000000LL, // MICROSECOND - 1000000000LL, // NANOSECOND - 0LL, // HOUR - 0LL, // MINUTE - 1LL // SECOND + 1000LL, // MILLISECOND + 1000000LL, // MICROSECOND + 1000000000LL, // NANOSECOND + 0LL, // HOUR + 0LL, // MINUTE + 1LL // SECOND }; #define TSDB_TICK_PER_SECOND(precision) \ @@ -239,8 +239,8 @@ typedef enum ELogicConditionType { #define TSDB_MAX_SQL_SHOW_LEN 1024 #define TSDB_MAX_ALLOWED_SQL_LEN (1 * 1024 * 1024u) // sql length should be less than 1mb -#define TSDB_VIEW_NAME_LEN 193 -#define TSDB_VIEW_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_VIEW_NAME_LEN + TSDB_NAME_DELIMITER_LEN) +#define TSDB_VIEW_NAME_LEN 193 +#define TSDB_VIEW_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_VIEW_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_APP_NAME_LEN TSDB_UNI_LEN #define TSDB_TB_COMMENT_LEN 1025 @@ -260,7 +260,7 @@ typedef enum ELogicConditionType { #define TSDB_PASSWORD_LEN 32 #define TSDB_USET_PASSWORD_LEN 129 #define TSDB_VERSION_LEN 32 -#define TSDB_LABEL_LEN 12 +#define TSDB_LABEL_LEN 16 #define TSDB_JOB_STATUS_LEN 32 #define TSDB_CLUSTER_ID_LEN 40 @@ -272,6 +272,7 @@ typedef enum ELogicConditionType { #define TSDB_SHOW_SCHEMA_JSON_LEN TSDB_MAX_COLUMNS * 256 #define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SHOW_SUBQUERY_LEN 1000 +#define TSDB_LOG_VAR_LEN 32 #define TSDB_TRANS_STAGE_LEN 12 #define TSDB_TRANS_TYPE_LEN 16 @@ -287,7 +288,7 @@ typedef enum ELogicConditionType { #define TSDB_ACTIVE_KEY_LEN 109 #define TSDB_CONN_ACTIVE_KEY_LEN 255 -#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE +#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE #define TSDB_SNAP_DATA_PAYLOAD_SIZE (1 * 1024 * 1024) #define TSDB_PAYLOAD_SIZE TSDB_DEFAULT_PKT_SIZE @@ -396,13 +397,13 @@ typedef enum ELogicConditionType { #define TSDB_MAX_STT_TRIGGER 1 #define TSDB_DEFAULT_SST_TRIGGER 1 #endif -#define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY -#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN) -#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2) -#define TSDB_DEFAULT_HASH_PREFIX 0 -#define TSDB_MIN_HASH_SUFFIX (2 - TSDB_TABLE_NAME_LEN) -#define TSDB_MAX_HASH_SUFFIX (TSDB_TABLE_NAME_LEN - 2) -#define TSDB_DEFAULT_HASH_SUFFIX 0 +#define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY +#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN) +#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2) +#define TSDB_DEFAULT_HASH_PREFIX 0 +#define TSDB_MIN_HASH_SUFFIX (2 - TSDB_TABLE_NAME_LEN) +#define TSDB_MAX_HASH_SUFFIX (TSDB_TABLE_NAME_LEN - 2) +#define TSDB_DEFAULT_HASH_SUFFIX 0 #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1 #define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600 @@ -503,6 +504,10 @@ 
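The reindented `TICK_PER_SECOND` table above maps a time-precision value to ticks per second (the 0 entries mark values that are not valid storage precisions). A self-contained example of the lookup, using the conventional TDengine precision values as an assumption (0 = milliseconds, 1 = microseconds, 2 = nanoseconds):

```c
#include <stdint.h>
#include <stdio.h>

/* Mirror of the table above; indices follow the usual TDengine precision
 * constants (assumed here): 0 = ms, 1 = us, 2 = ns. */
static const int64_t TICK_PER_SECOND[] = {1000LL, 1000000LL, 1000000000LL};

int main(void) {
  int     precision = 1;  /* microsecond precision */
  int64_t seconds   = 60; /* one minute */
  printf("%lld ticks\n", (long long)(seconds * TICK_PER_SECOND[precision]));
  return 0;               /* prints 60000000 ticks */
}
```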
typedef struct { int32_t primary; } SDiskCfg; +typedef struct { + char name[TSDB_LOG_VAR_LEN]; +} SLogVar; + #define TMQ_SEPARATOR ':' enum { diff --git a/include/util/tlog.h b/include/util/tlog.h index 6d393bfefb..11ac0e1fae 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -67,6 +67,7 @@ extern int32_t smaDebugFlag; extern int32_t idxDebugFlag; extern int32_t tdbDebugFlag; extern int32_t sndDebugFlag; +extern int32_t simDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles); void taosCloseLog(); diff --git a/include/util/tworker.h b/include/util/tworker.h index 8508adf052..f39540d24b 100644 --- a/include/util/tworker.h +++ b/include/util/tworker.h @@ -23,7 +23,6 @@ extern "C" { #endif -typedef struct SQWorkerPool SQWorkerPool; typedef struct SWWorkerPool SWWorkerPool; typedef struct SQueueWorker { @@ -60,14 +59,14 @@ typedef struct SWWorker { SWWorkerPool *pool; } SWWorker; -typedef struct SWWorkerPool { +struct SWWorkerPool { int32_t max; // max number of workers int32_t num; int32_t nextId; // from 0 to max-1, cyclic const char *name; SWWorker *workers; TdThreadMutex mutex; -} SWWorkerPool; +}; int32_t tQWorkerInit(SQWorkerPool *pool); void tQWorkerCleanup(SQWorkerPool *pool); diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index eca75ce71a..c029b1871a 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -87,6 +87,7 @@ cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_h cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/udfd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 7d90beac1c..99a10e0285 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:18.04 +FROM ubuntu:latest WORKDIR /root diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index e93af2470a..b846cd447b 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -82,6 +82,7 @@ cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper ]; then cp %{_compiledir}/../build-taoskeeper/taoskeeper %{buildroot}%{homepath}/bin diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 02ebb182fa..ae774a3289 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -621,8 +621,7 @@ function install_share_etc() { ${csudo} cp ${script_dir}/share/srv/* ${service_config_dir} ||: } -function install_log() { - ${csudo}rm -rf ${log_dir} || : +function install_log() { ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} ${csudo}ln -sf ${log_dir} ${install_main_dir}/log @@ -935,7 +934,9 @@ function updateProduct() { install_adapter_service install_adapter_config install_keeper_service - install_keeper_config + if [ "${verMode}" != "cloud" ]; then + install_keeper_config + fi openresty_work=false @@ -1036,8 +1037,9 @@ function 
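The `tworker.h` hunk above converts `typedef struct SWWorkerPool {...} SWWorkerPool;` into a plain `struct SWWorkerPool {...};` body, presumably because the typedef already exists as a forward declaration elsewhere and repeating a typedef is an error before C11. A minimal sketch of that C pattern with hypothetical names — typedef once up front, define the body later without re-typedefing:

```c
/* header: forward-declare and typedef exactly once (names are illustrative) */
typedef struct SPool SPool;

int poolInit(SPool *pool);

/* later, possibly in another header: define the body, no second typedef */
struct SPool {
  int         max;  /* max number of workers */
  int         num;  /* workers currently running */
  const char *name;
};

int poolInit(SPool *pool) {
  pool->num = 0;
  return 0;
}
```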
installProduct() { install_adapter_service install_adapter_config install_keeper_service - install_keeper_config - + if [ "${verMode}" != "cloud" ]; then + install_keeper_config + fi openresty_work=false diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 1ec83b7b0d..36298a291e 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -401,7 +401,14 @@ echo echo "Do you want to remove all the data, log and configuration files? [y/n]" read answer if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then + confirmMsg="I confirm that I would like to delete all data, log and configuration files" + echo "Please enter '${confirmMsg}' to continue" + read answer + if [ X"$answer" == X"${confirmMsg}" ]; then remove_data_and_config + else + echo "answer doesn't match, skip this step" + fi fi echo diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss index 8676fa2c51..ca6b5a3e5f 100644 --- a/packaging/tools/tdengine.iss +++ b/packaging/tools/tdengine.iss @@ -39,6 +39,8 @@ Compression=lzma SolidCompression=yes DisableDirPage=yes Uninstallable=yes +ArchitecturesAllowed=x64 +ArchitecturesInstallIn64BitMode=x64 [Languages] Name: "chinesesimp"; MessagesFile: "compiler:Default.isl" @@ -53,6 +55,7 @@ Source: favicon.ico; DestDir: "{app}\include"; Flags: igNoreversion; Source: {#MyAppSourceDir}{#MyAppDLLName}; DestDir: "{win}\System32"; Flags: igNoreversion recursesubdirs createallsubdirs 64bit;Check:IsWin64; Source: {#MyAppSourceDir}{#MyAppCfgName}; DestDir: "{app}\cfg"; Flags: igNoreversion recursesubdirs createallsubdirs onlyifdoesntexist uninsneveruninstall Source: {#MyAppSourceDir}{#MyAppDriverName}; DestDir: "{app}\driver"; Flags: igNoreversion recursesubdirs createallsubdirs +Source: {#MyAppSourceDir}\taos_odbc\*; DestDir: "{app}\taos_odbc\"; Flags: igNoreversion recursesubdirs createallsubdirs ;Source: {#MyAppSourceDir}{#MyAppConnectorName}; DestDir: "{app}\connector"; Flags: igNoreversion recursesubdirs createallsubdirs ;Source: {#MyAppSourceDir}{#MyAppExamplesName}; DestDir: "{app}\examples"; Flags: igNoreversion recursesubdirs createallsubdirs Source: {#MyAppSourceDir}{#MyAppIncludeName}; DestDir: "{app}\include"; Flags: igNoreversion recursesubdirs createallsubdirs @@ -66,6 +69,7 @@ Source: {#MyAppSourceDir}\taosdump.exe; DestDir: "{app}"; DestName: "{#CusPrompt [run] Filename: {sys}\sc.exe; Parameters: "create taosd start= DEMAND binPath= ""C:\\TDengine\\taosd.exe --win_service""" ; Flags: runhidden Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe""" ; Flags: runhidden +Filename: "{cmd}"; Parameters: "/c odbcconf /F ""C:\TDengine\taos_odbc\win_odbcinst.in"""; WorkingDir: "{app}"; Flags: runhidden; StatusMsg: "Configuring ODBC" [UninstallRun] RunOnceId: "stoptaosd"; Filename: {sys}\sc.exe; Parameters: "stop taosd" ; Flags: runhidden @@ -95,6 +99,43 @@ begin Result := Pos(';' + Param + ';', ';' + OrigPath + ';') = 0; end; +function DeleteOdbcDsnRegistry: Boolean; +var + Names: TArrayOfString; + I: Integer; + Value: String; +begin + if RegGetValueNames(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources', Names) then + begin + for I := 0 to GetArrayLength(Names) - 1 do + begin + if RegQueryStringValue(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources', Names[I], Value) then + begin + if Value = 'TDengine' then + begin + RegDeleteKeyIncludingSubkeys(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\' + Names[I]); + RegDeleteValue(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources\', Names[I]); + end; + end; 
+ end; + end; + Result := True; +end; + +function DeleteOdbcDriverRegistry: Boolean; +begin + RegDeleteKeyIncludingSubkeys(HKLM64, 'SOFTWARE\ODBC\ODBCINST.INI\TDengine'); + RegDeleteValue(HKLM64, 'SOFTWARE\ODBC\ODBCINST.INI\ODBC Drivers', 'TDengine'); + Result := True; +end; + + +procedure DeinitializeUninstall(); +begin + DeleteOdbcDsnRegistry(); + DeleteOdbcDriverRegistry(); +end; + [UninstallDelete] Name: {app}\driver; Type: filesandordirs Name: {app}\connector; Type: filesandordirs diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index b4ee619332..a6d5039be7 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -273,6 +273,7 @@ typedef struct SRequestObj { bool killed; bool inRetry; bool isSubReq; + bool inCallback; uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog uint32_t retry; int64_t allocatorRefId; diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 9ae28dd55e..b732abffb1 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -171,7 +171,6 @@ typedef struct { int8_t precision; bool reRun; bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol) - bool isRawLine; int32_t ttl; int32_t uid; // used for automatic create child table diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 77cda347a4..b6c5701915 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -336,6 +336,7 @@ void *createRequest(uint64_t connId, int32_t type, int64_t reqid) { pRequest->pDb = getDbOfConnection(pTscObj); pRequest->pTscObj = pTscObj; + pRequest->inCallback = false; pRequest->msgBuf = taosMemoryCalloc(1, ERROR_MSG_BUF_DEFAULT_SIZE); pRequest->msgBufLen = ERROR_MSG_BUF_DEFAULT_SIZE; diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 1de5941ac3..b3d9a9ced5 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -902,6 +902,7 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req if (TSDB_CODE_SUCCESS != code) { return code; } + taosHashPut(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(uint64_t), NULL, 0); } // invoke after hbGetExpiredUserInfo @@ -1121,7 +1122,6 @@ static void *hbThreadFunc(void *param) { asyncSendMsgToServer(pAppInstInfo->pTransporter, &epSet, &transporterId, pInfo); tFreeClientHbBatchReq(pReq); // hbClearReqInfo(pAppHbMgr); - taosHashPut(clientHbMgr.appHbHash, &pAppHbMgr->pAppInstInfo->clusterId, sizeof(uint64_t), NULL, 0); atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1); } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index f615111b7f..28a29c2138 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1711,6 +1711,7 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU taos_fetch_rows_a(pRequest, syncFetchFn, &sem); tsem_wait(&sem); tsem_destroy(&sem); + pRequest->inCallback = false; } if (pResultInfo->numOfRows == 0 || pRequest->code != TSDB_CODE_SUCCESS) { @@ -2490,6 +2491,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) { if (param->pRequest != NULL) { param->pRequest->syncQuery = true; pRequest = param->pRequest; + param->pRequest->inCallback = false; } taosMemoryFree(param); @@ -2607,7 +2609,14 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param } void doRequestCallback(SRequestObj* pRequest, 
int32_t code) { + pRequest->inCallback = true; + int64_t this = pRequest->self; pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code); + SRequestObj* pReq = acquireRequest(this); + if (pReq != NULL) { + pReq->inCallback = false; + releaseRequest(this); + } } int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) { diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 1ce7c02dcf..b99654172c 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -418,6 +418,12 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { return NULL; } + if(pRequest->inCallback) { + tscError("can not call taos_fetch_row before query callback ends."); + terrno = TSDB_CODE_TSC_INVALID_OPERATION; + return NULL; + } + return doAsyncFetchRows(pRequest, true, true); } else if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) { SMqRspObj *msg = ((SMqRspObj *)res); @@ -1309,6 +1315,10 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } SRequestObj *pRequest = res; + if (TSDB_SQL_RETRIEVE_EMPTY_RESULT == pRequest->type) { + fp(param, res, 0); + return; + } taosAsyncFetchImpl(pRequest, fp, param); } diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 67b23792ad..6bcdb4e973 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1640,6 +1640,37 @@ int32_t smlClearForRerun(SSmlHandle *info) { return TSDB_CODE_SUCCESS; } +static bool getLine(SSmlHandle *info, char *lines[], char **rawLine, char *rawLineEnd, + int numLines, int i, char** tmp, int *len){ + if (lines) { + *tmp = lines[i]; + *len = strlen(*tmp); + } else if (*rawLine) { + *tmp = *rawLine; + while (*rawLine < rawLineEnd) { + if (*((*rawLine)++) == '\n') { + break; + } + (*len)++; + } + if (info->protocol == TSDB_SML_LINE_PROTOCOL && (*tmp)[0] == '#') { // this line is comment + return false; + } + } + + if(*rawLine != NULL && (uDebugFlag & DEBUG_DEBUG)){ + char* print = taosMemoryCalloc(*len + 1, 1); + memcpy(print, *tmp, *len); + uDebug("SML:0x%" PRIx64 " smlParseLine is raw, numLines:%d, protocol:%d, len:%d, data:%s", info->id, + numLines, info->protocol, *len, print); + taosMemoryFree(print); + }else{ + uDebug("SML:0x%" PRIx64 " smlParseLine is not numLines:%d, protocol:%d, len:%d, data:%s", info->id, + numLines, info->protocol, *len, *tmp); + } + return true; +} + static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) { uDebug("SML:0x%" PRIx64 " smlParseLine start", info->id); int32_t code = TSDB_CODE_SUCCESS; @@ -1661,25 +1692,9 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char while (i < numLines) { char *tmp = NULL; int len = 0; - if (lines) { - tmp = lines[i]; - len = strlen(tmp); - } else if (rawLine) { - tmp = rawLine; - while (rawLine < rawLineEnd) { - if (*(rawLine++) == '\n') { - break; - } - len++; - } - if (info->protocol == TSDB_SML_LINE_PROTOCOL && tmp[0] == '#') { // this line is comment - continue; - } + if(!getLine(info, lines, &rawLine, rawLineEnd, numLines, i, &tmp, &len)){ + continue; } - - uDebug("SML:0x%" PRIx64 " smlParseLine israw:%d, numLines:%d, protocol:%d, len:%d, sql:%s", info->id, - info->isRawLine, numLines, info->protocol, len, info->isRawLine ? 
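The new `inCallback` flag threaded through `doRequestCallback()` above exists to reject re-entrant synchronous fetches: `taos_fetch_row()` now fails with `TSDB_CODE_TSC_INVALID_OPERATION` while a query callback is still running, since a blocking fetch issued from inside the callback would stall the result pipeline that is driving the callback. A hedged sketch of the pattern the guard pushes callers toward, assuming the standard `taos.h` client header — keep fetching asynchronously from inside the callback:

```c
#include <taos.h>
#include <stdio.h>

/* Inside a query callback, chain taos_fetch_rows_a(); the blocking
 * taos_fetch_row() would now be rejected here. */
static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows > 0) {
    taos_fetch_rows_a(res, fetch_cb, param); /* request the next batch */
  } else {
    taos_free_result(res);
  }
}

static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0) {
    taos_fetch_rows_a(res, fetch_cb, param); /* async fetch, no blocking */
  } else {
    taos_free_result(res);
  }
}
```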
"rawdata" : tmp); - if (info->protocol == TSDB_SML_LINE_PROTOCOL) { if (info->dataFormat) { SSmlLineInfo element = {0}; @@ -1699,7 +1714,14 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE; } if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, info->isRawLine ? "rawdata" : tmp); + if(rawLine != NULL){ + char* print = taosMemoryCalloc(len + 1, 1); + memcpy(print, tmp, len); + uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, print); + taosMemoryFree(print); + }else{ + uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp); + } return code; } if (info->reRun) { @@ -1828,7 +1850,6 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, return (TAOS_RES *)request; } info->pRequest = request; - info->isRawLine = rawLine != NULL; info->ttl = ttl; info->precision = precision; info->protocol = (TSDB_SML_PROTOCOL_TYPE)protocol; @@ -1934,8 +1955,7 @@ TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLi reqid); } -TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, - int precision, int32_t ttl, int64_t reqid) { +static void getRawLineLen(char *lines, int len, int32_t *totalRows, int protocol){ int numLines = 0; *totalRows = 0; char *tmp = lines; @@ -1948,6 +1968,11 @@ TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int tmp = lines + i + 1; } } +} + +TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision, int32_t ttl, int64_t reqid) { + getRawLineLen(lines, len, totalRows, protocol); return taos_schemaless_insert_inner(taos, NULL, lines, lines + len, *totalRows, protocol, precision, ttl, reqid); } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index d05cdc0156..28e269ee10 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1028,11 +1028,16 @@ static void tmqMgmtInit(void) { } } +#define SET_ERROR_MSG(MSG) if(errstr!=NULL)snprintf(errstr,errstrLen,MSG); tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { - if(conf == NULL) return NULL; + if(conf == NULL) { + SET_ERROR_MSG("configure is null") + return NULL; + } taosThreadOnce(&tmqInit, tmqMgmtInit); if (tmqInitRes != 0) { terrno = tmqInitRes; + SET_ERROR_MSG("tmq timer init error") return NULL; } @@ -1040,6 +1045,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; tscError("failed to create consumer, groupId:%s, code:%s", conf->groupId, terrstr()); + SET_ERROR_MSG("malloc tmq failed") return NULL; } @@ -1055,6 +1061,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { conf->groupId[0] == 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; tscError("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, terrstr(), pTmq->groupId); + SET_ERROR_MSG("malloc tmq element failed or group is empty") goto _failed; } @@ -1086,6 +1093,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (tsem_init(&pTmq->rspSem, 0, 0) != 0) { tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); + SET_ERROR_MSG("init t_sem failed") goto _failed; } @@ -1094,11 +1102,13 
@@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq->pTscObj == NULL) { tscError("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, terrstr(), pTmq->groupId); tsem_destroy(&pTmq->rspSem); + SET_ERROR_MSG("init tscObj failed") goto _failed; } pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq); if (pTmq->refId < 0) { + SET_ERROR_MSG("add tscObj ref failed") goto _failed; } @@ -1958,7 +1968,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { void* rspObj = NULL; int64_t startTime = taosGetTimestampMs(); - tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime, + tscDebug("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime, timeout); // in no topic status, delayed task also need to be processed @@ -2005,7 +2015,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { int64_t currentTime = taosGetTimestampMs(); int64_t elapsedTime = currentTime - startTime; if (elapsedTime > timeout) { - tscInfo("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, + tscDebug("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, tmq->consumerId, tmq->epoch, startTime, currentTime); return NULL; } diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index e78783cf3c..e6519a436e 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -939,8 +939,8 @@ TEST(clientCase, agg_query_tables) { } taos_free_result(pRes); - int64_t st = 1685959293000; - for (int32_t i = 0; i < 10000000; ++i) { + int64_t st = 1685959293299; + for (int32_t i = 0; i < 5; ++i) { char s[256] = {0}; while (1) { @@ -954,16 +954,16 @@ TEST(clientCase, agg_query_tables) { } } - while (1) { - sprintf(s, "insert into t2 values(%ld, %d)", st + i, i); - pRes = taos_query(pConn, s); - int32_t ret = taos_errno(pRes); - - taos_free_result(pRes); - if (ret == 0) { - break; - } - } +// while (1) { +// sprintf(s, "insert into t2 values(%ld, %d)", st + i, i); +// pRes = taos_query(pConn, s); +// int32_t ret = taos_errno(pRes); +// +// taos_free_result(pRes); +// if (ret == 0) { +// break; +// } +// } } // pRes = taos_query(pConn, "show table distributed tup"); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 2e52c77080..75a54a0cd5 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -166,7 +166,7 @@ static const SSysDbTableSchema streamTaskSchema[] = { {.name = "node_type", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index dbf9838527..144c145214 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ 
-58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4; int32_t tsNumOfMnodeFetchThreads = 1; int32_t tsNumOfMnodeReadThreads = 1; int32_t tsNumOfVnodeQueryThreads = 4; -float tsRatioOfVnodeStreamThreads = 4.0; +float tsRatioOfVnodeStreamThreads = 1.0; int32_t tsNumOfVnodeFetchThreads = 4; int32_t tsNumOfVnodeRsmaThreads = 2; int32_t tsNumOfQnodeQueryThreads = 4; @@ -72,6 +72,7 @@ int32_t tsPQSortMemThreshold = 16; // M int32_t tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; int32_t tsHeartbeatTimeout = 20 * 1000; +int32_t tsSnapReplMaxWaitN = 128; // mnode int64_t tsMndSdbWriteDelta = 200; @@ -95,9 +96,9 @@ int32_t tsMonitorMaxLogs = 100; bool tsMonitorComp = false; // audit -bool tsEnableAudit = true; -bool tsEnableAuditCreateTable = true; -int32_t tsAuditInterval = 5000; +bool tsEnableAudit = true; +bool tsEnableAuditCreateTable = true; +int32_t tsAuditInterval = 5000; // telem #ifdef TD_ENTERPRISE @@ -108,7 +109,7 @@ bool tsEnableTelem = true; int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.tdengine.com"; uint16_t tsTelemPort = 80; -char * tsTelemUri = "/report"; +char *tsTelemUri = "/report"; #ifdef TD_ENTERPRISE bool tsEnableCrashReport = false; @@ -252,7 +253,7 @@ int32_t tsCompactPullupInterval = 10; int32_t tsMqRebalanceInterval = 2; int32_t tsStreamCheckpointInterval = 60; float tsSinkDataRate = 2.0; -int32_t tsStreamNodeCheckInterval = 15; +int32_t tsStreamNodeCheckInterval = 16; int32_t tsTtlUnit = 86400; int32_t tsTtlPushIntervalSec = 10; int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups @@ -283,7 +284,7 @@ int32_t tsS3BlockCacheSize = 16; // number of blocks int32_t tsS3PageCacheSize = 4096; // number of pages int32_t tsS3UploadDelaySec = 60 * 60 * 24; -bool tsExperimental = true; +bool tsExperimental = true; #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -424,7 +425,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "asyncLog", tsAsyncLog, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "logKeepDays", 0, -365000, 365000, CFG_SCOPE_BOTH, CFG_DYN_ENT_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; - if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1; + if (cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; @@ -498,7 +499,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0) return -1; - if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER, CFG_DYN_CLIENT) != 0) return -1; + if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0) return -1; if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT, CFG_DYN_NONE) != 0) return -1; @@ -622,7 +623,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { 0) return -1; - if (cfgAddFloat(pCfg, 
"ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER, + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 10, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1; @@ -674,6 +675,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1; + if (cfgAddInt32(pCfg, "syncSnapReplMaxWaitN", tsSnapReplMaxWaitN, 16, + (TSDB_SYNC_SNAP_BUFFER_SIZE >> 2), CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) + return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; @@ -692,8 +696,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1; - if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) - return -1; + if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1; if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER) != 0) return -1; @@ -712,8 +715,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != - 0) + if (cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, + CFG_DYN_ENT_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) @@ -792,7 +795,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 60 * 10, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, + if (cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 60 * 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; @@ -965,6 +968,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) { rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32; qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32; cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32; + simDebugFlag = cfgGetItem(pCfg, "simDebugFlag")->i32; } static void taosSetServerLogCfg(SConfig *pCfg) { @@ -1185,6 +1189,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsElectInterval = cfgGetItem(pCfg, "syncElectInterval")->i32; tsHeartbeatInterval = cfgGetItem(pCfg, "syncHeartbeatInterval")->i32; tsHeartbeatTimeout = cfgGetItem(pCfg, "syncHeartbeatTimeout")->i32; + tsSnapReplMaxWaitN = cfgGetItem(pCfg, "syncSnapReplMaxWaitN")->i32; tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64; tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64; @@ -1283,7 +1288,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi taosSetServerLogCfg(pCfg); } - taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false); + 
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32); if (taosMulModeMkDir(tsLogDir, 0777, true) != 0) { terrno = TAOS_SYSTEM_ERROR(errno); @@ -1361,6 +1366,8 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile taosSetSystemCfg(tsCfg); if (taosSetFileHandlesLimit() != 0) return -1; + taosSetAllDebugFlag(cfgGetItem(tsCfg, "debugFlag")->i32); + cfgDumpCfg(tsCfg, tsc, false); if (taosCheckGlobalCfg() != 0) { @@ -1379,7 +1386,7 @@ void taosCleanupCfg() { typedef struct { const char *optionName; - void * optionVar; + void *optionVar; } OptionNameAndVar; static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, SConfigItem *pItem, bool isDebugflag) { @@ -1392,7 +1399,7 @@ static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, switch (pItem->dtype) { case CFG_DTYPE_BOOL: { int32_t flag = pItem->i32; - bool * pVar = pOptions[d].optionVar; + bool *pVar = pOptions[d].optionVar; uInfo("%s set from %d to %d", optName, *pVar, flag); *pVar = flag; terrno = TSDB_CODE_SUCCESS; @@ -1404,7 +1411,7 @@ static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, *pVar = flag; if (isDebugflag) { - taosSetDebugFlag(pOptions[d].optionVar, optName, flag, true); + taosSetDebugFlag(pOptions[d].optionVar, optName, flag); } terrno = TSDB_CODE_SUCCESS; } break; @@ -1437,6 +1444,13 @@ static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) { terrno = TSDB_CODE_SUCCESS; + if (strcasecmp(name, "resetlog") == 0) { + // trigger, no item in cfg + taosResetLog(); + cfgDumpCfg(tsCfg, 0, false); + return 0; + } + SConfigItem *pItem = cfgGetItem(pCfg, name); if (!pItem || (pItem->dynScope & CFG_DYN_SERVER) == 0) { uError("failed to config:%s, not support", name); @@ -1445,14 +1459,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) { } if (strncasecmp(name, "debugFlag", 9) == 0) { - int32_t flag = pItem->i32; - taosSetAllDebugFlag(flag, true); - return 0; - } - - if (strcasecmp(name, "resetlog") == 0) { - taosResetLog(); - cfgDumpCfg(tsCfg, 0, false); + taosSetAllDebugFlag(pItem->i32); return 0; } @@ -1464,43 +1471,41 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) { {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, - {"jniDebugFlag", &jniDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, + {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, }; - static OptionNameAndVar options[] = { - {"audit", &tsEnableAudit}, - {"asynclog", &tsAsyncLog}, - {"disableStream", &tsDisableStream}, - {"enableWhiteList", &tsEnableWhiteList}, - {"telemetryReporting", &tsEnableTelem}, - {"monitor", &tsEnableMonitor}, + static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, + {"asynclog", &tsAsyncLog}, + {"disableStream", &tsDisableStream}, + {"enableWhiteList", &tsEnableWhiteList}, + {"telemetryReporting", &tsEnableTelem}, + {"monitor", &tsEnableMonitor}, - {"mndSdbWriteDelta", &tsMndSdbWriteDelta}, - {"minDiskFreeSize", &tsMinDiskFreeSize}, + {"mndSdbWriteDelta", &tsMndSdbWriteDelta}, + {"minDiskFreeSize", &tsMinDiskFreeSize}, - {"cacheLazyLoadThreshold", &tsCacheLazyLoadThreshold}, - {"checkpointInterval", 
&tsStreamCheckpointInterval}, - {"keepAliveIdle", &tsKeepAliveIdle}, - {"logKeepDays", &tsLogKeepDays}, - {"maxStreamBackendCache", &tsMaxStreamBackendCache}, - {"mqRebalanceInterval", &tsMqRebalanceInterval}, - {"numOfLogLines", &tsNumOfLogLines}, - {"queryRspPolicy", &tsQueryRspPolicy}, - {"timeseriesThreshold", &tsTimeSeriesThreshold}, - {"tmqMaxTopicNum", &tmqMaxTopicNum}, - {"transPullupInterval", &tsTransPullupInterval}, - {"compactPullupInterval", &tsCompactPullupInterval}, - {"trimVDbIntervalSec", &tsTrimVDbIntervalSec}, - {"ttlBatchDropNum", &tsTtlBatchDropNum}, - {"ttlFlushThreshold", &tsTtlFlushThreshold}, - {"ttlPushInterval", &tsTtlPushIntervalSec}, - //{"s3BlockSize", &tsS3BlockSize}, - {"s3BlockCacheSize", &tsS3BlockCacheSize}, - {"s3PageCacheSize", &tsS3PageCacheSize}, - {"s3UploadDelaySec", &tsS3UploadDelaySec}, - {"supportVnodes", &tsNumOfSupportVnodes}, - {"experimental", &tsExperimental} - }; + {"cacheLazyLoadThreshold", &tsCacheLazyLoadThreshold}, + {"checkpointInterval", &tsStreamCheckpointInterval}, + {"keepAliveIdle", &tsKeepAliveIdle}, + {"logKeepDays", &tsLogKeepDays}, + {"maxStreamBackendCache", &tsMaxStreamBackendCache}, + {"mqRebalanceInterval", &tsMqRebalanceInterval}, + {"numOfLogLines", &tsNumOfLogLines}, + {"queryRspPolicy", &tsQueryRspPolicy}, + {"timeseriesThreshold", &tsTimeSeriesThreshold}, + {"tmqMaxTopicNum", &tmqMaxTopicNum}, + {"transPullupInterval", &tsTransPullupInterval}, + {"compactPullupInterval", &tsCompactPullupInterval}, + {"trimVDbIntervalSec", &tsTrimVDbIntervalSec}, + {"ttlBatchDropNum", &tsTtlBatchDropNum}, + {"ttlFlushThreshold", &tsTtlFlushThreshold}, + {"ttlPushInterval", &tsTtlPushIntervalSec}, + //{"s3BlockSize", &tsS3BlockSize}, + {"s3BlockCacheSize", &tsS3BlockCacheSize}, + {"s3PageCacheSize", &tsS3PageCacheSize}, + {"s3UploadDelaySec", &tsS3UploadDelaySec}, + {"supportVnodes", &tsNumOfSupportVnodes}, + {"experimental", &tsExperimental}}; if (taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true) != 0) { taosCfgSetOption(options, tListLen(options), pItem, false); @@ -1528,8 +1533,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) { switch (lowcaseName[0]) { case 'd': { if (strcasecmp("debugFlag", name) == 0) { - int32_t flag = pItem->i32; - taosSetAllDebugFlag(flag, true); + taosSetAllDebugFlag(pItem->i32); matched = true; } break; @@ -1695,36 +1699,34 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) { {"cDebugFlag", &cDebugFlag}, {"dDebugFlag", &dDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"jniDebugFlag", &jniDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, - {"uDebugFlag", &uDebugFlag}, + {"uDebugFlag", &uDebugFlag}, {"simDebugFlag", &simDebugFlag}, }; - static OptionNameAndVar options[] = { - {"asyncLog", &tsAsyncLog}, - {"assert", &tsAssert}, - {"compressMsgSize", &tsCompressMsgSize}, - {"countAlwaysReturnValue", &tsCountAlwaysReturnValue}, - {"crashReporting", &tsEnableCrashReport}, - {"enableCoreFile", &tsAsyncLog}, - {"enableQueryHb", &tsEnableQueryHb}, - {"keepColumnName", &tsKeepColumnName}, - {"keepAliveIdle", &tsKeepAliveIdle}, - {"logKeepDays", &tsLogKeepDays}, - {"maxInsertBatchRows", &tsMaxInsertBatchRows}, - {"maxRetryWaitTime", &tsMaxRetryWaitTime}, - {"minSlidingTime", &tsMinSlidingTime}, - {"minIntervalTime", &tsMinIntervalTime}, - {"numOfLogLines", &tsNumOfLogLines}, - {"querySmaOptimize", &tsQuerySmaOptimize}, - 
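Two things are going on in the `taosCfgDynamicOptionsForServer()` rewrite around here: the `resetlog` trigger is handled before the `cfgGetItem()` lookup (it has no config item, so the old order could never reach it), and the `OptionNameAndVar` tables map a config name to the address of the global it updates. A minimal self-contained sketch of that name-to-pointer dispatch pattern:

```c
#include <stdint.h>
#include <stdio.h>
#include <strings.h> /* strcasecmp (POSIX) */

typedef struct {
  const char *name;
  int32_t    *var;
} NameAndVar;

static int32_t tsDemoFlagA = 131, tsDemoFlagB = 135;

/* Look the option up case-insensitively and write through the pointer. */
static int setOption(NameAndVar *table, int n, const char *name, int32_t v) {
  for (int i = 0; i < n; i++) {
    if (strcasecmp(table[i].name, name) == 0) {
      printf("%s set from %d to %d\n", name, *table[i].var, v);
      *table[i].var = v;
      return 0;
    }
  }
  return -1; /* unknown option */
}

int main(void) {
  NameAndVar opts[] = {{"demoFlagA", &tsDemoFlagA}, {"demoFlagB", &tsDemoFlagB}};
  setOption(opts, 2, "demoflagb", 143);
  return 0;
}
```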
{"queryPolicy", &tsQueryPolicy}, - {"queryPlannerTrace", &tsQueryPlannerTrace}, - {"queryNodeChunkSize", &tsQueryNodeChunkSize}, - {"queryUseNodeAllocator", &tsQueryUseNodeAllocator}, - {"smlDot2Underline", &tsSmlDot2Underline}, - {"shellActivityTimer", &tsShellActivityTimer}, - {"slowLogThreshold", &tsSlowLogThreshold}, - {"useAdapter", &tsUseAdapter}, - {"experimental", &tsExperimental} - }; + static OptionNameAndVar options[] = {{"asyncLog", &tsAsyncLog}, + {"assert", &tsAssert}, + {"compressMsgSize", &tsCompressMsgSize}, + {"countAlwaysReturnValue", &tsCountAlwaysReturnValue}, + {"crashReporting", &tsEnableCrashReport}, + {"enableCoreFile", &tsAsyncLog}, + {"enableQueryHb", &tsEnableQueryHb}, + {"keepColumnName", &tsKeepColumnName}, + {"keepAliveIdle", &tsKeepAliveIdle}, + {"logKeepDays", &tsLogKeepDays}, + {"maxInsertBatchRows", &tsMaxInsertBatchRows}, + {"maxRetryWaitTime", &tsMaxRetryWaitTime}, + {"minSlidingTime", &tsMinSlidingTime}, + {"minIntervalTime", &tsMinIntervalTime}, + {"numOfLogLines", &tsNumOfLogLines}, + {"querySmaOptimize", &tsQuerySmaOptimize}, + {"queryPolicy", &tsQueryPolicy}, + {"queryPlannerTrace", &tsQueryPlannerTrace}, + {"queryNodeChunkSize", &tsQueryNodeChunkSize}, + {"queryUseNodeAllocator", &tsQueryUseNodeAllocator}, + {"smlDot2Underline", &tsSmlDot2Underline}, + {"shellActivityTimer", &tsShellActivityTimer}, + {"slowLogThreshold", &tsSlowLogThreshold}, + {"useAdapter", &tsUseAdapter}, + {"experimental", &tsExperimental}}; if (taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true) != 0) { taosCfgSetOption(options, tListLen(options), pItem, false); @@ -1740,9 +1742,9 @@ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer) { return taosCfgDynamicOptionsForClient(pCfg, name); } -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) { +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) { SConfigItem *pItem = cfgGetItem(tsCfg, flagName); - if (pItem != NULL && (rewrite || pItem->i32 == 0)) { + if (pItem != NULL) { pItem->i32 = flagVal; } if (pFlagPtr != NULL) { @@ -1750,33 +1752,58 @@ void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, } } -void taosSetAllDebugFlag(int32_t flag, bool rewrite) { - if (flag <= 0) return; - - taosSetDebugFlag(NULL, "debugFlag", flag, rewrite); - taosSetDebugFlag(NULL, "simDebugFlag", flag, rewrite); - taosSetDebugFlag(NULL, "tmrDebugFlag", flag, rewrite); - taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite); - taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite); - taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite); - taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite); - taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite); - taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite); - taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite); - taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite); - taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite); - taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite); - taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite); - taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite); - taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite); - taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite); - taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite); - taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite); - taosSetDebugFlag(&tdbDebugFlag, 
"tdbDebugFlag", flag, rewrite); - taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite); - taosSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, rewrite); - taosSetDebugFlag(&sndDebugFlag, "sndDebugFlag", flag, rewrite); - uInfo("all debug flag are set to %d", flag); +static int taosLogVarComp(void const *lp, void const *rp) { + SLogVar *lpVar = (SLogVar *)lp; + SLogVar *rpVar = (SLogVar *)rp; + return strcasecmp(lpVar->name, rpVar->name); } -int8_t taosGranted() { return atomic_load_8(&tsGrant); } \ No newline at end of file +static void taosCheckAndSetDebugFlag(int32_t *pFlagPtr, char *name, int32_t flag, SArray *noNeedToSetVars) { + if (noNeedToSetVars != NULL && taosArraySearch(noNeedToSetVars, name, taosLogVarComp, TD_EQ) != NULL) { + return; + } + taosSetDebugFlag(pFlagPtr, name, flag); +} + +void taosSetAllDebugFlag(int32_t flag) { + if (flag <= 0) return; + + SArray *noNeedToSetVars = NULL; + SConfigItem *pItem = cfgGetItem(tsCfg, "debugFlag"); + if (pItem != NULL) { + pItem->i32 = flag; + noNeedToSetVars = pItem->array; + } + + taosCheckAndSetDebugFlag(&simDebugFlag, "simDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, noNeedToSetVars); + + taosCheckAndSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, noNeedToSetVars); + + taosCheckAndSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&sndDebugFlag, "sndDebugFlag", flag, noNeedToSetVars); + + taosArrayClear(noNeedToSetVars); // reset array + + uInfo("all debug flag are set to %d", flag); + if (terrno == TSDB_CODE_CFG_NOT_FOUND) terrno = TSDB_CODE_SUCCESS; // ignore not exist +} + +int8_t taosGranted() { return atomic_load_8(&tsGrant); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 918632b7e7..c9e2908e8a 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -143,6 +143,7 @@ STSRow *tGetSubmitBlkNext(SSubmitBlkIter *pIter) { } } +#ifdef BUILD_NO_CALL int32_t tPrintFixedSchemaSubmitReq(SSubmitReq *pReq, STSchema *pTschema) { SSubmitMsgIter msgIter = {0}; if (tInitSubmitMsgIter(pReq, &msgIter) < 0) return -1; @@ -161,6 +162,7 @@ int32_t 
tPrintFixedSchemaSubmitReq(SSubmitReq *pReq, STSchema *pTschema) { } return 0; } +#endif int32_t tEncodeSEpSet(SEncoder *pEncoder, const SEpSet *pEp) { if (tEncodeI8(pEncoder, pEp->inUse) < 0) return -1; @@ -1827,7 +1829,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) char *tb = taosHashIterate(pRsp->readTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1842,7 +1844,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->writeTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1857,7 +1859,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->alterTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1872,7 +1874,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->readViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1887,7 +1889,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->writeViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1902,7 +1904,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->alterViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1917,7 +1919,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) int32_t *useDb = taosHashIterate(pRsp->useDbs, NULL); while (useDb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(useDb, &keyLen); + void * key = taosHashGetKey(useDb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -8332,7 +8334,7 @@ int32_t tDecodeMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) { return 0; } -int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { +int32_t tEncodeMqDataRspCommon(SEncoder *pEncoder, const SMqDataRsp *pRsp) { if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1; if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1; @@ -8342,7 +8344,7 @@ int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { for (int32_t i = 0; i < pRsp->blockNum; i++) { int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i); - void *data = taosArrayGetP(pRsp->blockData, i); + void * data = 
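In the `tmsg.c` hunks here, `tEncodeMqDataRsp()`/`tDecodeMqDataRsp()` are split into a shared `...Common` body plus thin wrappers, and the new decoder only reads the trailing `sleepTime` field when `tDecodeIsEnd()` reports that bytes remain — a standard way to stay wire-compatible with peers that never encoded the field. A generic sketch of that optional-tail pattern, with a hypothetical decoder cursor standing in for TDengine's `SDecoder`:

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct {
  const uint8_t *p, *end;
} Dec;

static bool decIsEnd(const Dec *d) { return d->p >= d->end; }

static int decI64(Dec *d, int64_t *v) {
  if (d->end - d->p < 8) return -1;
  memcpy(v, d->p, 8); /* fixed-width field, for the sketch only */
  d->p += 8;
  return 0;
}

typedef struct { int64_t base, sleepTime; } Msg;

static int decodeMsg(Dec *d, Msg *m) {
  if (decI64(d, &m->base) < 0) return -1; /* always present */
  m->sleepTime = 0;                       /* default for old peers */
  if (!decIsEnd(d) && decI64(d, &m->sleepTime) < 0) return -1;
  return 0;
}
```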
taosArrayGetP(pRsp->blockData, i); if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1; if (pRsp->withSchema) { SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i); @@ -8354,11 +8356,16 @@ int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { } } } + return 0; +} + +int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { + if (tEncodeMqDataRspCommon(pEncoder, pRsp) < 0) return -1; if (tEncodeI64(pEncoder, pRsp->sleepTime) < 0) return -1; return 0; } -int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { +int32_t tDecodeMqDataRspCommon(SDecoder *pDecoder, SMqDataRsp *pRsp) { if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1; if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1; if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1; @@ -8375,7 +8382,7 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } for (int32_t i = 0; i < pRsp->blockNum; i++) { - void *data; + void * data; uint64_t bLen; if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1; taosArrayPush(pRsp->blockData, &data); @@ -8400,7 +8407,15 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } } } - if (tDecodeI64(pDecoder, &pRsp->sleepTime) < 0) return -1; + + return 0; +} + +int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { + if (tDecodeMqDataRspCommon(pDecoder, pRsp) < 0) return -1; + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI64(pDecoder, &pRsp->sleepTime) < 0) return -1; + } return 0; } @@ -8416,12 +8431,12 @@ void tDeleteMqDataRsp(SMqDataRsp *pRsp) { } int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) { - if (tEncodeMqDataRsp(pEncoder, (const SMqDataRsp *)pRsp) < 0) return -1; + if (tEncodeMqDataRspCommon(pEncoder, (const SMqDataRsp *)pRsp) < 0) return -1; if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1; if (pRsp->createTableNum) { for (int32_t i = 0; i < pRsp->createTableNum; i++) { - void *createTableReq = taosArrayGetP(pRsp->createTableReq, i); + void * createTableReq = taosArrayGetP(pRsp->createTableReq, i); int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i); if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1; } @@ -8430,14 +8445,14 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) { } int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) { - if (tDecodeMqDataRsp(pDecoder, (SMqDataRsp *)pRsp) < 0) return -1; + if (tDecodeMqDataRspCommon(pDecoder, (SMqDataRsp*)pRsp) < 0) return -1; if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1; if (pRsp->createTableNum) { pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t)); pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *)); for (int32_t i = 0; i < pRsp->createTableNum; i++) { - void *pCreate = NULL; + void * pCreate = NULL; uint64_t len; if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1; int32_t l = (int32_t)len; @@ -8445,6 +8460,7 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) { taosArrayPush(pRsp->createTableReq, &pCreate); } } + return 0; } @@ -8739,7 +8755,7 @@ void tDestroySubmitTbData(SSubmitTbData *pTbData, int32_t flag) { taosArrayDestroy(pTbData->aCol); } else { int32_t nRow = TARRAY_SIZE(pTbData->aRowP); - SRow **rows = (SRow **)TARRAY_DATA(pTbData->aRowP); + SRow ** rows = (SRow **)TARRAY_DATA(pTbData->aRowP); for (int32_t i = 0; i < nRow; ++i) { tRowDestroy(rows[i]); diff --git 
a/source/common/src/trow.c b/source/common/src/trow.c index e2da4d166e..b91562be7a 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -17,6 +17,221 @@ #include "trow.h" #include "tlog.h" +static bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal); +static bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal); + +void tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema) { + pIter->pSchema = pSchema; + pIter->maxColId = pSchema->columns[pSchema->numOfCols - 1].colId; +} + +void *tdGetBitmapAddr(STSRow *pRow, uint8_t rowType, uint32_t flen, col_id_t nKvCols) { +#ifdef TD_SUPPORT_BITMAP + switch (rowType) { + case TD_ROW_TP: + return tdGetBitmapAddrTp(pRow, flen); + case TD_ROW_KV: + return tdGetBitmapAddrKv(pRow, nKvCols); + default: + break; + } +#endif + return NULL; +} + +void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow) { + pIter->pRow = pRow; + pIter->pBitmap = tdGetBitmapAddr(pRow, pRow->type, pIter->pSchema->flen, tdRowGetNCols(pRow)); + pIter->offset = 0; + pIter->colIdx = 0; // PRIMARYKEY_TIMESTAMP_COL_ID; + pIter->kvIdx = 0; +} + +bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) { + if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + pVal->val = &pIter->pRow->ts; + pVal->valType = TD_VTYPE_NORM; + return true; + } + + if (TD_IS_TP_ROW(pIter->pRow)) { + STColumn *pCol = NULL; + STSchema *pSchema = pIter->pSchema; + while (pIter->colIdx < pSchema->numOfCols) { + pCol = &pSchema->columns[pIter->colIdx]; // 1st column of schema is primary TS key + if (colId == pCol->colId) { + break; + } else if (pCol->colId < colId) { + ++pIter->colIdx; + continue; + } else { + return false; + } + } + tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); + ++pIter->colIdx; + } else if (TD_IS_KV_ROW(pIter->pRow)) { + return tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal); + } else { + pVal->valType = TD_VTYPE_NONE; + terrno = TSDB_CODE_INVALID_PARA; + if (COL_REACH_END(colId, pIter->maxColId)) return false; + } + return true; +} + +bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) { + STSRow *pRow = pIter->pRow; + if (pRow->statis == 0) { + pVal->valType = TD_VTYPE_NORM; + if (IS_VAR_DATA_TYPE(colType)) { + pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset)); + } else { + pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset); + } + return true; + } + + if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) { + pVal->valType = TD_VTYPE_NONE; + return true; + } + + if (pVal->valType == TD_VTYPE_NORM) { + if (IS_VAR_DATA_TYPE(colType)) { + pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset)); + } else { + pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset); + } + } + + return true; +} + +int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { + if (!pBitmap || colIdx < 0) { + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + int16_t nBytes = colIdx / TD_VTYPE_PARTS; + int16_t nOffset = colIdx & TD_VTYPE_OPTR; + char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes); + // use literal value directly and not use formula to simplify the codes + switch (nOffset) { + case 0: + *pValType = (((*pDestByte) & 0xC0) >> 6); + break; + case 1: + *pValType = (((*pDestByte) & 0x30) >> 4); + break; + case 2: + *pValType = (((*pDestByte) & 0x0C) >> 
2); + break; + case 3: + *pValType = ((*pDestByte) & 0x03); + break; + default: + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + return TSDB_CODE_SUCCESS; +} + +int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { + if (!pBitmap || colIdx < 0) { + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + int16_t nBytes = colIdx / TD_VTYPE_PARTS_I; + int16_t nOffset = colIdx & TD_VTYPE_OPTR_I; + char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes); + // use literal value directly and not use formula to simplify the codes + switch (nOffset) { + case 0: + *pValType = (((*pDestByte) & 0x80) >> 7); + break; + case 1: + *pValType = (((*pDestByte) & 0x40) >> 6); + break; + case 2: + *pValType = (((*pDestByte) & 0x20) >> 5); + break; + case 3: + *pValType = (((*pDestByte) & 0x10) >> 4); + break; + case 4: + *pValType = (((*pDestByte) & 0x08) >> 3); + break; + case 5: + *pValType = (((*pDestByte) & 0x04) >> 2); + break; + case 6: + *pValType = (((*pDestByte) & 0x02) >> 1); + break; + case 7: + *pValType = ((*pDestByte) & 0x01); + break; + default: + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + return TSDB_CODE_SUCCESS; +} + +int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType, int8_t bitmapMode) { + switch (bitmapMode) { + case 0: + tdGetBitmapValTypeII(pBitmap, colIdx, pValType); + break; + case -1: + case 1: + tdGetBitmapValTypeI(pBitmap, colIdx, pValType); + break; + default: + terrno = TSDB_CODE_INVALID_PARA; + return TSDB_CODE_FAILED; + } + return TSDB_CODE_SUCCESS; +} + +bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal) { + STSRow *pRow = pIter->pRow; + SKvRowIdx *pKvIdx = NULL; + bool colFound = false; + col_id_t kvNCols = tdRowGetNCols(pRow) - 1; + void *pColIdx = TD_ROW_COL_IDX(pRow); + while (*nIdx < kvNCols) { + pKvIdx = (SKvRowIdx *)POINTER_SHIFT(pColIdx, *nIdx * sizeof(SKvRowIdx)); + if (pKvIdx->colId == colId) { + ++(*nIdx); + pVal->val = POINTER_SHIFT(pRow, pKvIdx->offset); + colFound = true; + break; + } else if (pKvIdx->colId > colId) { + pVal->valType = TD_VTYPE_NONE; + return true; + } else { + ++(*nIdx); + } + } + + if (!colFound) { + if (colId <= pIter->maxColId) { + pVal->valType = TD_VTYPE_NONE; + return true; + } else { + return false; + } + } + + if (tdGetBitmapValType(pIter->pBitmap, pIter->kvIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) { + pVal->valType = TD_VTYPE_NONE; + } + + return true; +} + +#ifdef BUILD_NO_CALL const uint8_t tdVTypeByte[2][3] = {{ // 2 bits TD_VTYPE_NORM_BYTE_II, @@ -34,8 +249,6 @@ const uint8_t tdVTypeByte[2][3] = {{ // declaration static uint8_t tdGetBitmapByte(uint8_t byte); -static bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal); -static bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal); static bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx, SCellVal *pVal); static bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal); @@ -199,38 +412,6 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl return true; } -bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) { - if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { - pVal->val = &pIter->pRow->ts; - pVal->valType = TD_VTYPE_NORM; - return true; - } - - if (TD_IS_TP_ROW(pIter->pRow)) { - STColumn *pCol = NULL; - 
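The bitmap readers relocated above pack cell value types at two densities: `tdGetBitmapValTypeII()` stores four 2-bit states per byte (`colIdx / TD_VTYPE_PARTS` picks the byte, `colIdx & TD_VTYPE_OPTR` the slot, masked with 0xC0/0x30/0x0C/0x03), while the `...I` variant stores eight 1-bit states per byte. A compact, self-contained equivalent of the 2-bit case using shifts in place of the unrolled switch, assuming those constants behave as 4 and 3 here:

```c
#include <stdint.h>
#include <stdio.h>

/* 2-bit cells, 4 per byte, most significant cell first — equivalent to the
 * switch in tdGetBitmapValTypeII() above. */
static uint8_t get2BitCell(const uint8_t *bitmap, int colIdx) {
  int byte  = colIdx / 4;      /* TD_VTYPE_PARTS */
  int slot  = colIdx & 3;      /* TD_VTYPE_OPTR  */
  int shift = (3 - slot) * 2;  /* slot 0 lives in bits 7..6 */
  return (bitmap[byte] >> shift) & 0x03;
}

int main(void) {
  uint8_t bm[] = {0x1B}; /* cells: 00 01 10 11 */
  for (int i = 0; i < 4; i++) printf("cell %d = %u\n", i, get2BitCell(bm, i));
  return 0;
}
```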
STSchema *pSchema = pIter->pSchema; - while (pIter->colIdx < pSchema->numOfCols) { - pCol = &pSchema->columns[pIter->colIdx]; // 1st column of schema is primary TS key - if (colId == pCol->colId) { - break; - } else if (pCol->colId < colId) { - ++pIter->colIdx; - continue; - } else { - return false; - } - } - tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); - ++pIter->colIdx; - } else if (TD_IS_KV_ROW(pIter->pRow)) { - return tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal); - } else { - pVal->valType = TD_VTYPE_NONE; - terrno = TSDB_CODE_INVALID_PARA; - if (COL_REACH_END(colId, pIter->maxColId)) return false; - } - return true; -} bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) { if (pIter->colIdx >= pIter->pSchema->numOfCols) { @@ -258,71 +439,6 @@ bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) { return true; } -bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) { - STSRow *pRow = pIter->pRow; - if (pRow->statis == 0) { - pVal->valType = TD_VTYPE_NORM; - if (IS_VAR_DATA_TYPE(colType)) { - pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset)); - } else { - pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset); - } - return true; - } - - if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) { - pVal->valType = TD_VTYPE_NONE; - return true; - } - - if (pVal->valType == TD_VTYPE_NORM) { - if (IS_VAR_DATA_TYPE(colType)) { - pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset)); - } else { - pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset); - } - } - - return true; -} - -bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal) { - STSRow *pRow = pIter->pRow; - SKvRowIdx *pKvIdx = NULL; - bool colFound = false; - col_id_t kvNCols = tdRowGetNCols(pRow) - 1; - void *pColIdx = TD_ROW_COL_IDX(pRow); - while (*nIdx < kvNCols) { - pKvIdx = (SKvRowIdx *)POINTER_SHIFT(pColIdx, *nIdx * sizeof(SKvRowIdx)); - if (pKvIdx->colId == colId) { - ++(*nIdx); - pVal->val = POINTER_SHIFT(pRow, pKvIdx->offset); - colFound = true; - break; - } else if (pKvIdx->colId > colId) { - pVal->valType = TD_VTYPE_NONE; - return true; - } else { - ++(*nIdx); - } - } - - if (!colFound) { - if (colId <= pIter->maxColId) { - pVal->valType = TD_VTYPE_NONE; - return true; - } else { - return false; - } - } - - if (tdGetBitmapValType(pIter->pBitmap, pIter->kvIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) { - pVal->valType = TD_VTYPE_NONE; - } - - return true; -} - int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) { STColumn *pTColumn; SColVal *pColVal; @@ -490,76 +606,6 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell return true; } -int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { - if (!pBitmap || colIdx < 0) { - terrno = TSDB_CODE_INVALID_PARA; - return terrno; - } - int16_t nBytes = colIdx / TD_VTYPE_PARTS; - int16_t nOffset = colIdx & TD_VTYPE_OPTR; - char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes); - // use literal value directly and not use formula to simplify the codes - switch (nOffset) { - case 0: - *pValType = (((*pDestByte) & 0xC0) >> 6); - break; - case 1: - *pValType = (((*pDestByte) & 0x30) >> 4); - break; - case 2: - *pValType = (((*pDestByte) & 0x0C) >> 2); - break; - case 3: - *pValType = ((*pDestByte) & 0x03); - break; - default: - terrno = TSDB_CODE_INVALID_PARA; - 
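/* Minimal usage sketch for the iterator trio hoisted to the top of this file (tdSTSRowIterInit /
 * tdSTSRowIterReset / tdSTSRowIterFetch); names are from the code above, and cv.val points into the
 * row buffer, so the cell must be copied out if it has to outlive pRow:
 *   STSRowIter iter = {0};
 *   tdSTSRowIterInit(&iter, pSchema);  // bind the schema once
 *   tdSTSRowIterReset(&iter, pRow);    // rebind per row; locates the row's bitmap
 *   SCellVal cv = {0};
 *   if (tdSTSRowIterFetch(&iter, colId, colType, &cv) && cv.valType == TD_VTYPE_NORM) {
 *     // use cv.val while pRow is alive
 *   }
 * Note: fetch colIds in ascending order; colIdx/kvIdx only move forward.
 */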
return terrno; - } - return TSDB_CODE_SUCCESS; -} - -int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { - if (!pBitmap || colIdx < 0) { - terrno = TSDB_CODE_INVALID_PARA; - return terrno; - } - int16_t nBytes = colIdx / TD_VTYPE_PARTS_I; - int16_t nOffset = colIdx & TD_VTYPE_OPTR_I; - char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes); - // use literal value directly and not use formula to simplify the codes - switch (nOffset) { - case 0: - *pValType = (((*pDestByte) & 0x80) >> 7); - break; - case 1: - *pValType = (((*pDestByte) & 0x40) >> 6); - break; - case 2: - *pValType = (((*pDestByte) & 0x20) >> 5); - break; - case 3: - *pValType = (((*pDestByte) & 0x10) >> 4); - break; - case 4: - *pValType = (((*pDestByte) & 0x08) >> 3); - break; - case 5: - *pValType = (((*pDestByte) & 0x04) >> 2); - break; - case 6: - *pValType = (((*pDestByte) & 0x02) >> 1); - break; - case 7: - *pValType = ((*pDestByte) & 0x01); - break; - default: - terrno = TSDB_CODE_INVALID_PARA; - return terrno; - } - return TSDB_CODE_SUCCESS; -} - int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType) { if (!pBitmap || colIdx < 0) { terrno = TSDB_CODE_INVALID_PARA; @@ -944,21 +990,6 @@ int32_t tdSRowSetInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols, return TSDB_CODE_SUCCESS; } -int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType, int8_t bitmapMode) { - switch (bitmapMode) { - case 0: - tdGetBitmapValTypeII(pBitmap, colIdx, pValType); - break; - case -1: - case 1: - tdGetBitmapValTypeI(pBitmap, colIdx, pValType); - break; - default: - terrno = TSDB_CODE_INVALID_PARA; - return TSDB_CODE_FAILED; - } - return TSDB_CODE_SUCCESS; -} #if 0 bool tdIsBitmapValTypeNorm(const void *pBitmap, int16_t idx, int8_t bitmapMode) { TDRowValT valType = 0; @@ -1020,32 +1051,7 @@ int32_t tdSetBitmapValType(void *pBitmap, int16_t colIdx, TDRowValT valType, int return TSDB_CODE_SUCCESS; } -void *tdGetBitmapAddr(STSRow *pRow, uint8_t rowType, uint32_t flen, col_id_t nKvCols) { -#ifdef TD_SUPPORT_BITMAP - switch (rowType) { - case TD_ROW_TP: - return tdGetBitmapAddrTp(pRow, flen); - case TD_ROW_KV: - return tdGetBitmapAddrKv(pRow, nKvCols); - default: - break; - } -#endif - return NULL; -} -void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow) { - pIter->pRow = pRow; - pIter->pBitmap = tdGetBitmapAddr(pRow, pRow->type, pIter->pSchema->flen, tdRowGetNCols(pRow)); - pIter->offset = 0; - pIter->colIdx = 0; // PRIMARYKEY_TIMESTAMP_COL_ID; - pIter->kvIdx = 0; -} - -void tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema) { - pIter->pSchema = pSchema; - pIter->maxColId = pSchema->columns[pSchema->numOfCols - 1].colId; -} void tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pColVal) { STColumn *pTColumn = &pTSchema->columns[iCol]; @@ -1079,3 +1085,4 @@ void tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pColV } } } +#endif \ No newline at end of file diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index ec00fce4dd..775cff6670 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -683,6 +683,10 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati return getDuration(*duration, *unit, duration, timePrecision); } +static bool taosIsLeapYear(int32_t year) { + return (year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)); +} + int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) { if (duration == 0) { 
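/* The Gregorian rule implemented above, spelled out with concrete years (illustrative):
 *   taosIsLeapYear(2000) -> true   (divisible by 400)
 *   taosIsLeapYear(1900) -> false  (divisible by 100 but not by 400)
 *   taosIsLeapYear(2024) -> true   (divisible by 4, not by 100)
 * The caller below passes 1900 + tm.tm_year because struct tm counts years from 1900. */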
return t; @@ -702,7 +706,13 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) { int32_t mon = tm.tm_year * 12 + tm.tm_mon + (int32_t)numOfMonth; tm.tm_year = mon / 12; tm.tm_mon = mon % 12; - + int daysOfMonth[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + if (taosIsLeapYear(1900 + tm.tm_year)) { + daysOfMonth[1] = 29; + } + if (tm.tm_mday > daysOfMonth[tm.tm_mon]) { + tm.tm_mday = daysOfMonth[tm.tm_mon]; + } return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction); } @@ -872,23 +882,33 @@ int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) { ASSERT(pInterval->offset >= 0); if (pInterval->offset > 0) { - start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision); - // try to move current window to the left-hande-side, due to the offset effect. - int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1; - - int64_t newe = end; + int64_t newe = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1; + int64_t slidingStart = start; while (newe >= ts) { - end = newe; - newe = taosTimeAdd(newe, -pInterval->sliding, pInterval->slidingUnit, precision); + start = slidingStart; + slidingStart = taosTimeAdd(slidingStart, -pInterval->sliding, pInterval->slidingUnit, precision); + int64_t slidingEnd = taosTimeAdd(slidingStart, pInterval->interval, pInterval->intervalUnit, precision) - 1; + newe = taosTimeAdd(slidingEnd, pInterval->offset, pInterval->offsetUnit, precision); } - - start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1; + start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision); } return start; } +// used together with taosTimeTruncate. 
When offset is greater than zero, slide-start/slide-end is the anchor point; e.g. with a 10m interval and a 2m offset, interval-start 10:02:00.000 gives slide-start 10:00:00.000 and window end 10:11:59.999 (ms precision) +int64_t taosTimeGetIntervalEnd(int64_t intervalStart, const SInterval* pInterval) { + if (pInterval->offset > 0) { + int64_t slideStart = taosTimeAdd(intervalStart, -1 * pInterval->offset, pInterval->offsetUnit, pInterval->precision); + int64_t slideEnd = taosTimeAdd(slideStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + int64_t result = taosTimeAdd(slideEnd, pInterval->offset, pInterval->offsetUnit, pInterval->precision); + return result; + } else { + int64_t result = taosTimeAdd(intervalStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + return result; + } +} // internal function, when program is paused in debugger, // one can call this function from debugger to print a // timestamp as human readable string, for example (gdb): @@ -1297,7 +1317,7 @@ static void parseTsFormat(const char* formatStr, SArray* formats) { } } -static void tm2char(const SArray* formats, const struct STm* tm, char* s, int32_t outLen) { +static int32_t tm2char(const SArray* formats, const struct STm* tm, char* s, int32_t outLen) { int32_t size = taosArrayGetSize(formats); const char* start = s; for (int32_t i = 0; i < size; ++i) { @@ -1332,6 +1352,9 @@ static void tm2char(const SArray* formats, const struct STm* tm, char* s, int32_ s += 4; break; case TSFKW_DDD: +#ifdef WINDOWS + return TSDB_CODE_FUNC_TO_CHAR_NOT_SUPPORTED; +#endif sprintf(s, "%03d", tm->tm.tm_yday + 1); s += strlen(s); break; @@ -1486,6 +1509,7 @@ static void tm2char(const SArray* formats, const struct STm* tm, char* s, int32_ break; } } + return TSDB_CODE_SUCCESS; } /// @brief find s in arr case insensitively @@ -1889,14 +1913,14 @@ static int32_t char2ts(const char* s, SArray* formats, int64_t* ts, int32_t prec return ret; } -void taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen) { +int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen) { if (!*formats) { *formats = taosArrayInit(8, sizeof(TSFormatNode)); parseTsFormat(format, *formats); } struct STm tm; taosTs2Tm(ts, precision, &tm); - tm2char(*formats, &tm, out, outLen); + return tm2char(*formats, &tm, out, outLen); } int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int64_t* ts, int32_t precision, char* errMsg, diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 3c08714218..756ac8167e 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -68,7 +68,7 @@ static struct { int64_t startTime; } global = {0}; -static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143, true); } +static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143); } static void dmSetAssert(int32_t signum, void *sigInfo, void *context) { tsAssert = 1; } static void dmStopDnode(int signum, void *sigInfo, void *context) { diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index ac94390619..14853009e0 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -45,7 +45,7 @@ static void dmMayShouldUpdateIpWhiteList(SDnodeMgmt *pMgmt, int64_t ver) { SRetrieveIpWhiteReq req = {.ipWhiteVer = oldVer}; int32_t contLen = tSerializeRetrieveIpWhite(NULL, 0, &req); - void *pHead = rpcMallocCont(contLen); + void * pHead = 
rpcMallocCont(contLen); tSerializeRetrieveIpWhite(pHead, contLen, &req); SRpcMsg rpcMsg = {.pCont = pHead, @@ -144,7 +144,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { req.ipWhiteVer = pMgmt->pData->ipWhiteVer; int32_t contLen = tSerializeSStatusReq(NULL, 0, &req); - void *pHead = rpcMallocCont(contLen); + void * pHead = rpcMallocCont(contLen); tSerializeSStatusReq(pHead, contLen, &req); tFreeSStatusReq(&req); @@ -159,13 +159,18 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { dTrace("send status req to mnode, dnodeVer:%" PRId64 " statusSeq:%d", req.dnodeVer, req.statusSeq); SEpSet epSet = {0}; + int8_t epUpdated = 0; dmGetMnodeEpSet(pMgmt->pData, &epSet); - rpcSendRecvWithTimeout(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp, 5000); + rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000); if (rpcRsp.code != 0) { dmRotateMnodeEpSet(pMgmt->pData); - char tbuf[256]; + char tbuf[512]; dmEpSetToStr(tbuf, sizeof(tbuf), &epSet); dError("failed to send status req since %s, epSet:%s, inUse:%d", tstrerror(rpcRsp.code), tbuf, epSet.inUse); + } else { + if (epUpdated == 1) { + dmSetMnodeEpSet(pMgmt->pData, &epSet); + } } dmProcessStatusRsp(pMgmt, &rpcRsp); } @@ -184,7 +189,7 @@ void dmSendNotifyReq(SDnodeMgmt *pMgmt) { req.pVloads = vinfo.pVloads; int32_t contLen = tSerializeSNotifyReq(NULL, 0, &req); - void *pHead = rpcMallocCont(contLen); + void * pHead = rpcMallocCont(contLen); tSerializeSNotifyReq(pHead, contLen, &req); tFreeSNotifyReq(&req); @@ -279,7 +284,7 @@ int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { } SSDataBlock *dmBuildVariablesBlock(void) { - SSDataBlock *pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + SSDataBlock * pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); size_t size = 0; const SSysTableMeta *pMeta = NULL; getInfosDbMeta(&pMeta, &size); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index af43804db4..7ca19d7725 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -22,10 +22,8 @@ static void *dmStatusThreadFp(void *param) { int64_t lastTime = taosGetTimestampMs(); setThreadName("dnode-status"); - const static int16_t TRIM_FREQ = 30; - int32_t trimCount = 0; - int32_t upTimeCount = 0; - int64_t upTime = 0; + int32_t upTimeCount = 0; + int64_t upTime = 0; while (1) { taosMsleep(200); @@ -38,11 +36,6 @@ static void *dmStatusThreadFp(void *param) { dmSendStatusReq(pMgmt); lastTime = curTime; - trimCount = (trimCount + 1) % TRIM_FREQ; - if (trimCount == 0) { - taosMemoryTrim(0); - } - if ((upTimeCount = ((upTimeCount + 1) & 63)) == 0) { upTime = taosGetOsUptime() - tsDndStartOsUptime; tsDndUpTime = TMAX(tsDndUpTime, upTime); @@ -54,7 +47,8 @@ static void *dmStatusThreadFp(void *param) { } SDmNotifyHandle dmNotifyHdl = {.state = 0}; -static void *dmNotifyThreadFp(void *param) { + +static void *dmNotifyThreadFp(void *param) { SDnodeMgmt *pMgmt = param; setThreadName("dnode-notify"); @@ -83,6 +77,9 @@ static void *dmMonitorThreadFp(void *param) { int64_t lastTime = taosGetTimestampMs(); setThreadName("dnode-monitor"); + static int32_t TRIM_FREQ = 20; + int32_t trimCount = 0; + while (1) { taosMsleep(200); if (pMgmt->pData->dropped || pMgmt->pData->stopped) break; @@ -93,6 +90,11 @@ static void *dmMonitorThreadFp(void *param) { if (interval >= tsMonitorInterval) { (*pMgmt->sendMonitorReportFp)(); lastTime = curTime; + + trimCount = (trimCount + 1) % TRIM_FREQ; + if (trimCount == 0) { + 
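/* Cadence note for the trim that moved here from the status thread (sketch; assumes the default
 * tsStatusInterval of 1s and tsMonitorInterval of 30s): the old status-thread version trimmed every
 * 30 status beats, roughly every 30s, while this branch fires once per monitor report, so
 * taosMemoryTrim(0) now runs about every TRIM_FREQ * 30s, i.e. every 10 minutes. */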
taosMemoryTrim(0); + } } } @@ -126,14 +128,14 @@ static void *dmCrashReportThreadFp(void *param) { setThreadName("dnode-crashReport"); char filepath[PATH_MAX] = {0}; snprintf(filepath, sizeof(filepath), "%s%s.taosdCrashLog", tsLogDir, TD_DIRSEP); - char *pMsg = NULL; - int64_t msgLen = 0; + char *pMsg = NULL; + int64_t msgLen = 0; TdFilePtr pFile = NULL; - bool truncateFile = false; - int32_t sleepTime = 200; - int32_t reportPeriodNum = 3600 * 1000 / sleepTime;; + bool truncateFile = false; + int32_t sleepTime = 200; + int32_t reportPeriodNum = 3600 * 1000 / sleepTime; int32_t loopTimes = reportPeriodNum; - + while (1) { if (pMgmt->pData->dropped || pMgmt->pData->stopped) break; if (loopTimes++ < reportPeriodNum) { @@ -167,13 +170,13 @@ static void *dmCrashReportThreadFp(void *param) { pMsg = NULL; continue; } - + if (pFile) { taosReleaseCrashLogFile(pFile, truncateFile); pFile = NULL; truncateFile = false; } - + taosMsleep(sleepTime); loopTimes = 0; } diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index d6de406987..0fb246e945 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -108,11 +108,11 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_SNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_SYNC_CONFIG_CHANGE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_SYNC_CONFIG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CONNECT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_ACCT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_snode/src/smHandle.c b/source/dnode/mgmt/mgmt_snode/src/smHandle.c index 6de29f8513..444739e461 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smHandle.c +++ b/source/dnode/mgmt/mgmt_snode/src/smHandle.c @@ -90,7 +90,7 @@ SArray *smGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER; - + if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, smPutNodeMsgToMgmtQueue, 1) == NULL) 
goto _OVER; code = 0; _OVER: diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 9a792a2774..8b80527447 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -407,12 +407,8 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) { if (tWWorkerInit(pFPool) != 0) return -1; SSingleWorkerCfg mgmtCfg = { - .min = 1, - .max = 1, - .name = "vnode-mgmt", - .fp = (FItem)vmProcessMgmtQueue, - .param = pMgmt, - }; + .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt}; + if (tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg) != 0) return -1; dDebug("vnode workers are initialized"); diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index c646bb4bdd..cb3395dcc2 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -48,6 +48,8 @@ typedef struct { typedef struct { void *serverRpc; void *clientRpc; + void *statusRpc; + void *syncRpc; SDnodeHandle msgHandles[TDMT_MAX]; } SDnodeTrans; @@ -136,8 +138,10 @@ int32_t dmInitServer(SDnode *pDnode); void dmCleanupServer(SDnode *pDnode); int32_t dmInitClient(SDnode *pDnode); int32_t dmInitStatusClient(SDnode *pDnode); +int32_t dmInitSyncClient(SDnode *pDnode); void dmCleanupClient(SDnode *pDnode); void dmCleanupStatusClient(SDnode *pDnode); +void dmCleanupSyncClient(SDnode *pDnode); SMsgCb dmGetMsgcb(SDnode *pDnode); #ifdef TD_MODULE_OPTIMIZE int32_t dmInitMsgHandle(SDnode *pDnode, SMgmtWrapper *wrappers); diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index f9bba19fbb..d173c83e97 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -28,9 +28,6 @@ } \ } while (0) -extern int32_t streamTimerInit(); -extern void streamTimerCleanUp(); - static SDnode globalDnode = {0}; SDnode *dmInstance() { return &globalDnode; } @@ -169,7 +166,6 @@ int32_t dmInit() { #if defined(USE_S3) if (s3Begin() != 0) return -1; #endif - if (streamTimerInit() != 0) return -1; dInfo("dnode env is initialized"); return 0; @@ -196,10 +192,10 @@ void dmCleanup() { udfStopUdfd(); taosStopCacheRefreshWorker(); dmDiskClose(); + #if defined(USE_S3) s3End(); #endif - streamTimerCleanUp(); dInfo("dnode env is cleaned up"); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 84465640c0..1a222a3fd4 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -90,9 +90,13 @@ int32_t dmInitDnode(SDnode *pDnode) { goto _OVER; } #endif + indexInit(tsNumOfCommitThreads); streamMetaInit(); + dmInitStatusClient(pDnode); + dmInitSyncClient(pDnode); + dmReportStartup("dnode-transport", "initialized"); dDebug("dnode is created, ptr:%p", pDnode); code = 0; @@ -108,11 +112,15 @@ _OVER: } void dmCleanupDnode(SDnode *pDnode) { - if (pDnode == NULL) return; + if (pDnode == NULL) { + return; + } dmCleanupClient(pDnode); dmCleanupStatusClient(pDnode); + dmCleanupSyncClient(pDnode); dmCleanupServer(pDnode); + dmClearVars(pDnode); rpcCleanup(); streamMetaCleanup(); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 1ea61f0e93..3b7ecce77c 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -322,6 +322,23 @@ static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) { return 0; 
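/* Sketch of how the new three-way client split divides traffic: status heartbeats and raft/sync
 * messages no longer share connections with bulk client RPC. dmPickRpc below is hypothetical, not
 * part of the source; it only mirrors the routing done by dmSendReq, dmSendSyncReq (below) and the
 * statusRpc call in dmSendStatusReq:
 *   static inline void *dmPickRpc(SDnodeTrans *pTrans, tmsg_t msgType) {
 *     if (msgType == TDMT_MND_STATUS) return pTrans->statusRpc;  // heartbeat path
 *     if (msgType >= TDMT_SYNC_MSG)   return pTrans->syncRpc;    // raft/sync traffic
 *     return pTrans->clientRpc;                                  // everything else
 *   }
 */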
} } +static inline int32_t dmSendSyncReq(const SEpSet *pEpSet, SRpcMsg *pMsg) { + SDnode *pDnode = dmInstance(); + if (pDnode->status != DND_STAT_RUNNING && pMsg->msgType < TDMT_SYNC_MSG) { + rpcFreeCont(pMsg->pCont); + pMsg->pCont = NULL; + if (pDnode->status == DND_STAT_INIT) { + terrno = TSDB_CODE_APP_IS_STARTING; + } else { + terrno = TSDB_CODE_APP_IS_STOPPING; + } + dError("failed to send rpc msg:%s since %s, handle:%p", TMSG_INFO(pMsg->msgType), terrstr(), pMsg->info.handle); + return -1; + } else { + rpcSendRequest(pDnode->trans.syncRpc, pEpSet, pMsg, NULL); + return 0; + } +} static inline void dmRegisterBrokenLinkArg(SRpcMsg *pMsg) { rpcRegisterBrokenLinkArg(pMsg); } @@ -346,8 +363,8 @@ int32_t dmInitClient(SDnode *pDnode) { SDnodeTrans *pTrans = &pDnode->trans; SRpcInit rpcInit = {0}; - rpcInit.label = "DND-C"; - rpcInit.numOfThreads = tsNumOfRpcThreads; + rpcInit.label = "DNODE-CLI"; + rpcInit.numOfThreads = tsNumOfRpcThreads / 2; rpcInit.cfp = (RpcCfp)dmProcessRpcMsg; rpcInit.sessions = 1024; rpcInit.connType = TAOS_CONN_CLIENT; @@ -366,7 +383,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3); + int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2; connLimitNum = TMAX(connLimitNum, 10); connLimitNum = TMIN(connLimitNum, 500); @@ -390,7 +407,7 @@ int32_t dmInitStatusClient(SDnode *pDnode) { SDnodeTrans *pTrans = &pDnode->trans; SRpcInit rpcInit = {0}; - rpcInit.label = "DND-STATUS"; + rpcInit.label = "DNODE-STA-CLI"; rpcInit.numOfThreads = 1; rpcInit.cfp = (RpcCfp)dmProcessRpcMsg; rpcInit.sessions = 1024; @@ -421,16 +438,61 @@ int32_t dmInitStatusClient(SDnode *pDnode) { rpcInit.timeToGetConn = tsTimeToGetAvailableConn; taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); - // pTrans->statusClientRpc = rpcOpen(&rpcInit); - // if (pTrans->statusClientRpc == NULL) { - // dError("failed to init dnode rpc status client"); - // return -1; - // } + pTrans->statusRpc = rpcOpen(&rpcInit); + if (pTrans->statusRpc == NULL) { + dError("failed to init dnode rpc status client"); + return -1; + } dDebug("dnode rpc status client is initialized"); return 0; } +int32_t dmInitSyncClient(SDnode *pDnode) { + SDnodeTrans *pTrans = &pDnode->trans; + + SRpcInit rpcInit = {0}; + rpcInit.label = "DNODE-SYNC-CLI"; + rpcInit.numOfThreads = tsNumOfRpcThreads / 2; + rpcInit.cfp = (RpcCfp)dmProcessRpcMsg; + rpcInit.sessions = 1024; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.user = TSDB_DEFAULT_USER; + rpcInit.idleTime = tsShellActivityTimer * 1000; + rpcInit.parent = pDnode; + rpcInit.rfp = rpcRfp; + rpcInit.compressSize = tsCompressMsgSize; + + rpcInit.retryMinInterval = tsRedirectPeriod; + rpcInit.retryStepFactor = tsRedirectFactor; + rpcInit.retryMaxInterval = tsRedirectMaxPeriod; + rpcInit.retryMaxTimeout = tsMaxRetryWaitTime; + + rpcInit.failFastInterval = 5000; // interval threshold(ms) + rpcInit.failFastThreshold = 3; // failed threshold + rpcInit.ffp = dmFailFastFp; + + int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2; + connLimitNum = TMAX(connLimitNum, 10); + connLimitNum = TMIN(connLimitNum, 500); + + rpcInit.connLimitNum = connLimitNum; + rpcInit.connLimitLock = 1; + rpcInit.supportBatch = 1; + rpcInit.batchSize = 8 * 1024; + rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + + pTrans->syncRpc = rpcOpen(&rpcInit); + if (pTrans->syncRpc == 
NULL) { + dError("failed to init dnode rpc sync client"); + return -1; + } + + dDebug("dnode rpc sync client is initialized"); + return 0; +} + void dmCleanupClient(SDnode *pDnode) { SDnodeTrans *pTrans = &pDnode->trans; if (pTrans->clientRpc) { @@ -441,11 +503,19 @@ void dmCleanupClient(SDnode *pDnode) { } void dmCleanupStatusClient(SDnode *pDnode) { SDnodeTrans *pTrans = &pDnode->trans; - // if (pTrans->statusClientRpc) { - // rpcClose(pTrans->statusClientRpc); - // pTrans->statusClientRpc = NULL; - // dDebug("dnode rpc status client is closed"); - // } + if (pTrans->statusRpc) { + rpcClose(pTrans->statusRpc); + pTrans->statusRpc = NULL; + dDebug("dnode rpc status client is closed"); + } +} +void dmCleanupSyncClient(SDnode *pDnode) { + SDnodeTrans *pTrans = &pDnode->trans; + if (pTrans->syncRpc) { + rpcClose(pTrans->syncRpc); + pTrans->syncRpc = NULL; + dDebug("dnode rpc sync client is closed"); + } } int32_t dmInitServer(SDnode *pDnode) { @@ -486,7 +556,10 @@ SMsgCb dmGetMsgcb(SDnode *pDnode) { SMsgCb msgCb = { .clientRpc = pDnode->trans.clientRpc, .serverRpc = pDnode->trans.serverRpc, + .statusRpc = pDnode->trans.statusRpc, + .syncRpc = pDnode->trans.syncRpc, .sendReqFp = dmSendReq, + .sendSyncReqFp = dmSendSyncReq, .sendRspFp = dmSendRsp, .registerBrokenLinkArgFp = dmRegisterBrokenLinkArg, .releaseHandleFp = dmReleaseHandle, diff --git a/source/dnode/mgmt/test/snode/CMakeLists.txt b/source/dnode/mgmt/test/snode/CMakeLists.txt index 70f3054381..5de3e55b0a 100644 --- a/source/dnode/mgmt/test/snode/CMakeLists.txt +++ b/source/dnode/mgmt/test/snode/CMakeLists.txt @@ -5,7 +5,7 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME dsnodeTest - COMMAND dsnodeTest -) +#add_test( +# NAME dsnodeTest +# COMMAND dsnodeTest +#) diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 94c937e8f4..8c89ddc825 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -46,10 +46,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw); int32_t mndSetConsumerCommitLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer); int32_t mndSetConsumerDropLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer); -bool mndRebTryStart(); -void mndRebEnd(); -void mndRebCntInc(); -void mndRebCntDec(); +const char *mndConsumerStatusName(int status); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index b0f6273acc..58a4c92d3e 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -44,12 +44,11 @@ typedef struct SStreamTransMgmt { } SStreamTransMgmt; typedef struct SStreamExecInfo { - SArray * pNodeList; + SArray *pNodeList; int64_t ts; // snapshot ts SStreamTransMgmt transMgmt; - int64_t activeCheckpoint; // active check point id - SHashObj * pTaskMap; - SArray * pTaskList; + SHashObj *pTaskMap; + SArray *pTaskList; TdThreadMutex lock; } SStreamExecInfo; @@ -72,7 +71,8 @@ int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pTransName, int64_t streamUid); int32_t mndAddtoCheckpointWaitingList(SStreamObj *pStream, int64_t checkpointId); -bool streamTransConflictOtherTrans(SMnode *pMnode, int64_t streamUid, const char *pTransName, bool lock); +bool mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamUid, const char *pTransName, bool lock); +int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamUid); // for 
sma // TODO refactor diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index 10864da5fb..23b3a7d1fe 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -32,14 +32,13 @@ void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName); -//static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, const char *topicName, int32_t vgId) { -// return snprintf(key, TSDB_PARTITION_KEY_LEN, "%d:%s:%s", vgId, cgroup, topicName); -//} - -//int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic); int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub); +bool mndRebTryStart(); +void mndRebCntInc(); +void mndRebCntDec(); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h index 04544da80e..1bd39a2299 100644 --- a/source/dnode/mnode/impl/inc/mndTrans.h +++ b/source/dnode/mnode/impl/inc/mndTrans.h @@ -97,7 +97,7 @@ SSdbRaw *mndTransEncode(STrans *pTrans); SSdbRow *mndTransDecode(SSdbRaw *pRaw); void mndTransDropData(STrans *pTrans); -bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans); +bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans, bool topHalf); #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c index 9b35343ed2..101022a44f 100644 --- a/source/dnode/mnode/impl/src/mndCompact.c +++ b/source/dnode/mnode/impl/src/mndCompact.c @@ -26,6 +26,7 @@ #include "mndTrans.h" #define MND_COMPACT_VER_NUMBER 1 +#define MND_COMPACT_ID_LEN 11 static int32_t mndProcessCompactTimer(SRpcMsg *pReq); @@ -288,7 +289,7 @@ int32_t mndRetrieveCompact(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, tNameFromString(&name, pCompact->dbname, T_NAME_ACCT | T_NAME_DB); tNameGetDbName(&name, varDataVal(tmpBuf)); } else { - strncpy(varDataVal(tmpBuf), pCompact->dbname, strlen(pCompact->dbname) + 1); + strncpy(varDataVal(tmpBuf), pCompact->dbname, TSDB_SHOW_SQL_LEN); } varDataSetLen(tmpBuf, strlen(varDataVal(tmpBuf))); colDataSetVal(pColInfo, numOfRows, (const char *)tmpBuf, false); @@ -451,7 +452,7 @@ int32_t mndProcessKillCompactReq(SRpcMsg *pReq){ code = TSDB_CODE_ACTION_IN_PROGRESS; - char obj[10] = {0}; + char obj[MND_COMPACT_ID_LEN] = {0}; sprintf(obj, "%d", pCompact->compactId); auditRecord(pReq, pMnode->clusterId, "killCompact", pCompact->dbname, obj, killCompactReq.sql, killCompactReq.sqlLen); diff --git a/source/dnode/mnode/impl/src/mndCompactDetail.c b/source/dnode/mnode/impl/src/mndCompactDetail.c index a1c0e95c20..6b1dd78093 100644 --- a/source/dnode/mnode/impl/src/mndCompactDetail.c +++ b/source/dnode/mnode/impl/src/mndCompactDetail.c @@ -290,11 +290,11 @@ int32_t mndAddCompactDetailToTran(SMnode *pMnode, STrans *pTrans, SCompactObj* p SSdbRaw *pVgRaw = mndCompactDetailActionEncode(&compactDetail); if (pVgRaw == NULL) return -1; - if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) { + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) { sdbFreeRaw(pVgRaw); return -1; } (void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index c9ee66d3a0..4db000287c 100644 --- 
a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -29,12 +29,6 @@ #define MND_CONSUMER_RESERVE_SIZE 64 #define MND_MAX_GROUP_PER_TOPIC 100 -#define MND_CONSUMER_LOST_HB_CNT 6 -#define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 - -static int32_t mqRebInExecCnt = 0; - -static const char *mndConsumerStatusName(int status); static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer); static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer); @@ -45,7 +39,6 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter); static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg); static int32_t mndProcessAskEpReq(SRpcMsg *pMsg); static int32_t mndProcessMqHbReq(SRpcMsg *pMsg); -static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg); static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg); static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg); @@ -63,7 +56,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_SUBSCRIBE, mndProcessSubscribeReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -95,36 +88,6 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo* return; } -bool mndRebTryStart() { - int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); - mDebug("tq timer, rebalance counter old val:%d", old); - return old == 0; -} - -void mndRebEnd() { mndRebCntDec(); } - -void mndRebCntInc() { - int32_t val = atomic_add_fetch_32(&mqRebInExecCnt, 1); - mInfo("rebalance trans start, rebalance counter:%d", val); -} - -void mndRebCntDec() { - while (1) { - int32_t val = atomic_load_32(&mqRebInExecCnt); - if (val <= 0) { - mError("rebalance trans end, rebalance counter:%d should not be less equalled than 0, ignore counter desc", val); - break; - } - - int32_t newVal = val - 1; - int32_t oldVal = atomic_val_compare_exchange_32(&mqRebInExecCnt, val, newVal); - if (oldVal == val) { - mDebug("rebalance trans end, rebalance counter:%d", newVal); - break; - } - } -} - static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) { SMqTopicObj *pTopic = NULL; int32_t code = 0; @@ -257,162 +220,6 @@ FAIL: return -1; } -static SMqRebInfo *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) { - SMqRebInfo *pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); - if (pRebInfo == NULL) { - pRebInfo = tNewSMqRebSubscribe(key); - if (pRebInfo == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - taosHashPut(pHash, key, strlen(key) + 1, pRebInfo, sizeof(SMqRebInfo)); - taosMemoryFree(pRebInfo); - pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); - } - return pRebInfo; -} - -static void freeRebalanceItem(void *param) { - SMqRebInfo *pInfo = param; - taosArrayDestroy(pInfo->newConsumers); - taosArrayDestroy(pInfo->removedConsumers); -} - -static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SSdb *pSdb = pMnode->pSdb; - SMqConsumerObj *pConsumer; - void *pIter = NULL; - - mDebug("start to process mq timer"); - - // rebalance cannot be parallel - if 
(!mndRebTryStart()) { - mInfo("mq rebalance already in progress, do nothing"); - return 0; - } - - SMqDoRebalanceMsg *pRebMsg = rpcMallocCont(sizeof(SMqDoRebalanceMsg)); - if (pRebMsg == NULL) { - mError("failed to create the rebalance msg, size:%d, quit mq timer", (int32_t)sizeof(SMqDoRebalanceMsg)); - mndRebEnd(); - return TSDB_CODE_OUT_OF_MEMORY; - } - - pRebMsg->rebSubHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); - if (pRebMsg->rebSubHash == NULL) { - mError("failed to create rebalance hashmap"); - rpcFreeCont(pRebMsg); - mndRebEnd(); - return TSDB_CODE_OUT_OF_MEMORY; - } - - taosHashSetFreeFp(pRebMsg->rebSubHash, freeRebalanceItem); - - // iterate all consumers, find all modification - while (1) { - pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); - if (pIter == NULL) { - break; - } - - int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); - int32_t status = atomic_load_32(&pConsumer->status); - - mInfo("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", - pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, - hbStatus); - - if (status == MQ_CONSUMER_STATUS_READY) { - if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close - mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); - } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - taosRUnLockLatch(&pConsumer->lock); - }else{ - int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < newTopicNum; i++) { - char * topic = taosArrayGetP(pConsumer->currentTopics, i); - SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic); - if (pSub == NULL) { - continue; - } - taosRLockLatch(&pSub->lock); - - // 2.2 iterate all vg assigned to the consumer of that topic - SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &pConsumer->consumerId, sizeof(int64_t)); - int32_t vgNum = taosArrayGetSize(pConsumerEp->vgs); - - for (int32_t j = 0; j < vgNum; j++) { - SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j); - SVgObj * pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId); - if (!pVgroup) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - mndMakeSubscribeKey(key, pConsumer->cgroup, topic); - mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - mInfo("vnode splitted, vgId:%d rebalance will be triggered", pVgEp->vgId); - } - mndReleaseVgroup(pMnode, pVgroup); - } - taosRUnLockLatch(&pSub->lock); - mndReleaseSubscribe(pMnode, pSub); - } - } - } else if (status == MQ_CONSUMER_STATUS_LOST) { - if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day - mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); - } - } else { // MQ_CONSUMER_STATUS_REBALANCE - taosRLockLatch(&pConsumer->lock); - - int32_t newTopicNum = taosArrayGetSize(pConsumer->rebNewTopics); - for (int32_t i = 0; i < newTopicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *newTopic = taosArrayGetP(pConsumer->rebNewTopics, i); - mndMakeSubscribeKey(key, 
pConsumer->cgroup, newTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->newConsumers, &pConsumer->consumerId); - } - - int32_t removedTopicNum = taosArrayGetSize(pConsumer->rebRemovedTopics); - for (int32_t i = 0; i < removedTopicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->rebRemovedTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - taosRUnLockLatch(&pConsumer->lock); - } - - mndReleaseConsumer(pMnode, pConsumer); - } - - if (taosHashGetSize(pRebMsg->rebSubHash) != 0) { - mInfo("mq rebalance will be triggered"); - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_DO_REBALANCE, - .pCont = pRebMsg, - .contLen = sizeof(SMqDoRebalanceMsg), - }; - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); - } else { - taosHashCleanup(pRebMsg->rebSubHash); - rpcFreeCont(pRebMsg); - mDebug("mq timer finished, no need to re-balance"); - mndRebEnd(); - } - return 0; -} - static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t code = 0; SMnode *pMnode = pMsg->info.node; @@ -948,7 +755,7 @@ static void updateConsumerStatus(SMqConsumerObj *pConsumer) { if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) { if (status == MQ_CONSUMER_STATUS_REBALANCE) { pConsumer->status = MQ_CONSUMER_STATUS_READY; - } else if (status == MQ_CONSUMER_STATUS_READY) { + } else if (status == MQ_CONSUMER_STATUS_READY && taosArrayGetSize(pConsumer->currentTopics) == 0) { pConsumer->status = MQ_CONSUMER_STATUS_LOST; } } @@ -1251,7 +1058,7 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { sdbCancelFetch(pSdb, pIter); } -static const char *mndConsumerStatusName(int status) { +const char *mndConsumerStatusName(int status) { switch (status) { case MQ_CONSUMER_STATUS_READY: return "ready"; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index e224aceec2..6585e70533 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1094,6 +1094,32 @@ _OVER: return code; } +static bool mndIsEmptyDnode(SMnode *pMnode, int32_t dnodeId) { + bool isEmpty = false; + SMnodeObj *pMObj = NULL; + SQnodeObj *pQObj = NULL; + SSnodeObj *pSObj = NULL; + + pQObj = mndAcquireQnode(pMnode, dnodeId); + if (pQObj) goto _OVER; + + pSObj = mndAcquireSnode(pMnode, dnodeId); + if (pSObj) goto _OVER; + + pMObj = mndAcquireMnode(pMnode, dnodeId); + if (pMObj) goto _OVER; + + int32_t numOfVnodes = mndGetVnodesNum(pMnode, dnodeId); + if (numOfVnodes > 0) goto _OVER; + + isEmpty = true; +_OVER: + mndReleaseMnode(pMnode, pMObj); + mndReleaseQnode(pMnode, pQObj); + mndReleaseSnode(pMnode, pSObj); + return isEmpty; +} + static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -1155,7 +1181,8 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { goto _OVER; } - if (!isonline && !force) { + bool isEmpty = mndIsEmptyDnode(pMnode, pDnode->id); + if (!isonline && !force && !isEmpty) { terrno = TSDB_CODE_DNODE_OFFLINE; mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(), numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL); diff --git a/source/dnode/mnode/impl/src/mndDump.c b/source/dnode/mnode/impl/src/mndDump.c index 5efebbc16e..c68b11d184 
100644 --- a/source/dnode/mnode/impl/src/mndDump.c +++ b/source/dnode/mnode/impl/src/mndDump.c @@ -32,6 +32,10 @@ int32_t sendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) { terrno = TSDB_CODE_INVALID_PTR; return -1; } +int32_t sendSyncReq(const SEpSet *pEpSet, SRpcMsg *pMsg) { + terrno = TSDB_CODE_INVALID_PTR; + return -1; +} char *i642str(int64_t val) { static char str[24] = {0}; @@ -568,6 +572,7 @@ void mndDumpSdb() { SMsgCb msgCb = {0}; msgCb.reportStartupFp = reportStartup; msgCb.sendReqFp = sendReq; + msgCb.sendSyncReqFp = sendSyncReq; msgCb.sendRspFp = sendRsp; msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack tmsgSetDefault(&msgCb); @@ -590,7 +595,7 @@ void mndDumpSdb() { dumpTopic(pSdb, json); dumpConsumer(pSdb, json); dumpSubscribe(pSdb, json); -// dumpOffset(pSdb, json); + // dumpOffset(pSdb, json); dumpStream(pSdb, json); dumpAcct(pSdb, json); dumpAuth(pSdb, json); @@ -605,7 +610,7 @@ void mndDumpSdb() { char *pCont = tjsonToString(json); int32_t contLen = strlen(pCont); char file[] = "sdb.json"; - TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC| TD_FILE_WRITE_THROUGH); + TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pFile == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); mError("failed to write %s since %s", file, terrstr()); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 38a92b43e7..75d527bc6c 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -16,6 +16,8 @@ #define _DEFAULT_SOURCE #include "mndAcct.h" #include "mndCluster.h" +#include "mndCompact.h" +#include "mndCompactDetail.h" #include "mndConsumer.h" #include "mndDb.h" #include "mndDnode.h" @@ -42,8 +44,6 @@ #include "mndUser.h" #include "mndVgroup.h" #include "mndView.h" -#include "mndCompact.h" -#include "mndCompactDetail.h" static inline int32_t mndAcquireRpc(SMnode *pMnode) { int32_t code = 0; @@ -145,7 +145,7 @@ static void mndCalMqRebalance(SMnode *pMnode) { void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TMQ_TIMER, .pCont = pReq, .contLen = contLen}; - tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); } } @@ -158,16 +158,16 @@ static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) { } } -static void mndStreamCheckpointRemain(SMnode* pMnode) { +static void mndStreamCheckpointRemain(SMnode *pMnode) { int32_t contLen = 0; - void *pReq = mndBuildCheckpointTickMsg(&contLen, 0); + void *pReq = mndBuildCheckpointTickMsg(&contLen, 0); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); } } -static void mndStreamCheckNode(SMnode* pMnode) { +static void mndStreamCheckNode(SMnode *pMnode) { int32_t contLen = 0; void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { @@ -275,6 +275,100 @@ static void mndCheckDnodeOffline(SMnode *pMnode) { mndReleaseRpc(pMnode); } +static bool mnodeIsNotLeader(SMnode *pMnode) { + terrno = 0; + taosThreadRwlockRdlock(&pMnode->lock); + SSyncState state = syncGetState(pMnode->syncMgmt.sync); + if (terrno != 0) { + taosThreadRwlockUnlock(&pMnode->lock); + return true; + } + + if (state.state != TAOS_SYNC_STATE_LEADER) { + taosThreadRwlockUnlock(&pMnode->lock); + terrno = TSDB_CODE_SYN_NOT_LEADER; + return true; + } + if (!state.restored || !pMnode->restored) { + 
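/* Locking/terrno discipline for the branches above and below (a reading of the code, not new logic):
 * syncGetState() reports failure only through terrno, so terrno is cleared before the call; every
 * early return releases pMnode->lock first, and "still restoring" is folded into "not leader" so the
 * periodic tasks stay quiet until this mnode is both leader and fully restored. */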
taosThreadRwlockUnlock(&pMnode->lock); + terrno = TSDB_CODE_SYN_RESTORING; + return true; + } + taosThreadRwlockUnlock(&pMnode->lock); + return false; +} + +static int32_t minCronTime() { + int32_t min = INT32_MAX; + min = TMIN(min, tsTtlPushIntervalSec); + min = TMIN(min, tsTrimVDbIntervalSec); + min = TMIN(min, tsTransPullupInterval); + min = TMIN(min, tsCompactPullupInterval); + min = TMIN(min, tsMqRebalanceInterval); + min = TMIN(min, tsStreamCheckpointInterval); + min = TMIN(min, 6); // checkpointRemain + min = TMIN(min, tsStreamNodeCheckInterval); + + int64_t telemInt = TMIN(60, (tsTelemInterval - 1)); + min = TMIN(min, telemInt); + min = TMIN(min, tsGrantHBInterval); + min = TMIN(min, tsUptimeInterval); + + return min <= 1 ? 2 : min; +} +void mndDoTimerPullupTask(SMnode *pMnode, int64_t sec) { + if (sec % tsTtlPushIntervalSec == 0) { + mndPullupTtl(pMnode); + } + + if (sec % tsTrimVDbIntervalSec == 0) { + mndPullupTrimDb(pMnode); + } + + if (sec % tsTransPullupInterval == 0) { + mndPullupTrans(pMnode); + } + + if (sec % tsCompactPullupInterval == 0) { + mndPullupCompacts(pMnode); + } + + if (sec % tsMqRebalanceInterval == 0) { + mndCalMqRebalance(pMnode); + } + + if (sec % tsStreamCheckpointInterval == 0) { + mndStreamCheckpointTick(pMnode, sec); + } + + if (sec % 5 == 0) { + mndStreamCheckpointRemain(pMnode); + } + + if (sec % tsStreamNodeCheckInterval == 0) { + mndStreamCheckNode(pMnode); + } + + if (sec % tsTelemInterval == (TMIN(60, (tsTelemInterval - 1)))) { + mndPullupTelem(pMnode); + } + + if (sec % tsGrantHBInterval == 0) { + mndPullupGrant(pMnode); + } + + if (sec % tsUptimeInterval == 0) { + mndIncreaseUpTime(pMnode); + } +} +void mndDoTimerCheckTask(SMnode *pMnode, int64_t sec) { + if (sec % (tsStatusInterval * 5) == 0) { + mndCheckDnodeOffline(pMnode); + } + if (sec % (MNODE_TIMEOUT_SEC / 2) == 0) { + mndSyncCheckTimeout(pMnode); + } +} static void *mndThreadFp(void *param) { SMnode *pMnode = param; int64_t lastTime = 0; @@ -287,57 +381,16 @@ static void *mndThreadFp(void *param) { if (lastTime % 10 != 0) continue; int64_t sec = lastTime / 10; - if (sec % tsTtlPushIntervalSec == 0) { - mndPullupTtl(pMnode); - } + mndDoTimerCheckTask(pMnode, sec); - if (sec % tsTrimVDbIntervalSec == 0) { - mndPullupTrimDb(pMnode); - } - - if (sec % tsTransPullupInterval == 0) { - mndPullupTrans(pMnode); - } - - if (sec % tsCompactPullupInterval == 0) { - mndPullupCompacts(pMnode); - } - - if (sec % tsMqRebalanceInterval == 0) { - mndCalMqRebalance(pMnode); - } - - if (sec % tsStreamCheckpointInterval == 0) { - mndStreamCheckpointTick(pMnode, sec); - } - - if (sec % 5 == 0) { - mndStreamCheckpointRemain(pMnode); - } - - if (sec % tsStreamNodeCheckInterval == 0) { - mndStreamCheckNode(pMnode); - } - - if (sec % tsTelemInterval == (TMIN(60, (tsTelemInterval - 1)))) { - mndPullupTelem(pMnode); - } - - if (sec % tsGrantHBInterval == 0) { - mndPullupGrant(pMnode); - } - - if (sec % tsUptimeInterval == 0) { - mndIncreaseUpTime(pMnode); - } - - if (sec % (tsStatusInterval * 5) == 0) { - mndCheckDnodeOffline(pMnode); - } - - if (sec % (MNODE_TIMEOUT_SEC / 2) == 0) { - mndSyncCheckTimeout(pMnode); + int64_t minCron = minCronTime(); + if (sec % minCron == 0 && mnodeIsNotLeader(pMnode)) { + // not leader, do nothing + mTrace("timer not process since mnode is not leader, reason: %s", tstrerror(terrno)); + terrno = 0; + continue; } + mndDoTimerPullupTask(pMnode, sec); } return NULL; @@ -712,7 +765,9 @@ _OVER: if (pMsg->msgType == TDMT_MND_TMQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || 
pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER || pMsg->msgType == TDMT_MND_TRIM_DB_TIMER || pMsg->msgType == TDMT_MND_UPTIME_TIMER || - pMsg->msgType == TDMT_MND_COMPACT_TIMER) { + pMsg->msgType == TDMT_MND_COMPACT_TIMER || pMsg->msgType == TDMT_MND_NODECHECK_TIMER || + pMsg->msgType == TDMT_MND_GRANT_HB_TIMER || pMsg->msgType == TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE || + pMsg->msgType == TDMT_MND_STREAM_CHECKPOINT_TIMER) { mTrace("timer not process since mnode restored:%d stopped:%d, sync restored:%d role:%s ", pMnode->restored, pMnode->stopped, state.restored, syncStr(state.state)); return -1; diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 34ee18a5cc..e0559b4c48 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "mndProfile.h" +#include "audit.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" @@ -26,7 +27,6 @@ #include "mndView.h" #include "tglobal.h" #include "tversion.h" -#include "audit.h" typedef struct { uint32_t id; @@ -530,23 +530,24 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb } #ifdef TD_ENTERPRISE - bool needCheck = true; - int32_t key = HEARTBEAT_KEY_DYN_VIEW; - SDynViewVersion* pDynViewVer = NULL; - SKv* pKv = taosHashGet(pHbReq->info, &key, sizeof(key)); + bool needCheck = true; + int32_t key = HEARTBEAT_KEY_DYN_VIEW; + SDynViewVersion *pDynViewVer = NULL; + SKv *pKv = taosHashGet(pHbReq->info, &key, sizeof(key)); if (NULL != pKv) { pDynViewVer = pKv->value; mTrace("recv view dyn ver, bootTs:%" PRId64 ", ver:%" PRIu64, pDynViewVer->svrBootTs, pDynViewVer->dynViewVer); - SDynViewVersion* pRspVer = NULL; + SDynViewVersion *pRspVer = NULL; if (0 != mndValidateDynViewVersion(pMnode, pDynViewVer, &needCheck, &pRspVer)) { return -1; } - + if (needCheck) { SKv kv1 = {.key = HEARTBEAT_KEY_DYN_VIEW, .valueLen = sizeof(*pDynViewVer), .value = pRspVer}; taosArrayPush(hbRsp.info, &kv1); - mTrace("need to check view ver, lastest bootTs:%" PRId64 ", ver:%" PRIu64, pRspVer->svrBootTs, pRspVer->dynViewVer); + mTrace("need to check view ver, latest bootTs:%" PRId64 ", ver:%" PRIu64, pRspVer->svrBootTs, + pRspVer->dynViewVer); } } #endif @@ -594,7 +595,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb if (!needCheck) { break; } - + void *rspMsg = NULL; int32_t rspLen = 0; mndValidateViewInfo(pMnode, kv->value, kv->valueLen / sizeof(SViewVersion), &rspMsg, &rspLen); @@ -814,8 +815,9 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl * @param offset skip [offset] queries in pConn * @param rowsToPack at most rows to pack * @return rows packed -*/ -static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBlock* pBlock, uint32_t offset, uint32_t rowsToPack) { + */ +static int32_t packQueriesIntoBlock(SShowObj *pShow, SConnObj *pConn, SSDataBlock *pBlock, uint32_t offset, + uint32_t rowsToPack) { int32_t cols = 0; taosRLockLatch(&pConn->queryLock); int32_t numOfQueries = taosArrayGetSize(pConn->pQueries); @@ -826,7 +828,7 @@ static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc int32_t i = offset; for (; i < numOfQueries && (i - offset) < rowsToPack; ++i) { - int32_t curRowIndex = pBlock->info.rows; + int32_t curRowIndex = pBlock->info.rows; SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i); cols = 0; @@ -877,14 +879,19 @@ static int32_t 
packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false); char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0}; + int64_t reserve = 64; int32_t strSize = sizeof(subStatus); int32_t offset = VARSTR_HEADER_SIZE; - for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) { + for (int32_t i = 0; i < pQuery->subPlanNum && offset + reserve < strSize; ++i) { if (i) { - offset += snprintf(subStatus + offset, strSize - offset - 1, ","); + offset += sprintf(subStatus + offset, ","); + } + if (offset + reserve < strSize) { + SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i); + offset += sprintf(subStatus + offset, "%" PRIu64 ":%s", pDesc->tid, pDesc->status); + } else { + break; } - SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i); - offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status); } varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -903,8 +910,8 @@ static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc } static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { - SMnode * pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; SConnObj *pConn = NULL; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 1084536cc1..c74007edd3 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -62,7 +62,7 @@ static void mndCancelGetNextStreamTask(SMnode *pMnode, void *pIter); static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq); static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq); static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, int32_t nodeId, int64_t checkpointId, - int64_t streamId, int32_t taskId); + int64_t streamId, int32_t taskId, int32_t transId); static int32_t mndProcessNodeCheck(SRpcMsg *pReq); static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg); static SArray *extractNodeListFromStream(SMnode *pMnode); @@ -80,7 +80,9 @@ static int32_t createStreamUpdateTrans(SStreamObj *pStream, SVgroupChangeInfo *p static void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode); static void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode); static int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot); -static int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDbName, size_t len); +static int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDbName, size_t len); +static void killTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName); + static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList); static void freeCheckpointCandEntry(void *); @@ -683,6 +685,40 @@ _OVER: return -1; } +static int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool* hasEpset, int32_t taskId, int32_t nodeId) { + *hasEpset = false; + + if (nodeId == SNODE_HANDLE) { + SSnodeObj *pObj = NULL; + void *pIter = NULL; + + pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj); + if (pIter != NULL) { + addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); + sdbRelease(pMnode->pSdb, pObj); + sdbCancelFetch(pMnode->pSdb, pIter); + *hasEpset = true; + return TSDB_CODE_SUCCESS; + } else { + mError("failed to acquire 
snode epset"); + return TSDB_CODE_INVALID_PARA; + } + } else { + SVgObj *pVgObj = mndAcquireVgroup(pMnode, nodeId); + if (pVgObj != NULL) { + SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj); + mndReleaseVgroup(pMnode, pVgObj); + + epsetAssign(pEpSet, &epset); + *hasEpset = true; + return TSDB_CODE_SUCCESS; + } else { + mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", taskId, nodeId); + return TSDB_CODE_SUCCESS; + } + } +} + static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) { SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq)); if (pReq == NULL) { @@ -696,28 +732,17 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask STransAction action = {0}; SEpSet epset = {0}; - if (pTask->info.nodeId == SNODE_HANDLE) { - SSnodeObj *pObj = NULL; - void *pIter = NULL; - while (1) { - pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj); - if (pIter == NULL) { - break; - } + bool hasEpset = false; - addEpIntoEpSet(&epset, pObj->pDnode->fqdn, pObj->pDnode->port); - sdbRelease(pMnode->pSdb, pObj); - } - } else { - SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - if (pVgObj != NULL) { - epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); - } else { - mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", pTask->id.taskId, pTask->info.nodeId); - taosMemoryFree(pReq); - return 0; - } + int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return -1; + } + + // no valid epset, return directly without redoAction + if (!hasEpset) { + return TSDB_CODE_SUCCESS; } // The epset of nodeId of this task may have been expired now, let's use the newest epset from mnode. 
@@ -767,7 +792,7 @@ static int32_t checkForNumOfStreams(SMnode *pMnode, SStreamObj *pStreamObj) { / if (pStream->targetStbUid == pStreamObj->targetStbUid) { mError("Cannot write the same stable as other stream:%s", pStream->name); sdbCancelFetch(pMnode->pSdb, pIter); - terrno = TSDB_CODE_MND_TOO_MANY_STREAMS; + terrno = TSDB_CODE_MND_INVALID_TARGET_TABLE; return terrno; } } @@ -972,13 +997,14 @@ static int32_t mndProcessStreamRemainChkptTmr(SRpcMsg *pReq) { } static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, int32_t nodeId, int64_t checkpointId, - int64_t streamId, int32_t taskId) { + int64_t streamId, int32_t taskId, int32_t transId) { SStreamCheckpointSourceReq req = {0}; req.checkpointId = checkpointId; req.nodeId = nodeId; req.expireTime = -1; req.streamId = streamId; // pTask->id.streamId; req.taskId = taskId; // pTask->id.taskId; + req.transId = transId; int32_t code; int32_t blen; @@ -1021,7 +1047,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre return -1; } - bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_CHECKPOINT_NAME, true); + bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_CHECKPOINT_NAME, true); if (conflict) { mndAddtoCheckpointWaitingList(pStream, checkpointId); mWarn("checkpoint conflict with other trans in %s, ignore the checkpoint for stream:%s %" PRIx64, pStream->sourceDb, @@ -1068,7 +1094,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre void * buf; int32_t tlen; if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, checkpointId, pTask->id.streamId, - pTask->id.taskId) < 0) { + pTask->id.taskId, pTrans->id) < 0) { mndReleaseVgroup(pMnode, pVgObj); taosWUnLockLatch(&pStream->lock); goto _ERR; @@ -1137,7 +1163,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream void * buf; int32_t tlen; if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, chkptId, pTask->id.streamId, - pTask->id.taskId) < 0) { + pTask->id.taskId, pTrans->id) < 0) { mndReleaseVgroup(pMnode, pVgObj); taosWUnLockLatch(&pStream->lock); return -1; @@ -1318,6 +1344,7 @@ static int32_t mndProcessStreamCheckpointInCandid(SRpcMsg *pReq) { if (ps == NULL) { continue; } + mDebug("start to launch checkpoint for stream:%s %" PRIx64 " in candidate list", pEntry->pName, pEntry->streamId); code = mndProcessStreamCheckpointTrans(pMnode, ps, pEntry->checkpointId); @@ -1373,7 +1400,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { } // check if it is conflict with other trans in both sourceDb and targetDb. 
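Several hunks in this file switch to the renamed mndStreamTransConflictCheck(). The precedence rule it enforces (spelled out in the mndStreamTrans.c hunk further below) is: an active checkpoint trans yields only to a drop-stream trans, while create/drop/reset transactions block everything else on the same stream. A compressed sketch of that rule, using a fixed-size slot array instead of the real hash table and hypothetical trans names:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  int64_t streamUid;
  char    name[32];
  bool    used;
} TransSlot;

static TransSlot g_slots[16]; /* stands in for execInfo.transMgmt */

/* Returns true when a pending trans on the same stream blocks newName. */
static bool trans_conflict(int64_t streamUid, const char *newName) {
  for (int i = 0; i < 16; ++i) {
    if (!g_slots[i].used || g_slots[i].streamUid != streamUid) continue;
    if (strcmp(g_slots[i].name, "checkpoint") == 0) {
      /* an active checkpoint yields only to a drop-stream trans */
      return strcmp(newName, "drop-stream") != 0;
    }
    return true; /* create/drop/reset block everything else */
  }
  return false;
}

static void trans_register(int64_t streamUid, const char *name) {
  for (int i = 0; i < 16; ++i) {
    if (g_slots[i].used) continue;
    g_slots[i].used = true;
    g_slots[i].streamUid = streamUid;
    snprintf(g_slots[i].name, sizeof(g_slots[i].name), "%s", name);
    return;
  }
}

int main(void) {
  trans_register(0x1, "checkpoint");
  printf("pause conflicts: %d\n", trans_conflict(0x1, "pause"));       /* 1 */
  printf("drop conflicts:  %d\n", trans_conflict(0x1, "drop-stream")); /* 0 */
  return 0;
}
```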
- bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_DROP_NAME, true); + bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_DROP_NAME, true); if (conflict) { sdbRelease(pMnode->pSdb, pStream); tFreeMDropStreamReq(&dropReq); @@ -1425,6 +1452,13 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { return -1; } + // kill the related checkpoint trans + int32_t transId = mndStreamGetRelTrans(pMnode, pStream->uid); + if (transId != 0) { + mDebug("drop active related transId:%d due to stream:%s dropped", transId, pStream->name); + killTransImpl(pMnode, transId, pStream->sourceDb); + } + removeStreamTasksInBuf(pStream, &execInfo); SName name = {0}; @@ -1465,6 +1499,17 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { return -1; } #endif + + // kill the related checkpoint trans + int32_t transId = mndStreamGetRelTrans(pMnode, pStream->uid); + if (transId != 0) { + mDebug("drop active related transId:%d due to stream:%s dropped", transId, pStream->name); + killTransImpl(pMnode, transId, pStream->sourceDb); + } + + // drop the stream obj in execInfo + removeStreamTasksInBuf(pStream, &execInfo); + if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { sdbRelease(pSdb, pStream); sdbCancelFetch(pSdb, pIter); @@ -1597,10 +1642,18 @@ static void mndCancelGetNextStream(SMnode *pMnode, void *pIter) { sdbCancelFetch(pSdb, pIter); } -static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlock *pBlock, int32_t numOfRows) { +static int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlock *pBlock, int32_t numOfRows) { SColumnInfoData *pColInfo; int32_t cols = 0; + STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; + + STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id)); + if (pe == NULL) { + mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId); + return -1; + } + // stream name char streamName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(streamName, mndGetDbStr(pStream->name), sizeof(streamName)); @@ -1651,14 +1704,7 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat colDataSetVal(pColInfo, numOfRows, (const char *)level, false); // status - char status[20 + VARSTR_HEADER_SIZE] = {0}; - STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; - - STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id)); - if (pe == NULL) { - mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId); - return; - } + char status[20 + VARSTR_HEADER_SIZE] = {0}; const char *pStatus = streamTaskGetStatusStr(pe->status); STR_TO_VARSTR(status, pStatus); @@ -1682,7 +1728,7 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat colDataSetVal(pColInfo, numOfRows, (const char *)vbuf, false); // output queue - // sprintf(buf, queueInfoStr, pe->outputQUsed, pe->outputRate); +// sprintf(buf, queueInfoStr, pe->outputQUsed, pe->outputRate); // STR_TO_VARSTR(vbuf, buf); // pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1701,6 +1747,8 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)vbuf, false); + + return TSDB_CODE_SUCCESS; } static int32_t getNumOfTasks(SArray *pTaskList) { @@ -1742,8 +1790,10 @@ static int32_t 
mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock int32_t numOfLevels = taosArrayGetSize(pLevel); for (int32_t j = 0; j < numOfLevels; j++) { SStreamTask *pTask = taosArrayGetP(pLevel, j); - setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows); - numOfRows++; + int32_t code = setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows); + if (code == TSDB_CODE_SUCCESS) { + numOfRows++; + } } } @@ -1774,9 +1824,20 @@ static int32_t mndPauseStreamTask(SMnode *pMnode, STrans *pTrans, SStreamTask *p pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; - SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); + SEpSet epset; + bool hasEpset = false; + int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pReq); + return -1; + } + + // no valid epset, return directly without redoAction + if (!hasEpset) { + taosMemoryFree(pReq); + return TSDB_CODE_SUCCESS; + } STransAction action = {0}; initTransAction(&action, pReq, sizeof(SVPauseStreamTaskReq), TDMT_STREAM_TASK_PAUSE, &epset, 0); @@ -1801,7 +1862,7 @@ int32_t mndPauseAllStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStre } if (atomic_load_8(&pTask->status.taskStatus) != TASK_STATUS__PAUSE) { - atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus); + atomic_store_8(&pTask->status.statusBackup, pTask->status.taskStatus); atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE); } } @@ -1857,7 +1918,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { } // check if it is conflict with other trans in both sourceDb and targetDb. - bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_PAUSE_NAME, true); + bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_PAUSE_NAME, true); if (conflict) { sdbRelease(pMnode->pSdb, pStream); return -1; @@ -1918,17 +1979,25 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { static int32_t mndResumeStreamTask(STrans *pTrans, SMnode *pMnode, SStreamTask *pTask, int8_t igUntreated) { SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq)); if (pReq == NULL) { + mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq), + tstrerror(TSDB_CODE_OUT_OF_MEMORY)); terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } + pReq->head.vgId = htonl(pTask->info.nodeId); pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; pReq->igUntreated = igUntreated; - SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); + SEpSet epset; + bool hasEpset = false; + int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pReq); + return -1; + } STransAction action = {0}; initTransAction(&action, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0); @@ -1951,7 +2020,7 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SMnode *pMnode, SStreamObj *pStr } if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__PAUSE) { - atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus); + atomic_store_8(&pTask->status.taskStatus, pTask->status.statusBackup); } } } @@ -1992,7 +2061,7 @@ static int32_t 
mndProcessResumeStreamReq(SRpcMsg *pReq) { } // check if it is conflict with other trans in both sourceDb and targetDb. - bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_RESUME_NAME, true); + bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_RESUME_NAME, true); if (conflict) { sdbRelease(pMnode->pSdb, pStream); return -1; @@ -2206,6 +2275,8 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP epsetAssign(&updateInfo.newEp, &pCurrent->epset); taosArrayPush(info.pUpdateNodeList, &updateInfo); } + + // todo handle the snode info if (pCurrent->nodeId != SNODE_HANDLE) { SVgObj *pVgroup = mndAcquireVgroup(pMnode, pCurrent->nodeId); taosHashPut(info.pDBMap, pVgroup->dbName, strlen(pVgroup->dbName), NULL, 0); @@ -2284,12 +2355,28 @@ static SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady) { } static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) { - SSdb *pSdb = pMnode->pSdb; - - // check all streams that involved this vnode should update the epset info + SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; - void * pIter = NULL; - STrans * pTrans = NULL; + void *pIter = NULL; + STrans *pTrans = NULL; + + // conflict check for nodeUpdate trans, here we randomly chose one stream to add into the trans pool + while(1) { + pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); + if (pIter == NULL) { + break; + } + + bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_TASK_UPDATE_NAME, false); + sdbRelease(pSdb, pStream); + + if (conflict) { + mWarn("nodeUpdate trans in progress, current nodeUpdate ignored"); + sdbCancelFetch(pSdb, pIter); + return -1; + } + } + while (1) { pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); @@ -2305,6 +2392,8 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange sdbCancelFetch(pSdb, pIter); return terrno; } + + mndStreamRegisterTrans(pTrans, MND_STREAM_TASK_RESET_NAME, pStream->uid); } void *p = taosHashGet(pChangeInfo->pDBMap, pStream->targetDb, strlen(pStream->targetDb)); @@ -2502,7 +2591,7 @@ static void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInf size_t len = 0; void * pKey = taosHashGetKey(pDb, &len); - killActiveCheckpointTrans(pMnode, pKey, len); + doKillCheckpointTrans(pMnode, pKey, len); } } @@ -2551,7 +2640,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { code = mndProcessVgroupChange(pMnode, &changeInfo); - // keep the new vnode snapshot + // keep the new vnode snapshot if success if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { mDebug("create trans successfully, update cached node list"); taosArrayDestroy(execInfo.pNodeList); @@ -2702,9 +2791,18 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; - SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); + SEpSet epset; + bool hasEpset = false; + int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + if (code != TSDB_CODE_SUCCESS) { + taosMemoryFree(pReq); + continue; + } + + if (!hasEpset) { + taosMemoryFree(pReq); + continue; + } STransAction action = {0}; initTransAction(&action, pReq, sizeof(SVResetStreamTaskReq), TDMT_VND_STREAM_TASK_RESET, &epset, 0); @@ -2738,7 +2836,16 @@ int32_t 
createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { return TSDB_CODE_ACTION_IN_PROGRESS; } -int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) { +void killTransImpl(SMnode* pMnode, int32_t transId, const char* pDbName) { + STrans *pTrans = mndAcquireTrans(pMnode, transId); + if (pTrans != NULL) { + mInfo("kill active transId:%d in Db:%s", transId, pDbName); + mndKillTrans(pMnode, pTrans); + mndReleaseTrans(pMnode, pTrans); + } +} + +int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) { // data in the hash table will be removed automatically, no need to remove it here. SStreamTransInfo *pTransInfo = taosHashGet(execInfo.transMgmt.pDBTrans, pDBName, len); if (pTransInfo == NULL) { @@ -2751,52 +2858,43 @@ int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t le return TSDB_CODE_SUCCESS; } - STrans *pTrans = mndAcquireTrans(pMnode, pTransInfo->transId); - if (pTrans != NULL) { - char* pDupDBName = strndup(pDBName, len); - mInfo("kill checkpoint transId:%d in Db:%s", pTransInfo->transId, pDupDBName); - taosMemoryFree(pDupDBName); - - mndKillTrans(pMnode, pTrans); - mndReleaseTrans(pMnode, pTrans); - } + char* pDupDBName = strndup(pDBName, len); + killTransImpl(pMnode, pTransInfo->transId, pDupDBName); + taosMemoryFree(pDupDBName); return TSDB_CODE_SUCCESS; } -static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int32_t transId) { +static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int64_t streamId, int32_t transId) { + int32_t code = TSDB_CODE_SUCCESS; + STrans *pTrans = mndAcquireTrans(pMnode, transId); if (pTrans != NULL) { mInfo("kill checkpoint transId:%d to reset task status", transId); mndKillTrans(pMnode, pTrans); mndReleaseTrans(pMnode, pTrans); + } else { + mError("failed to acquire checkpoint trans:%d", transId); } - // set all tasks status to be normal, refactor later to be stream level, instead of vnode level. 
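killTransImpl() factors the acquire/kill/release sequence out of doKillCheckpointTrans(); the other detail in that hunk is that the hash-table key is length-delimited, so the db name is copied through strndup() before it can be used as a C string in a log message. A self-contained sketch of both, assuming a toy trans table (acquire_trans and friends are hypothetical):

```c
#define _POSIX_C_SOURCE 200809L /* for strndup */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int id;
  int killed;
} Trans;

static Trans g_trans[4] = {{1, 0}, {2, 0}, {3, 0}, {4, 0}};

static Trans *acquire_trans(int transId) {
  for (int i = 0; i < 4; ++i) {
    if (g_trans[i].id == transId) return &g_trans[i];
  }
  return NULL; /* the trans may already have finished; not an error */
}

static void kill_trans_impl(int transId, const char *dbName) {
  Trans *t = acquire_trans(transId);
  if (t != NULL) {
    printf("kill active transId:%d in Db:%s\n", transId, dbName);
    t->killed = 1;
  }
}

static void kill_checkpoint_trans(const char *key, size_t keyLen, int transId) {
  char *dbName = strndup(key, keyLen); /* key has no trailing NUL */
  if (dbName == NULL) return;
  kill_trans_impl(transId, dbName);
  free(dbName);
}

int main(void) {
  const char key[3] = {'d', 'b', '1'}; /* length-delimited hash key */
  kill_checkpoint_trans(key, sizeof(key), 2);
  return 0;
}
```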
-  SSdb       *pSdb = pMnode->pSdb;
-  SStreamObj *pStream = NULL;
-  void       *pIter = NULL;
-  while (1) {
-    pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
-    if (pIter == NULL) {
-      break;
-    }
-
-    bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_TASK_RESET_NAME, false);
+  SStreamObj *pStream = mndGetStreamObj(pMnode, streamId);
+  if (pStream == NULL) {
+    code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+    mError("failed to acquire the streamObj:0x%" PRIx64 " to reset checkpoint, may have been dropped", streamId);
+  } else {
+    bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_TASK_RESET_NAME, false);
     if (conflict) {
-      mError("stream:%s other trans exists in DB:%s & %s failed to start reset-status trans", pStream->name,
-             pStream->sourceDb, pStream->targetDb);
-      continue;
-    }
-
-    mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, create reset trans", pStream->name, pStream->uid);
-    int32_t code = createStreamResetStatusTrans(pMnode, pStream);
-    if (code != TSDB_CODE_SUCCESS) {
-      sdbCancelFetch(pSdb, pIter);
-      return code;
+      mError("stream:%s other trans exists in DB:%s, dstTable:%s failed to start reset-status trans", pStream->name,
+             pStream->sourceDb, pStream->targetSTbName);
+    } else {
+      mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, transId:%d, create reset trans", pStream->name,
+             pStream->uid, transId);
+      code = createStreamResetStatusTrans(pMnode, pStream);
+      mndReleaseStream(pMnode, pStream);
     }
   }
-  return 0;
+
+  return code;
 }
 
 static SStreamTask *mndGetStreamTask(STaskId *pId, SStreamObj *pStream) {
@@ -2815,38 +2913,38 @@ static SStreamTask *mndGetStreamTask(STaskId *pId, SStreamObj *pStream) {
   return NULL;
 }
 
-static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) {
-  if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) {
-    if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) {
-      int32_t numOfReady = 0;
-      int32_t numOfTotal = 0;
-      for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
-        STaskId *pId = taosArrayGet(pExecNode->pTaskList, k);
-        if (pTaskEntry->id.streamId == pId->streamId) {
-          numOfTotal++;
-
-          if (pTaskEntry->id.taskId != pId->taskId) {
-            STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId));
-            if (pEntry->status == TASK_STATUS__READY) {
-              numOfReady++;
-            }
-          }
-        }
-      }
-
-      if (numOfReady > 0) {
-        mDebug("stream:0x%" PRIx64
-               " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history task",
-               pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady);
-        return true;
-      } else {
-        return false;
-      }
-    }
-  }
-
-  return false;
-}
+//static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) {
+//  if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) {
+//    if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) {
+//      int32_t numOfReady = 0;
+//      int32_t numOfTotal = 0;
+//      for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
+//        STaskId *pId = taosArrayGet(pExecNode->pTaskList, k);
+//        if (pTaskEntry->id.streamId == pId->streamId) {
+//          numOfTotal++;
+//
+//          if (pTaskEntry->id.taskId != pId->taskId) {
+//            STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId));
+//            if (pEntry->status == TASK_STATUS__READY) {
+//              numOfReady++;
+//            }
+//          }
+//        }
+//      }
+//
+//      if
(numOfReady > 0) { +// mDebug("stream:0x%" PRIx64 +// " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history task", +// pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady); +// return true; +// } else { +// return false; +// } +// } +// } +// +// return false; +//} // currently only handle the sink task // 1. sink task, drop related fill-history task msg is missing @@ -2876,11 +2974,17 @@ static int32_t mndDropRelatedFillhistoryTask(SMnode *pMnode, STaskStatusEntry *p mDebug("build and send drop related fill-history task for task:0x%x", pTask->id.taskId); - SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); + SEpSet epset; + bool hasEpset = false; + int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (hasEpset) { + tmsgSendReq(&epset, &msg); + } - tmsgSendReq(&epset, &msg); return TSDB_CODE_SUCCESS; } @@ -2928,11 +3032,14 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { bool checkpointFailed = false; int64_t activeCheckpointId = 0; + int64_t streamId = 0; + int32_t transId = 0; SDecoder decoder = {0}; tDecoderInit(&decoder, pReq->pCont, pReq->contLen); if (tDecodeStreamHbMsg(&decoder, &req) < 0) { + streamMetaClearHbMsg(&req); tDecoderClear(&decoder); terrno = TSDB_CODE_INVALID_MSG; return -1; @@ -2969,7 +3076,9 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (pTaskEntry->stage != p->stage && pTaskEntry->stage != -1) { updateStageInfo(pTaskEntry, p->stage); - if (pTaskEntry->nodeId == SNODE_HANDLE) snodeChanged = true; + if (pTaskEntry->nodeId == SNODE_HANDLE) { + snodeChanged = true; + } } else { // task is idle for more than 50 sec. 
if (fabs(pTaskEntry->inputQUsed - p->inputQUsed) <= DBL_EPSILON) { @@ -2993,6 +3102,8 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (p->checkpointFailed) { checkpointFailed = p->checkpointFailed; + streamId = p->id.streamId; + transId = p->chkpointTransId; } } } @@ -3006,19 +3117,6 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (p->status != TASK_STATUS__READY) { mDebug("received s-task:0x%" PRIx64 " not in ready status:%s", p->id.taskId, streamTaskGetStatusStr(p->status)); - - if (p->status == TASK_STATUS__STREAM_SCAN_HISTORY) { - bool drop = needDropRelatedFillhistoryTask(pTaskEntry, &execInfo); - if (drop) { - SStreamObj *pStreamObj = mndGetStreamObj(pMnode, pTaskEntry->id.streamId); - if (pStreamObj == NULL) { - mError("failed to acquire the streamObj:0x%" PRIx64 " it may have been dropped", pStreamObj->uid); - } else { - mndDropRelatedFillhistoryTask(pMnode, pTaskEntry, pStreamObj); - mndReleaseStream(pMnode, pStreamObj); - } - } - } } } @@ -3031,18 +3129,16 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (allReady || snodeChanged) { // if the execInfo.activeCheckpoint == 0, the checkpoint is restoring from wal - mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", - execInfo.activeCheckpoint); - mndResetStatusFromCheckpoint(pMnode, activeCheckpointId); + mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", activeCheckpointId); + mndResetStatusFromCheckpoint(pMnode, streamId, transId); } else { mInfo("not all vgroups are ready, wait for next HB from stream tasks"); } } taosThreadMutexUnlock(&execInfo.lock); + streamMetaClearHbMsg(&req); - taosArrayDestroy(req.pTaskStatus); - taosArrayDestroy(req.pUpdateNodes); return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 0db3cb0a8a..847f55a8fd 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -65,7 +65,7 @@ int32_t clearFinishedTrans(SMnode* pMnode) { return 0; } -bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char* pTransName, bool lock) { +bool mndStreamTransConflictCheck(SMnode* pMnode, int64_t streamUid, const char* pTransName, bool lock) { if (lock) { taosThreadMutexLock(&execInfo.lock); } @@ -90,16 +90,22 @@ bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0) { if (strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) { - mWarn("conflict with other transId:%d streamUid:%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid, + mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid, tInfo.name); + terrno = TSDB_CODE_MND_TRANS_CONFLICT; return true; + } else { + mDebug("not conflict with checkpoint trans, name:%s, continue create trans", pTransName); } - } else if ((strcmp(tInfo.name, MND_STREAM_CREATE_NAME) == 0) || - (strcmp(tInfo.name, MND_STREAM_DROP_NAME) == 0)) { - mWarn("conflict with other transId:%d streamUid:%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid, + } else if ((strcmp(tInfo.name, MND_STREAM_CREATE_NAME) == 0) || (strcmp(tInfo.name, MND_STREAM_DROP_NAME) == 0) || + (strcmp(tInfo.name, MND_STREAM_TASK_RESET_NAME) == 0)) { + mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid, tInfo.name); + terrno = TSDB_CODE_MND_TRANS_CONFLICT; return true; } + } else { + 
mDebug("stream:0x%"PRIx64" no conflict trans existed, continue create trans", streamUid); } if (lock) { @@ -109,6 +115,30 @@ bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char return false; } +int32_t mndStreamGetRelTrans(SMnode* pMnode, int64_t streamUid) { + taosThreadMutexLock(&execInfo.lock); + int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans); + if (num <= 0) { + taosThreadMutexUnlock(&execInfo.lock); + return 0; + } + + clearFinishedTrans(pMnode); + SStreamTransInfo* pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, &streamUid, sizeof(streamUid)); + if (pEntry != NULL) { + SStreamTransInfo tInfo = *pEntry; + taosThreadMutexUnlock(&execInfo.lock); + + if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0 || strcmp(tInfo.name, MND_STREAM_TASK_UPDATE_NAME) == 0) { + return tInfo.transId; + } + } else { + taosThreadMutexUnlock(&execInfo.lock); + } + + return 0; +} + int32_t mndAddtoCheckpointWaitingList(SStreamObj* pStream, int64_t checkpointId) { SCheckpointCandEntry* pEntry = taosHashGet(execInfo.transMgmt.pWaitingList, &pStream->uid, sizeof(pStream->uid)); if (pEntry == NULL) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 44ee804d22..0909003201 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -27,7 +27,10 @@ #define MND_SUBSCRIBE_VER_NUMBER 2 #define MND_SUBSCRIBE_RESERVE_SIZE 64 -#define MND_SUBSCRIBE_REBALANCE_CNT 3 +#define MND_CONSUMER_LOST_HB_CNT 6 +#define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 + +static int32_t mqRebInExecCnt = 0; static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *); static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw); @@ -38,14 +41,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg); static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg); static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextSubscribe(SMnode *pMnode, void *pIter); - -static int32_t mndSetSubRedoLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { - SSdbRaw *pRedoRaw = mndSubActionEncode(pSub); - if (pRedoRaw == NULL) return -1; - if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1; - if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1; - return 0; -} +static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* hash); static int32_t mndSetSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); @@ -68,7 +64,7 @@ int32_t mndInitSubscribe(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_VND_TMQ_SUBSCRIBE_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_TMQ_DELETE_SUB_RSP, mndTransProcessRsp); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DO_REBALANCE, mndProcessRebalanceReq); + mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessRebalanceReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DROP_CGROUP, mndProcessDropCgroupReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DROP_CGROUP_RSP, mndTransProcessRsp); @@ -213,16 +209,18 @@ static int32_t mndSplitSubscribeKey(const char *key, char *topic, char *cgroup, } static SMqRebInfo *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) { - SMqRebInfo *pRebSub = taosHashGet(pHash, key, strlen(key) + 1); - if (pRebSub == NULL) { - pRebSub = tNewSMqRebSubscribe(key); - if (pRebSub == NULL) { + SMqRebInfo *pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); + if (pRebInfo == NULL) { + pRebInfo = tNewSMqRebSubscribe(key); + if (pRebInfo == 
NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - taosHashPut(pHash, key, strlen(key) + 1, pRebSub, sizeof(SMqRebInfo)); + taosHashPut(pHash, key, strlen(key) + 1, pRebInfo, sizeof(SMqRebInfo)); + taosMemoryFree(pRebInfo); + pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); } - return pRebSub; + return pRebInfo; } static void doRemoveLostConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, const SMqRebInputObj *pInput) { @@ -727,17 +725,156 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return 0; } -static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqDoRebalanceMsg *pReq = pMsg->pCont; - void *pIter = NULL; -// bool rebalanceOnce = false; // to ensure only once. +static void freeRebalanceItem(void *param) { + SMqRebInfo *pInfo = param; + taosArrayDestroy(pInfo->newConsumers); + taosArrayDestroy(pInfo->removedConsumers); +} - mInfo("mq re-balance start, total required re-balanced trans:%d", taosHashGetSize(pReq->rebSubHash)); +static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* rebSubHash) { + SMnode *pMnode = pMsg->info.node; + SSdb *pSdb = pMnode->pSdb; + SMqConsumerObj *pConsumer; + void *pIter = NULL; + + // iterate all consumers, find all modification + while (1) { + pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); + int32_t status = atomic_load_32(&pConsumer->status); + + mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, + hbStatus); + + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } + taosRUnLockLatch(&pConsumer->lock); + }else{ + int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < newTopicNum; i++) { + char * topic = taosArrayGetP(pConsumer->currentTopics, i); + SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic); + if (pSub == NULL) { + continue; + } + taosRLockLatch(&pSub->lock); + + // 2.2 iterate all vg assigned to the consumer of that topic + SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &pConsumer->consumerId, sizeof(int64_t)); + int32_t vgNum = taosArrayGetSize(pConsumerEp->vgs); + + for (int32_t j = 0; j < vgNum; j++) { + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j); + SVgObj * pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId); + if (!pVgroup) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + mndMakeSubscribeKey(key, pConsumer->cgroup, topic); + mndGetOrCreateRebSub(rebSubHash, key); + mInfo("vnode splitted, vgId:%d rebalance will be triggered", pVgEp->vgId); + } + mndReleaseVgroup(pMnode, pVgroup); + } + taosRUnLockLatch(&pSub->lock); + mndReleaseSubscribe(pMnode, pSub); + } + } 
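The consumer scan above is driven by the two thresholds defined at the top of this file: MND_CONSUMER_LOST_HB_CNT (6 silent timer ticks marks a ready consumer as lost and queues its topics for rebalance) and MND_CONSUMER_LOST_CLEAR_THRESHOLD (43200 ticks, roughly a day at these intervals, clears a lost consumer from the sdb). A simplified model of that aging logic, with the state machine compressed relative to the real consumer FSM:

```c
#include <stdio.h>

#define LOST_HB_CNT          6     /* MND_CONSUMER_LOST_HB_CNT */
#define LOST_CLEAR_THRESHOLD 43200 /* MND_CONSUMER_LOST_CLEAR_THRESHOLD */

typedef enum { READY, LOST, DROPPED } ConsumerState;

typedef struct {
  int           hbStatus; /* ticks since the last heartbeat */
  ConsumerState state;
} Consumer;

static void on_heartbeat(Consumer *c) { c->hbStatus = 0; }

static void on_timer_tick(Consumer *c) {
  c->hbStatus++;
  if (c->state == READY && c->hbStatus > LOST_HB_CNT) {
    c->state = LOST; /* its topics are queued for rebalance */
  } else if (c->state == LOST && c->hbStatus > LOST_CLEAR_THRESHOLD) {
    c->state = DROPPED; /* cleared from the sdb entirely */
  }
}

int main(void) {
  Consumer c = {0, READY};
  for (int i = 0; i < 8; ++i) on_timer_tick(&c);
  printf("state after 8 silent ticks: %d\n", c.state); /* 1 == LOST */
  on_heartbeat(&c); /* a live heartbeat resets the counter */
  printf("hbStatus after heartbeat: %d\n", c.hbStatus);
  return 0;
}
```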
+ } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } + } else { + taosRLockLatch(&pConsumer->lock); + + int32_t newTopicNum = taosArrayGetSize(pConsumer->rebNewTopics); + for (int32_t i = 0; i < newTopicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *newTopic = taosArrayGetP(pConsumer->rebNewTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, newTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->newConsumers, &pConsumer->consumerId); + } + + int32_t removedTopicNum = taosArrayGetSize(pConsumer->rebRemovedTopics); + for (int32_t i = 0; i < removedTopicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->rebRemovedTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } + + if (newTopicNum == 0 && removedTopicNum == 0 && taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } + + taosRUnLockLatch(&pConsumer->lock); + } + + mndReleaseConsumer(pMnode, pConsumer); + } + + return 0; +} + +bool mndRebTryStart() { + int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); + mInfo("rebalance counter old val:%d", old); + return old == 0; +} + +void mndRebCntInc() { + int32_t val = atomic_add_fetch_32(&mqRebInExecCnt, 1); + mInfo("rebalance cnt inc, value:%d", val); +} + +void mndRebCntDec() { + int32_t val = atomic_sub_fetch_32(&mqRebInExecCnt, 1); + mInfo("rebalance cnt sub, value:%d", val); +} + +static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { + int code = 0; + mInfo("start to process mq timer"); + + if (!mndRebTryStart()) { + mInfo("mq rebalance already in progress, do nothing"); + return code; + } + + SHashObj *rebSubHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); + if (rebSubHash == NULL) { + mError("failed to create rebalance hashmap"); + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = -1; + goto END; + } + + taosHashSetFreeFp(rebSubHash, freeRebalanceItem); + + mndCheckConsumer(pMsg, rebSubHash); + mInfo("mq re-balance start, total required re-balanced trans:%d", taosHashGetSize(rebSubHash)); // here we only handle one topic rebalance requirement to ensure the atomic execution of this transaction. 
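mndRebTryStart() below turns the rebalance timer handler into a single-flight operation: only the caller that CASes mqRebInExecCnt from 0 to 1 proceeds, and every exit path funnels through the END label so the matching decrement always runs. A minimal self-contained version of that guard using C11 atomics (the real code uses the taos atomic wrappers):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int g_rebInExec; /* stands in for mqRebInExecCnt */

static bool reb_try_start(void) {
  int expected = 0;
  /* exactly one caller wins the 0 -> 1 transition */
  return atomic_compare_exchange_strong(&g_rebInExec, &expected, 1);
}

static void reb_cnt_dec(void) { atomic_fetch_sub(&g_rebInExec, 1); }

static void process_timer(void) {
  if (!reb_try_start()) {
    printf("mq rebalance already in progress, do nothing\n");
    return;
  }
  printf("running rebalance\n");
  /* ... build the rebalance hash, walk it, create transactions ... */
  reb_cnt_dec(); /* every exit path decrements, like the goto END cleanup */
}

int main(void) {
  process_timer();
  process_timer();
  return 0;
}
```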
+ void *pIter = NULL; + SMnode *pMnode = pMsg->info.node; while (1) { - pIter = taosHashIterate(pReq->rebSubHash, pIter); + pIter = taosHashIterate(rebSubHash, pIter); if (pIter == NULL) { break; } @@ -756,12 +893,11 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { taosArrayDestroy(rebOutput.modifyConsumers); taosArrayDestroy(rebOutput.rebVgs); - taosHashCancelIterate(pReq->rebSubHash, pIter); + taosHashCancelIterate(rebSubHash, pIter); terrno = TSDB_CODE_OUT_OF_MEMORY; - mInfo("mq re-balance failed, due to out of memory"); - taosHashCleanup(pReq->rebSubHash); - mndRebEnd(); - return -1; + mError("mq re-balance failed, due to out of memory"); + code = -1; + goto END; } SMqRebInfo *pRebInfo = (SMqRebInfo *)pIter; @@ -829,10 +965,12 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { // reset flag mInfo("mq re-balance completed successfully"); - taosHashCleanup(pReq->rebSubHash); - mndRebEnd(); - return 0; +END: + taosHashCleanup(rebSubHash); + mndRebCntDec(); + + return code; } static int32_t sendDeleteSubToVnode(SMqSubscribeObj *pSub, STrans *pTrans){ @@ -1215,7 +1353,7 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName) return -1; } - if (mndSetDropSubRedoLogs(pMnode, pTrans, pSub) < 0) { + if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { sdbRelease(pSdb, pSub); sdbCancelFetch(pSdb, pIter); return -1; diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index f46f33ac22..0fc8dad420 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -180,7 +180,7 @@ int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) { goto _OUT; } - mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64 + mInfo("trans:%d, process sync proposal, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64 " role:%s raw:%p sec:%d seq:%" PRId64, transId, pMgmt->transId, pMeta->code, pMeta->index, pMeta->term, pMeta->lastConfigIndex, syncStr(pMeta->state), pRaw, pMgmt->transSec, pMgmt->transSeq); @@ -208,15 +208,11 @@ int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) { } if (pTrans->stage == TRN_STAGE_PREPARE) { - bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans); + bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans, false); if (!continueExec) goto _OUT; } - if (pTrans->id != pMgmt->transId) { - mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d", - pTrans->id, pTrans->createdTime, pMgmt->transId); - mndTransRefresh(pMnode, pTrans); - } + mndTransRefresh(pMnode, pTrans); sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex); sdbWriteFile(pMnode->pSdb, tsMndSdbWriteDelta); @@ -234,6 +230,7 @@ static int32_t mndPostMgmtCode(SMnode *pMnode, int32_t code) { goto _OUT; } + int32_t transId = pMgmt->transId; pMgmt->transId = 0; pMgmt->transSec = 0; pMgmt->transSeq = 0; @@ -241,9 +238,9 @@ static int32_t mndPostMgmtCode(SMnode *pMnode, int32_t code) { tsem_post(&pMgmt->syncSem); if (pMgmt->errCode != 0) { - mError("trans:%d, failed to propose since %s, post sem", pMgmt->transId, tstrerror(pMgmt->errCode)); + mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode)); } else { - mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, pMgmt->transId, pMgmt->transSeq); + mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, transId, 
pMgmt->transSeq); } _OUT: @@ -542,7 +539,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) { taosThreadMutexLock(&pMgmt->lock); pMgmt->errCode = 0; - if (pMgmt->transId != 0 /* && pMgmt->transId != transId*/) { + if (pMgmt->transId != 0) { mError("trans:%d, can't be proposed since trans:%d already waiting for confirm", transId, pMgmt->transId); taosThreadMutexUnlock(&pMgmt->lock); rpcFreeCont(req.pCont); diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 7749decf91..9075f0145c 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -15,7 +15,7 @@ #define _DEFAULT_SOURCE #include "mndTrans.h" -#include "mndConsumer.h" +#include "mndSubscribe.h" #include "mndDb.h" #include "mndPrivilege.h" #include "mndShow.h" @@ -36,21 +36,25 @@ static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw); static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction); static void mndTransDropLogs(SArray *pArray); static void mndTransDropActions(SArray *pArray); -static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray); -static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans); -static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsLeader(pMnode); } + +static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray, bool topHalf); +static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans, bool topHalf); +static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans, bool topHalf); +static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf); +static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf); +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf); +static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf); + +static bool 
mndCannotExecuteTransAction(SMnode *pMnode, bool topHalf) { + return (!pMnode->deploy && !mndIsLeader(pMnode)) || !topHalf; +} static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans); static int32_t mndProcessTransTimer(SRpcMsg *pReq); @@ -1090,8 +1094,9 @@ static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) } } -static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { +static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { if (pAction->rawWritten) return 0; + if (topHalf) return TSDB_CODE_MND_TRANS_CTX_SWITCH; int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pAction->pRaw); if (code == 0 || terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) { @@ -1112,9 +1117,12 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi return code; } -static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { - if (pAction->msgSent) return 0; - if (mndCannotExecuteTransAction(pMnode)) return -1; +static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { + if (pAction->msgSent) return 0; + if (mndCannotExecuteTransAction(pMnode, topHalf)) { + terrno = TSDB_CODE_MND_TRANS_CTX_SWITCH; + return TSDB_CODE_MND_TRANS_CTX_SWITCH; + } int64_t signature = pTrans->id; signature = (signature << 32); @@ -1159,7 +1167,8 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio return code; } -static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { +static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { + if (!topHalf) return TSDB_CODE_MND_TRANS_CTX_SWITCH; pAction->rawWritten = 0; pAction->errCode = 0; mInfo("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id); @@ -1168,34 +1177,39 @@ static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction return 0; } -static int32_t mndTransExecSingleAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { +static int32_t mndTransExecSingleAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) { if (pAction->actionType == TRANS_ACTION_RAW) { - return mndTransWriteSingleLog(pMnode, pTrans, pAction); + return mndTransWriteSingleLog(pMnode, pTrans, pAction, topHalf); } else if (pAction->actionType == TRANS_ACTION_MSG) { - return mndTransSendSingleMsg(pMnode, pTrans, pAction); + return mndTransSendSingleMsg(pMnode, pTrans, pAction, topHalf); } else { - return mndTransExecNullMsg(pMnode, pTrans, pAction); + return mndTransExecNullMsg(pMnode, pTrans, pAction, topHalf); } } -static int32_t mndTransExecSingleActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { +static int32_t mndTransExecSingleActions(SMnode *pMnode, STrans *pTrans, SArray *pArray, bool topHalf) { int32_t numOfActions = taosArrayGetSize(pArray); int32_t code = 0; for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); - code = mndTransExecSingleAction(pMnode, pTrans, pAction); - if (code != 0) break; + code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); + if (code != 0) { + mInfo("trans:%d, action:%d not executed since %s. 
numOfActions:%d", pTrans->id, action, tstrerror(code), + numOfActions); + break; + } } return code; } -static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { +static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray, bool topHalf) { int32_t numOfActions = taosArrayGetSize(pArray); + int32_t code = 0; if (numOfActions == 0) return 0; - if (mndTransExecSingleActions(pMnode, pTrans, pArray) != 0) { + if ((code = mndTransExecSingleActions(pMnode, pTrans, pArray, topHalf)) != 0) { return -1; } @@ -1248,31 +1262,31 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA } } -static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions); +static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { + int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("failed to execute redoActions since:%s, code:0x%x", terrstr(), terrno); } return code; } -static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions); +static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { + int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("failed to execute undoActions since %s", terrstr()); } return code; } -static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions); +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf) { + int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions, topHalf); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("failed to execute commitActions since %s", terrstr()); } return code; } -static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) { +static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans, bool topHalf) { int32_t code = 0; int32_t numOfActions = taosArrayGetSize(pTrans->redoActions); if (numOfActions == 0) return code; @@ -1289,7 +1303,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) for (int32_t action = pTrans->redoActionPos; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->redoActionPos); - code = mndTransExecSingleAction(pMnode, pTrans, pAction); + code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); if (code == 0) { if (pAction->msgSent) { if (pAction->msgReceived) { @@ -1317,14 +1331,16 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) } mndSetTransLastAction(pTrans, pAction); - if (mndCannotExecuteTransAction(pMnode)) break; + if (mndCannotExecuteTransAction(pMnode, topHalf)) break; if (code == 0) { pTrans->code = 0; pTrans->redoActionPos++; mInfo("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage), pAction->id); + taosThreadMutexUnlock(&pTrans->mutex); code = mndTransSync(pMnode, pTrans); + taosThreadMutexLock(&pTrans->mutex); if (code != 0) { pTrans->redoActionPos--; pTrans->code = terrno; @@ -1357,7 +1373,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode 
*pMnode, STrans *pTrans) return code; } -bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { +bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; int32_t code = 0; @@ -1368,7 +1384,7 @@ bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pTrans->prepareActions, action); - code = mndTransExecSingleAction(pMnode, pTrans, pAction); + code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); if (code != 0) { mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d", pTrans->id, action, numOfActions); return false; @@ -1381,17 +1397,17 @@ _OVER: return continueExec; } -static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; int32_t code = 0; if (pTrans->exec == TRN_EXEC_SERIAL) { - code = mndTransExecuteRedoActionsSerial(pMnode, pTrans); + code = mndTransExecuteRedoActionsSerial(pMnode, pTrans, topHalf); } else { - code = mndTransExecuteRedoActions(pMnode, pTrans); + code = mndTransExecuteRedoActions(pMnode, pTrans, topHalf); } - if (mndCannotExecuteTransAction(pMnode)) return false; + if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; terrno = code; if (code == 0) { @@ -1431,8 +1447,8 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { - if (mndCannotExecuteTransAction(pMnode)) return false; +static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { + if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; bool continueExec = true; int32_t code = mndTransCommit(pMnode, pTrans); @@ -1452,9 +1468,9 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; - int32_t code = mndTransExecuteCommitActions(pMnode, pTrans); + int32_t code = mndTransExecuteCommitActions(pMnode, pTrans, topHalf); if (code == 0) { pTrans->code = 0; @@ -1471,9 +1487,9 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; - int32_t code = mndTransExecuteUndoActions(pMnode, pTrans); + int32_t code = mndTransExecuteUndoActions(pMnode, pTrans, topHalf); if (code == 0) { pTrans->stage = TRN_STAGE_PRE_FINISH; @@ -1491,8 +1507,8 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) { - if (mndCannotExecuteTransAction(pMnode)) return false; +static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { + if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; bool continueExec = true; int32_t code = mndTransRollback(pMnode, pTrans); @@ -1510,8 +1526,8 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool 
mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans) { - if (mndCannotExecuteTransAction(pMnode)) return false; +static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { + if (mndCannotExecuteTransAction(pMnode, topHalf)) return false; bool continueExec = true; int32_t code = mndTransPreFinish(pMnode, pTrans); @@ -1529,8 +1545,9 @@ static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = false; + if (topHalf) return continueExec; SSdbRaw *pRaw = mndTransEncode(pTrans); if (pRaw == NULL) { @@ -1558,43 +1575,28 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) { pTrans->lastExecTime = taosGetTimestampMs(); switch (pTrans->stage) { case TRN_STAGE_PREPARE: - continueExec = mndTransPerformPrepareStage(pMnode, pTrans); + continueExec = mndTransPerformPrepareStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_REDO_ACTION: - continueExec = mndTransPerformRedoActionStage(pMnode, pTrans); + continueExec = mndTransPerformRedoActionStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_COMMIT: - if (topHalf) { - continueExec = mndTransPerformCommitStage(pMnode, pTrans); - } else { - mInfo("trans:%d, can not commit since not leader", pTrans->id); - continueExec = false; - } + continueExec = mndTransPerformCommitStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_COMMIT_ACTION: - continueExec = mndTransPerformCommitActionStage(pMnode, pTrans); + continueExec = mndTransPerformCommitActionStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_ROLLBACK: - if (topHalf) { - continueExec = mndTransPerformRollbackStage(pMnode, pTrans); - } else { - mInfo("trans:%d, can not rollback since not leader", pTrans->id); - continueExec = false; - } + continueExec = mndTransPerformRollbackStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_UNDO_ACTION: - continueExec = mndTransPerformUndoActionStage(pMnode, pTrans); + continueExec = mndTransPerformUndoActionStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_PRE_FINISH: - if (topHalf) { - continueExec = mndTransPerformPreFinishStage(pMnode, pTrans); - } else { - mInfo("trans:%d, can not pre-finish since not leader", pTrans->id); - continueExec = false; - } + continueExec = mndTransPerformPreFinishStage(pMnode, pTrans, topHalf); break; case TRN_STAGE_FINISH: - continueExec = mndTransPerformFinishStage(pMnode, pTrans); + continueExec = mndTransPerformFinishStage(pMnode, pTrans, topHalf); break; default: continueExec = false; diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 20fb7a4d31..6eb5d7e592 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -2707,8 +2707,16 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro if (newVg1.replica == 1) { if (mndAddVnodeToVgroup(pMnode, pTrans, &newVg1, pArray) != 0) goto _OVER; + + newVg1.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER; if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[0].dnodeId) != 0) goto _OVER; if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg1, &newVg1.vnodeGid[1]) != 0) goto _OVER; + + newVg1.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER; + if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[1].dnodeId) != 0) goto _OVER; + if 
(mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[0].dnodeId) != 0) goto _OVER; + + if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER; } else if (newVg1.replica == 3) { SVnodeGid del1 = {0}; if (mndRemoveVnodeFromVgroup(pMnode, pTrans, &newVg1, pArray, &del1) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index 2d03631a37..4d0d53ced0 100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -61,6 +61,7 @@ class MndTestTrans2 : public ::testing::Test { static SMsgCb msgCb = {0}; msgCb.reportStartupFp = reportStartup; msgCb.sendReqFp = sendReq; + msgCb.sendSyncReqFp = sendSyncReq; msgCb.sendRspFp = sendRsp; msgCb.queueFps[SYNC_QUEUE] = putToQueue; msgCb.queueFps[WRITE_QUEUE] = putToQueue; diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 21d813c7c0..4815dfe65c 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -19,26 +19,24 @@ #include "tqCommon.h" #include "tuuid.h" -#define sndError(...) \ - do { \ - if (sndDebugFlag & DEBUG_ERROR) { \ - taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__); \ - } \ - } while (0) +// clang-format off +#define sndError(...) do { if (sndDebugFlag & DEBUG_ERROR) {taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__);}} while (0) +#define sndInfo(...) do { if (sndDebugFlag & DEBUG_INFO) { taosPrintLog("SND INFO ", DEBUG_INFO, sndDebugFlag, __VA_ARGS__);}} while (0) +#define sndDebug(...) do { if (sndDebugFlag & DEBUG_DEBUG) { taosPrintLog("SND ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__);}} while (0) +// clang-format on -#define sndInfo(...) \ - do { \ - if (sndDebugFlag & DEBUG_INFO) { \ - taosPrintLog("SND INFO ", DEBUG_INFO, sndDebugFlag, __VA_ARGS__); \ - } \ - } while (0) - -#define sndDebug(...) 
\ - do { \ - if (sndDebugFlag & DEBUG_DEBUG) { \ - taosPrintLog("SND ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__); \ - } \ - } while (0) +static STaskId replaceStreamTaskId(SStreamTask *pTask) { + ASSERT(pTask->info.fillHistory); + STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; + pTask->id.streamId = pTask->streamTaskId.streamId; + pTask->id.taskId = pTask->streamTaskId.taskId; + return id; +} +static void restoreStreamTaskId(SStreamTask *pTask, STaskId *pId) { + ASSERT(pTask->info.fillHistory); + pTask->id.taskId = pId->taskId; + pTask->id.streamId = pId->streamId; +} int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer) { ASSERT(pTask->info.taskLevel == TASK_LEVEL__AGG && taosArrayGetSize(pTask->upstreamInfo.pList) != 0); @@ -50,23 +48,22 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer streamTaskOpenAllUpstreamInput(pTask); - SStreamTask *pSateTask = pTask; - SStreamTask task = {0}; + STaskId taskId = {0}; if (pTask->info.fillHistory) { - task.id.streamId = pTask->streamTaskId.streamId; - task.id.taskId = pTask->streamTaskId.taskId; - task.pMeta = pTask->pMeta; - pSateTask = &task; + taskId = replaceStreamTaskId(pTask); } - pTask->pState = streamStateOpen(pSnode->path, pSateTask, false, -1, -1); + pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1); if (pTask->pState == NULL) { sndError("s-task:%s failed to open state for task", pTask->id.idStr); return -1; } else { sndDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState); } - + + if (pTask->info.fillHistory) { + restoreStreamTaskId(pTask, &taskId); + } int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList); SReadHandle handle = { @@ -90,27 +87,25 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer // checkpoint ver is the kept version, handled data should be the next version. 
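`replaceStreamTaskId`/`restoreStreamTaskId` replace the old shadow-task trick: a fill-history task opens its state under the id of its related stream task, so the two share one state store, and the patch now swaps the ids in place around `streamStateOpen` instead of building a temporary `SStreamTask`. A self-contained sketch of the swap, with simplified task structs:

```
#include <stdint.h>

typedef struct { int64_t streamId; int32_t taskId; } STaskId;
typedef struct {
  STaskId id;            /* this task's own identity */
  STaskId streamTaskId;  /* the related stream task (fill-history only) */
  int     fillHistory;
} TaskSketch;

static STaskId swapToStreamId(TaskSketch *t) {
  STaskId saved = t->id;      /* remember the real id */
  t->id = t->streamTaskId;    /* borrow the stream task's id */
  return saved;
}

static void restoreId(TaskSketch *t, STaskId saved) { t->id = saved; }

/* usage:
 *   STaskId saved = {0};
 *   if (t->fillHistory) saved = swapToStreamId(t);
 *   openStateKeyedBy(t->id);               // state store keyed by id
 *   if (t->fillHistory) restoreId(t, saved);
 */
```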
if (pTask->chkInfo.checkpointId != 0) { pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1; - sndInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64, pTask->id.idStr, - pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer); + sndInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64, + pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer); } - char *p = NULL; - streamTaskGetStatus(pTask, &p); - + char* p = streamTaskGetStatus(pTask)->name; if (pTask->info.fillHistory) { sndInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 - " nextProcessVer:%" PRId64 - " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, - pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, - (int32_t)pTask->streamTaskId.taskId, pTask->info.triggerParam); + " nextProcessVer:%" PRId64 + " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms", + SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, + (int32_t)pTask->streamTaskId.taskId, pTask->info.triggerParam); } else { sndInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 - " nextProcessVer:%" PRId64 - " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, - pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, - (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam); + " nextProcessVer:%" PRId64 + " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms", + SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, + (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam); } return 0; } @@ -128,7 +123,7 @@ SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) { } pSnode->msgCb = pOption->msgCb; - pSnode->pMeta = streamMetaOpen(path, pSnode, (FTaskExpand *)sndExpandTask, SNODE_HANDLE, taosGetTimestampMs()); + pSnode->pMeta = streamMetaOpen(path, pSnode, (FTaskExpand *)sndExpandTask, SNODE_HANDLE, taosGetTimestampMs(), tqStartTaskCompleteCallback); if (pSnode->pMeta == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto FAIL; @@ -149,9 +144,10 @@ FAIL: return NULL; } -int32_t sndInit(SSnode * pSnode) { - resetStreamTaskStatus(pSnode->pMeta); - startStreamTasks(pSnode->pMeta); +int32_t sndInit(SSnode *pSnode) { + int32_t numOfTasks = 0; + tqStreamTaskResetStatus(pSnode->pMeta, &numOfTasks); + streamMetaStartAllTasks(pSnode->pMeta); return 0; } @@ -198,13 +194,19 @@ int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) { case TDMT_STREAM_TASK_DEPLOY: { void * pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t len = pMsg->contLen - sizeof(SMsgHead); - return tqStreamTaskProcessDeployReq(pSnode->pMeta, -1, pReq, len, true, true); + return tqStreamTaskProcessDeployReq(pSnode->pMeta, 
&pSnode->msgCb,pMsg->info.conn.applyIndex, pReq, len, true, true); } case TDMT_STREAM_TASK_DROP: return tqStreamTaskProcessDropReq(pSnode->pMeta, pMsg->pCont, pMsg->contLen); case TDMT_VND_STREAM_TASK_UPDATE: return tqStreamTaskProcessUpdateReq(pSnode->pMeta, &pSnode->msgCb, pMsg, true); + case TDMT_VND_STREAM_TASK_RESET: + return tqStreamTaskProcessTaskResetReq(pSnode->pMeta, pMsg); + case TDMT_STREAM_TASK_PAUSE: + return tqStreamTaskProcessTaskPauseReq(pSnode->pMeta, pMsg->pCont); + case TDMT_STREAM_TASK_RESUME: + return tqStreamTaskProcessTaskResumeReq(pSnode->pMeta, pMsg->info.conn.applyIndex, pMsg->pCont, false); default: ASSERT(0); } diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index cf57623a43..435b12bc06 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -107,6 +107,7 @@ struct STQ { TTB* pExecStore; TTB* pCheckStore; SStreamMeta* pStreamMeta; + void* tqTimer; }; int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle); @@ -152,9 +153,6 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data); char* tqOffsetBuildFName(const char* path, int32_t fVer); int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); -// tqStream -int32_t tqStopStreamTasks(STQ* pTq); - // tq util int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type); int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index d347ce30d8..9d8d5013fa 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -256,31 +256,8 @@ void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]); // tsdbFS.c ============================================================================================== int32_t tsdbFSOpen(STsdb *pTsdb, int8_t rollback); int32_t tsdbFSClose(STsdb *pTsdb); -int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS); -void tsdbFSDestroy(STsdbFS *pFS); -int32_t tDFileSetCmprFn(const void *p1, const void *p2); -int32_t tsdbFSCommit(STsdb *pTsdb); -int32_t tsdbFSRollback(STsdb *pTsdb); -int32_t tsdbFSPrepareCommit(STsdb *pTsdb, STsdbFS *pFS); -int32_t tsdbFSRef(STsdb *pTsdb, STsdbFS *pFS); -void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS); void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t); - -int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet); -int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile); // tsdbReaderWriter.c ============================================================================================== -// SDataFWriter -int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet); -int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync); -int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter); -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx); -int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx); -int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk); -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, - int8_t cmprAlg, int8_t toLast); -int32_t tsdbWriteDiskData(SDataFWriter *pWriter, const SDiskData *pDiskData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo); - -int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo); // SDataFReader int32_t 
tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet); int32_t tsdbDataFReaderClose(SDataFReader **ppReader); @@ -292,12 +269,6 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pBlock, SBlockData *p int32_t tsdbReadDataBlockEx(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData); int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData); int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData); -// SDelFWriter -int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb); -int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync); -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx); -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx); -int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter); // SDelFReader int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb); int32_t tsdbDelFReaderClose(SDelFReader **ppReader); @@ -426,6 +397,7 @@ struct STbData { tb_uid_t uid; TSKEY minKey; TSKEY maxKey; + SRWLatch lock; SDelData *pHead; SDelData *pTail; SMemSkipList sl; @@ -737,7 +709,6 @@ struct STsdbReadSnap { SMemTable *pIMem; SQueryNode *pINode; TFileSetArray *pfSetArray; - STsdbFS fs; }; struct SDataFWriter { @@ -796,16 +767,16 @@ typedef struct { } SSttTableRowsInfo; typedef struct SSttBlockLoadInfo { - SBlockDataInfo blockData[2]; // buffered block data - SArray *aSttBlk; - int32_t currentLoadBlockIndex; - STSchema *pSchema; - int16_t *colIds; - int32_t numOfCols; - bool checkRemainingRow; // todo: no assign value? - bool isLast; - bool sttBlockLoaded; - SSttTableRowsInfo info; + SBlockDataInfo blockData[2]; // buffered block data + SArray *aSttBlk; + int32_t currentLoadBlockIndex; + STSchema *pSchema; + int16_t *colIds; + int32_t numOfCols; + bool checkRemainingRow; // todo: no assign value? 
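`STbData` gains an `SRWLatch lock`, presumably guarding the per-table delete-data list (`pHead`/`pTail`) against concurrent readers and writers. A sketch of that access pattern, using `pthread_rwlock_t` as a stand-in for `SRWLatch` (the real code would use `taosWLockLatch`/`taosRLockLatch`, as elsewhere in this patch):

```
#include <pthread.h>
#include <stddef.h>

typedef struct SDelData { struct SDelData *pNext; } SDelData;

typedef struct {
  pthread_rwlock_t lock;  /* stand-in for SRWLatch; init omitted */
  SDelData *pHead;
  SDelData *pTail;
} STbDataSketch;

static void appendDelData(STbDataSketch *tb, SDelData *d) {
  pthread_rwlock_wrlock(&tb->lock);   /* exclusive: list is restructured */
  d->pNext = NULL;
  if (tb->pTail) tb->pTail->pNext = d; else tb->pHead = d;
  tb->pTail = d;
  pthread_rwlock_unlock(&tb->lock);
}

static size_t countDelData(STbDataSketch *tb) {
  size_t n = 0;
  pthread_rwlock_rdlock(&tb->lock);   /* shared: concurrent readers are fine */
  for (SDelData *d = tb->pHead; d; d = d->pNext) n++;
  pthread_rwlock_unlock(&tb->lock);
  return n;
}
```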
+ bool isLast; + bool sttBlockLoaded; + SSttTableRowsInfo info; SSttBlockLoadCostInfo cost; } SSttBlockLoadInfo; @@ -894,15 +865,15 @@ typedef struct { _load_tomb_fn loadTombFn; void *pReader; void *idstr; - bool rspRows; // response the rows in stt-file, if possible + bool rspRows; // response the rows in stt-file, if possible } SMergeTreeConf; typedef struct SSttDataInfoForTable { - SArray* pTimeWindowList; + SArray *pTimeWindowList; int64_t numOfRows; } SSttDataInfoForTable; -int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoForTable* pTableInfo); +int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoForTable *pTableInfo); void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter); bool tMergeTreeNext(SMergeTree *pMTree); void tMergeTreePinSttBlock(SMergeTree *pMTree); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index f3b495675d..c1a4754b62 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -95,7 +95,7 @@ typedef struct SQueryNode SQueryNode; #define VNODE_RSMA2_DIR "rsma2" #define VNODE_TQ_STREAM "stream" -#if SUSPEND_RESUME_TEST // only for test purpose +#if SUSPEND_RESUME_TEST // only for test purpose #define VNODE_BUFPOOL_SEGMENTS 1 #else #define VNODE_BUFPOOL_SEGMENTS 3 @@ -216,8 +216,6 @@ int32_t tsdbBegin(STsdb* pTsdb); int32_t tsdbCacheCommit(STsdb* pTsdb); int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo); int32_t tsdbRetention(STsdb* tsdb, int64_t now, int32_t sync); -// int32_t tsdbFinishCommit(STsdb* pTsdb); -// int32_t tsdbRollbackCommit(STsdb* pTsdb); int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg); int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq2* pMsg, SSubmitRsp2* pRsp); int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitTbData* pSubmitTbData, int32_t* affectedRows); @@ -232,6 +230,7 @@ int tqPushMsg(STQ*, tmsg_t msgType); int tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg); int tqUnregisterPushHandle(STQ* pTq, void* pHandle); int tqScanWalAsync(STQ* pTq, bool ckPause); +int32_t tqStopStreamTasksAsync(STQ* pTq); int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp); int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg); diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 976ee616f9..ac2486eda1 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -36,7 +36,6 @@ static int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaDeleteNcolIdx(SMeta *pMeta, const SMetaEntry *pME); - static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) { pInfo->uid = pEntry->uid; pInfo->version = pEntry->version; @@ -562,6 +561,7 @@ int metaAddIndexToSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); metaULock(pMeta); metaDestroyTagIdxKey(pTagIdxKey); + pTagIdxKey = NULL; } nStbEntry.version = version; @@ -692,6 +692,7 @@ int metaDropIndexFromSTable(SMeta *pMeta, int64_t version, SDropIndexReq *pReq) tdbTbDelete(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, pMeta->txn); metaULock(pMeta); metaDestroyTagIdxKey(pTagIdxKey); + pTagIdxKey = NULL; } // clear idx flag @@ -1076,7 +1077,7 @@ static int metaDeleteTtl(SMeta 
*pMeta, const SMetaEntry *pME) { return ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx); } -static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *pSuid, int8_t* pSysTbl) { +static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *pSuid, int8_t *pSysTbl) { void *pData = NULL; int nData = 0; int rc = 0; @@ -1146,6 +1147,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *p tdbTbDelete(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, pMeta->txn); } metaDestroyTagIdxKey(pTagIdxKey); + pTagIdxKey = NULL; } } tDecoderClear(&tdc); @@ -1865,6 +1867,7 @@ static int metaAddTagIndex(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTb } tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); metaDestroyTagIdxKey(pTagIdxKey); + pTagIdxKey = NULL; } tdbTbcClose(pCtbIdxc); return 0; @@ -2122,7 +2125,7 @@ int metaUpdateChangeTimeWithLock(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeM if (!tsTtlChangeOnWrite) return 0; metaWLock(pMeta); - int ret = metaUpdateChangeTime(pMeta, uid, changeTimeMs); + int ret = metaUpdateChangeTime(pMeta, uid, changeTimeMs); metaULock(pMeta); return ret; } @@ -2228,15 +2231,14 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { nTagData = tDataTypes[pTagColumn->type].bytes; } - if (pTagData != NULL) { - if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, nTagData, pTagColumn->type, - pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { - ret = -1; - goto end; - } - tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); + if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, nTagData, pTagColumn->type, + pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { + ret = -1; + goto end; } + tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); metaDestroyTagIdxKey(pTagIdxKey); + pTagIdxKey = NULL; } } end: diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index abe4c3f2fc..fd4cc7eaa5 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -240,23 +240,23 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTaskId *pId) { STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - taosRLockLatch(&pMeta->lock); + streamMetaRLock(pMeta); SStreamTask **ppTask = (SStreamTask **)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask && *ppTask) { pItem->submitReqVer = (*ppTask)->chkInfo.checkpointVer; pItem->fetchResultVer = (*ppTask)->info.triggerParam; } - taosRUnLockLatch(&pMeta->lock); + streamMetaRUnLock(pMeta); } static void tdRSmaTaskRemove(SStreamMeta *pMeta, int64_t streamId, int32_t taskId) { streamMetaUnregisterTask(pMeta, streamId, taskId); - taosWLockLatch(&pMeta->lock); + streamMetaWLock(pMeta); int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); if (streamMetaCommit(pMeta) < 0) { // persist to disk } - taosWUnLockLatch(&pMeta->lock); + streamMetaWUnLock(pMeta); smaDebug("vgId:%d, rsma task:%" PRIi64 ",%d dropped, remain tasks:%d", pMeta->vgId, streamId, taskId, numOfTasks); } @@ -750,6 +750,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma } tDestroySubmitReq(pReq, TSDB_MSG_FLG_ENCODE); taosMemoryFree(pReq); + pReq = NULL; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1301,14 +1302,14 @@ _checkpoint: checkpointBuilt = true; } - taosWLockLatch(&pMeta->lock); 
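Several metaTable.c hunks add `pTagIdxKey = NULL;` immediately after `metaDestroyTagIdxKey(pTagIdxKey);`. This is the reset-after-free idiom: a later destroy on a shared error path, or the next loop iteration, must never see the stale pointer. A minimal illustration:

```
#include <stdlib.h>

static void destroyKey(void *p) { free(p); }

static int buildManyKeys(int n) {
  void *pKey = NULL;
  for (int i = 0; i < n; i++) {
    pKey = malloc(16);
    if (pKey == NULL) goto _err;
    /* ... upsert pKey into the tag index ... */
    destroyKey(pKey);
    pKey = NULL;        /* the fix: no dangling pointer between iterations */
  }
  return 0;
_err:
  destroyKey(pKey);     /* safe in all cases: free(NULL) is a no-op */
  return -1;
}
```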
+ streamMetaWLock(pMeta); if (streamMetaSaveTask(pMeta, pTask)) { - taosWUnLockLatch(&pMeta->lock); + streamMetaWUnLock(pMeta); code = terrno ? terrno : TSDB_CODE_OUT_OF_MEMORY; taosHashCancelIterate(pInfoHash, infoHash); TSDB_CHECK_CODE(code, lino, _exit); } - taosWUnLockLatch(&pMeta->lock); + streamMetaWUnLock(pMeta); smaDebug("vgId:%d, rsma commit, succeed to commit task:%p, submitReqVer:%" PRIi64 ", fetchResultVer:%" PRIi64 ", table:%" PRIi64 ", level:%d", TD_VID(pVnode), pTask, pItem->submitReqVer, pItem->fetchResultVer, pRSmaInfo->suid, i + 1); @@ -1316,13 +1317,13 @@ _checkpoint: } } if (pMeta) { - taosWLockLatch(&pMeta->lock); + streamMetaWLock(pMeta); if (streamMetaCommit(pMeta)) { - taosWUnLockLatch(&pMeta->lock); + streamMetaWUnLock(pMeta); code = terrno ? terrno : TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } - taosWUnLockLatch(&pMeta->lock); + streamMetaWUnLock(pMeta); } if (checkpointBuilt) { smaInfo("vgId:%d, rsma commit, succeed to commit checkpoint:%" PRIi64, TD_VID(pVnode), checkpointId); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 138c58b45f..a53b322753 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -19,13 +19,25 @@ // 0: not init // 1: already inited -// 2: wait to be inited or cleaup +// 2: wait to be inited or cleanup static int32_t tqInitialize(STQ* pTq); static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_STATUS_EXEC == pHandle->status; } static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; } static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; } +static int32_t tqTimerInit(STQ* pTq) { + pTq->tqTimer = taosTmrInit(100, 100, 1000, "TQ"); + if (pTq->tqTimer == NULL) { + return -1; + } + return 0; +} + +static void tqTimerCleanUp(STQ* pTq) { + taosTmrCleanUp(pTq->tqTimer); +} + void tqDestroyTqHandle(void* data) { STqHandle* pData = (STqHandle*)data; qDestroyTask(pData->execHandle.task); @@ -96,7 +108,8 @@ int32_t tqInitialize(STQ* pTq) { return -1; } - pTq->pStreamMeta = streamMetaOpen(pTq->path, pTq, (FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId, -1); + int32_t vgId = TD_VID(pTq->pVnode); + pTq->pStreamMeta = streamMetaOpen(pTq->path, pTq, (FTaskExpand*)tqExpandTask, vgId, -1, tqStartTaskCompleteCallback); if (pTq->pStreamMeta == NULL) { return -1; } @@ -105,6 +118,7 @@ int32_t tqInitialize(STQ* pTq) { return -1; } + tqTimerInit(pTq); return 0; } @@ -135,6 +149,8 @@ void tqClose(STQ* pTq) { taosMemoryFree(pTq->path); tqMetaClose(pTq); streamMetaClose(pTq->pStreamMeta); + tqTimerCleanUp(pTq); + qDebug("end to close tq"); taosMemoryFree(pTq); } @@ -841,8 +857,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) { pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer); } - char* p = NULL; - streamTaskGetStatus(pTask, &p); + char* p = streamTaskGetStatus(pTask)->name; if (pTask->info.fillHistory) { tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 @@ -872,7 +887,8 @@ int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { - return tqStreamTaskProcessDeployReq(pTq->pStreamMeta, sversion, msg, msgLen, vnodeIsRoleLeader(pTq->pVnode), pTq->pVnode->restored); + return tqStreamTaskProcessDeployReq(pTq->pStreamMeta, &pTq->pVnode->msgCb, sversion, msg, msgLen, + 
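The raw `taosWLockLatch(&pMeta->lock)` / `taosWUnLockLatch` pairs in smaRollup.c (and in tqStreamTaskSnap.c further down) are replaced with `streamMetaWLock`/`streamMetaWUnLock` wrappers. A sketch of the wrapper shape, with a simplified meta struct; the real wrappers may add tracing or assertions on top of the latch:

```
#include <pthread.h>

typedef struct { pthread_rwlock_t lock; /* ... */ } MetaSketch;

static void metaWLock(MetaSketch *m)   { pthread_rwlock_wrlock(&m->lock); }
static void metaWUnLock(MetaSketch *m) { pthread_rwlock_unlock(&m->lock); }
static void metaRLock(MetaSketch *m)   { pthread_rwlock_rdlock(&m->lock); }
static void metaRUnLock(MetaSketch *m) { pthread_rwlock_unlock(&m->lock); }
```

Call sites no longer touch `pMeta->lock` directly, so the lock implementation and any instrumentation can change in one place.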
vnodeIsRoleLeader(pTq->pVnode), pTq->pVnode->restored); } static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask, STQ* pTq) { @@ -931,8 +947,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { // do recovery step1 const char* id = pTask->id.idStr; - char* pStatus = NULL; - streamTaskGetStatus(pTask, &pStatus); + char* pStatus = streamTaskGetStatus(pTask)->name; // avoid multi-thread exec while (1) { @@ -948,7 +963,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { // let's decide which step should be executed now if (pTask->execInfo.step1Start == 0) { int64_t ts = taosGetTimestampMs(); - pTask->execInfo.step1Start = ts; tqDebug("s-task:%s start scan-history stage(step 1), status:%s, step1 startTs:%" PRId64, id, pStatus, ts); } else { @@ -989,15 +1003,15 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (retInfo.ret == TASK_SCANHISTORY_REXEC) { streamReExecScanHistoryFuture(pTask, retInfo.idleTime); } else { - char* p = NULL; - ETaskStatus s = streamTaskGetStatus(pTask, &p); + SStreamTaskState* p = streamTaskGetStatus(pTask); + ETaskStatus s = p->state; if (s == TASK_STATUS__PAUSE) { tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr, el, pTask->execInfo.step1El, status); } else if (s == TASK_STATUS__STOP || s == TASK_STATUS__DROPPING) { - tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr, p, - pTask->execInfo.step1El); + tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr, + p->name, pTask->execInfo.step1El); } } @@ -1036,15 +1050,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { streamMetaReleaseTask(pMeta, pStreamTask); } else { - STimeWindow* pWindow = &pTask->dataRange.window; - ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask) || streamTaskShouldStop(pTask)); - - // Not update the fill-history time window until the state transfer is completed. 
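`streamTaskGetStatus` changes shape throughout this patch: instead of returning an `ETaskStatus` and filling a `char *` out-parameter with the status name, it now returns a state object carrying both. A sketch of the call-site shape after the change, with field names inferred from the call sites (`state`, `name`):

```
typedef enum { TASK_READY, TASK_PAUSE } StatusSketch;

typedef struct {
  StatusSketch state;   /* the enum, for comparisons */
  const char  *name;    /* printable form, used directly in log lines */
} TaskStateSketch;

/* after: one call returns both pieces */
static const TaskStateSketch kReady = {TASK_READY, "ready"};
static const TaskStateSketch *taskGetStatus(void) { return &kReady; }

/* usage replacing the old two-step out-parameter pattern:
 *   const TaskStateSketch *p = taskGetStatus();
 *   if (p->state != TASK_READY) log("status:%s", p->name);
 */
```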
- tqDebug("s-task:%s scan-history in stream time window completed, start to handle data from WAL, startVer:%" PRId64 - ", window:%" PRId64 " - %" PRId64, - id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey); - - code = streamTaskScanHistoryDataComplete(pTask); + ASSERT(0); } atomic_store_32(&pTask->status.inScanHistorySentinel, 0); @@ -1064,16 +1070,19 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { SStreamTaskRunReq* pReq = pMsg->pCont; - int32_t taskId = pReq->taskId; - - if (taskId == STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID) { // all tasks are extracted submit data from the wal + // extracted submit data from wal files for all tasks + if (pReq->reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) { tqScanWal(pTq); return 0; } + int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); - if(code == 0 && taskId > 0){ + + // let's continue scan data in the wal files + if(code == 0 && pReq->reqType >= 0){ tqScanWalAsync(pTq, false); } + return code; } @@ -1090,108 +1099,11 @@ int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) { } int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { - SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg; - - SStreamMeta* pMeta = pTq->pStreamMeta; - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); - if (pTask == NULL) { - tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId, - pReq->taskId); - // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active - return TSDB_CODE_SUCCESS; - } - - tqDebug("s-task:%s receive pause msg from mnode", pTask->id.idStr); - streamTaskPause(pTask, pMeta); - - SStreamTask* pHistoryTask = NULL; - if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId); - if (pHistoryTask == NULL) { - tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64 - ", it may have been dropped already", - pMeta->vgId, pTask->hTaskInfo.id.taskId); - streamMetaReleaseTask(pMeta, pTask); - - // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active - return TSDB_CODE_SUCCESS; - } - - tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr); - - streamTaskPause(pHistoryTask, pMeta); - streamMetaReleaseTask(pMeta, pHistoryTask); - } - - streamMetaReleaseTask(pMeta, pTask); - return TSDB_CODE_SUCCESS; -} - -int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, int8_t igUntreated) { - int32_t vgId = pTq->pStreamMeta->vgId; - if (pTask == NULL) { - return -1; - } - - streamTaskResume(pTask); - ETaskStatus status = streamTaskGetStatus(pTask, NULL); - - int32_t level = pTask->info.taskLevel; - if (level == TASK_LEVEL__SINK) { - if (status == TASK_STATUS__UNINIT) { - } - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; - } - - if (status == TASK_STATUS__READY || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) { - // no lock needs to secure the access of the version - if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) { - // discard all the data when the stream task is suspended. 
- walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion); - tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64 - ", schedStatus:%d", - vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus); - } else { // from the previous paused version and go on - tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", - vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus); - } - - if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && status == TASK_STATUS__SCAN_HISTORY) { - streamStartScanHistoryAsync(pTask, igUntreated); - } else if (level == TASK_LEVEL__SOURCE && (streamQueueGetNumOfItems(pTask->inputq.queue) == 0)) { - tqScanWalAsync(pTq, false); - } else { - streamSchedExec(pTask); - } - } else if (status == TASK_STATUS__UNINIT) { - // todo: fill-history task init ? - if (pTask->info.fillHistory == 0) { - EStreamTaskEvent event = HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; - streamTaskHandleEvent(pTask->status.pSM, event); - } - } - - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; + return tqStreamTaskProcessTaskPauseReq(pTq->pStreamMeta, msg); } int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { - SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg; - - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId); - int32_t code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated); - if (code != 0) { - return code; - } - - STaskId* pHTaskId = &pTask->hTaskInfo.id; - SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pHTaskId->streamId, pHTaskId->taskId); - if (pHistoryTask) { - code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated); - } - - return code; + return tqStreamTaskProcessTaskResumeReq(pTq, sversion, msg, true); } int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { @@ -1256,10 +1168,11 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) if (pTask->status.downstreamReady != 1) { pTask->chkInfo.failedId = req.checkpointId; // record the latest failed checkpoint id pTask->chkInfo.checkpointingId = req.checkpointId; + pTask->chkInfo.transId = req.transId; tqError("s-task:%s not ready for checkpoint, since downstream not ready, ignore this checkpoint:%" PRId64 - ", set it failure", - pTask->id.idStr, req.checkpointId); + ", transId:%d set it failed", + pTask->id.idStr, req.checkpointId, req.transId); streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; @@ -1270,7 +1183,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) // todo save the checkpoint failed info taosThreadMutexLock(&pTask->lock); - ETaskStatus status = streamTaskGetStatus(pTask, NULL); + ETaskStatus status = streamTaskGetStatus(pTask)->state; if (status == TASK_STATUS__HALT || status == TASK_STATUS__PAUSE) { tqError("s-task:%s not ready for checkpoint, since it is halt, ignore this checkpoint:%" PRId64 ", set it failure", @@ -1288,10 +1201,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) // check if the checkpoint msg already sent or not. 
if (status == TASK_STATUS__CK) { - ASSERT(pTask->chkInfo.checkpointingId == req.checkpointId); tqWarn("s-task:%s recv checkpoint-source msg again checkpointId:%" PRId64 - " already received, ignore this msg and continue process checkpoint", - pTask->id.idStr, pTask->chkInfo.checkpointingId); + " transId:%d already received, ignore this msg and continue process checkpoint", + pTask->id.idStr, pTask->chkInfo.checkpointingId, req.transId); taosThreadMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); @@ -1335,28 +1247,10 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) { - SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg->pCont; - - SStreamMeta* pMeta = pTq->pStreamMeta; - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); - if (pTask == NULL) { - tqError("vgId:%d process task-reset req, failed to acquire task:0x%x, it may have been dropped already", - pMeta->vgId, pReq->taskId); - return TSDB_CODE_SUCCESS; - } - - tqDebug("s-task:%s receive task-reset msg from mnode, reset status and ready for data processing", pTask->id.idStr); - - // clear flag set during do checkpoint, and open inputQ for all upstream tasks - if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__CK) { - streamTaskClearCheckInfo(pTask, true); - streamTaskSetStatusReady(pTask); - } - - streamMetaReleaseTask(pMeta, pTask); - return TSDB_CODE_SUCCESS; + return tqStreamTaskProcessTaskResetReq(pTq->pStreamMeta, pMsg); } +// NOTE: here we may receive this message more than once, so need to handle this case int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) { SVDropHTaskReq* pReq = (SVDropHTaskReq*)pMsg->pCont; @@ -1375,13 +1269,10 @@ int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) { return TSDB_CODE_SUCCESS; } - ETaskStatus status = streamTaskGetStatus(pTask, NULL); - ASSERT(status == TASK_STATUS__STREAM_SCAN_HISTORY); - - streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); - + taosThreadMutexLock(&pTask->lock); SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId}; streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id); + taosThreadMutexUnlock(&pTask->lock); // clear the scheduler status streamTaskSetSchedStatusInactive(pTask); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 41b1aa7bd1..bfa8cfdb53 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -195,7 +195,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal); int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal); - wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64", 0x%"PRIx64, + wDebug("vgId:%d, start to fetch wal, index:%" PRId64 ", last:%" PRId64 " commit:%" PRId64 ", applied:%" PRId64 + ", 0x%" PRIx64, vgId, offset, lastVer, committedVer, appliedVer, id); while (offset <= appliedVer) { diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 1b0a76e81c..2a600d08ef 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -24,6 +24,7 @@ static int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId); static bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver); static bool 
taskReadyForDataFromWal(SStreamTask* pTask); static bool doPutDataIntoInputQFromWal(SStreamTask* pTask, int64_t maxVer, int32_t* numOfItems); +static int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration); // extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks. int32_t tqScanWal(STQ* pTq) { @@ -31,35 +32,78 @@ int32_t tqScanWal(STQ* pTq) { SStreamMeta* pMeta = pTq->pStreamMeta; int64_t st = taosGetTimestampMs(); - while (1) { - int32_t scan = pMeta->walScanCounter; - tqDebug("vgId:%d continue check if data in wal are available, walScanCounter:%d", vgId, scan); + tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter); - // check all tasks - bool shouldIdle = true; - doScanWalForAllTasks(pTq->pStreamMeta, &shouldIdle); + // check all tasks + int32_t numOfTasks = 0; + bool shouldIdle = true; + doScanWalForAllTasks(pMeta, &shouldIdle); - if (shouldIdle) { - streamMetaWLock(pMeta); - int32_t times = (--pMeta->walScanCounter); - ASSERT(pMeta->walScanCounter >= 0); - streamMetaWUnLock(pMeta); + streamMetaWLock(pMeta); + int32_t times = (--pMeta->scanInfo.scanCounter); + ASSERT(pMeta->scanInfo.scanCounter >= 0); - if (times <= 0) { - break; - } else { - tqDebug("vgId:%d scan wal for stream tasks for %d times in %dms", vgId, times, SCAN_WAL_IDLE_DURATION); - } - } - - taosMsleep(SCAN_WAL_IDLE_DURATION); - } + numOfTasks = taosArrayGetSize(pMeta->pTaskList); + streamMetaWUnLock(pMeta); int64_t el = (taosGetTimestampMs() - st); tqDebug("vgId:%d scan wal for stream tasks completed, elapsed time:%" PRId64 " ms", vgId, el); + + if (times > 0) { + tqDebug("vgId:%d scan wal for stream tasks for %d times in %dms", vgId, times, SCAN_WAL_IDLE_DURATION); + tqScanWalInFuture(pTq, numOfTasks, SCAN_WAL_IDLE_DURATION); + } return 0; } +typedef struct SBuildScanWalMsgParam { + STQ* pTq; + int32_t numOfTasks; +} SBuildScanWalMsgParam; + +static void doStartScanWal(void* param, void* tmrId) { + SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*) param; + + int32_t vgId = pParam->pTq->pStreamMeta->vgId; + + SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); + if (pRunReq == NULL) { + taosMemoryFree(pParam); + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr()); + return; + } + + tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks, + pParam->pTq->pVnode->restored); + + pRunReq->head.vgId = vgId; + pRunReq->streamId = 0; + pRunReq->taskId = 0; + pRunReq->reqType = STREAM_EXEC_T_EXTRACT_WAL_DATA; + + SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; + tmsgPutToQueue(&pParam->pTq->pVnode->msgCb, STREAM_QUEUE, &msg); + + taosMemoryFree(pParam); +} + +int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { + SStreamMeta* pMeta = pTq->pStreamMeta; + + SBuildScanWalMsgParam* pParam = taosMemoryMalloc(sizeof(SBuildScanWalMsgParam)); + + pParam->pTq = pTq; + pParam->numOfTasks = numOfTasks; + if (pMeta->scanInfo.scanTimer == NULL) { + pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTq->tqTimer); + } else { + taosTmrReset(doStartScanWal, idleDuration, pParam, pTq->tqTimer, &pMeta->scanInfo.scanTimer); + } + + return TSDB_CODE_SUCCESS; +} + int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { int32_t vgId = 
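`tqScanWal` no longer sleeps in a `taosMsleep` loop between passes, which pinned a stream worker thread for the whole retry window. It now does a single pass, and if `scanInfo.scanCounter` is still positive, `tqScanWalInFuture` arms a timer whose callback posts a `STREAM_EXEC_T_EXTRACT_WAL_DATA` run request back onto the stream queue. A sketch of the rearm pattern, with stand-in timer and queue APIs (the real code uses `taosTmrStart`/`taosTmrReset` and `tmsgPutToQueue` on `STREAM_QUEUE`):

```
typedef void (*timer_cb)(void *arg);
extern void timerArm(int delayMs, timer_cb cb, void *arg);  /* stand-in */
extern void postExtractWalDataReq(void *vnode);             /* stand-in */

typedef struct { int scanCounter; void *vnode; } ScanCtx;

static void onScanTimer(void *arg) {
  ScanCtx *ctx = arg;
  postExtractWalDataReq(ctx->vnode);  /* re-enter the scan via the queue */
}

static void scanWalOnce(ScanCtx *ctx) {
  /* ... one pass over all source tasks ... */
  if (--ctx->scanCounter > 0) {
    timerArm(100 /* ~SCAN_WAL_IDLE_DURATION */, onScanTimer, ctx);
  }
  /* the worker returns immediately instead of sleeping in a loop */
}
```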
TD_VID(pTq->pVnode); SStreamMeta* pMeta = pTq->pStreamMeta; @@ -80,23 +124,23 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { return 0; } - pMeta->walScanCounter += 1; - if (pMeta->walScanCounter > MAX_REPEAT_SCAN_THRESHOLD) { - pMeta->walScanCounter = MAX_REPEAT_SCAN_THRESHOLD; + pMeta->scanInfo.scanCounter += 1; + if (pMeta->scanInfo.scanCounter > MAX_REPEAT_SCAN_THRESHOLD) { + pMeta->scanInfo.scanCounter = MAX_REPEAT_SCAN_THRESHOLD; } - if (pMeta->walScanCounter > 1) { - tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScanCounter); + if (pMeta->scanInfo.scanCounter > 1) { + tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->scanInfo.scanCounter); streamMetaWUnLock(pMeta); return 0; } - int32_t numOfPauseTasks = pTq->pStreamMeta->numOfPausedTasks; + int32_t numOfPauseTasks = pMeta->numOfPausedTasks; if (ckPause && numOfTasks == numOfPauseTasks) { tqDebug("vgId:%d ignore all submit, all streams had been paused, reset the walScanCounter", vgId); // reset the counter value, since we do not launch the scan wal operation. - pMeta->walScanCounter = 0; + pMeta->scanInfo.scanCounter = 0; streamMetaWUnLock(pMeta); return 0; } @@ -109,12 +153,13 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { return -1; } - tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d, restored:%d", vgId, numOfTasks, - alreadyRestored); + tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d, vnd restored:%d", vgId, + numOfTasks, alreadyRestored); pRunReq->head.vgId = vgId; pRunReq->streamId = 0; - pRunReq->taskId = STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID; + pRunReq->taskId = 0; + pRunReq->reqType = STREAM_EXEC_T_EXTRACT_WAL_DATA; SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg); @@ -123,33 +168,26 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { return 0; } -int32_t tqStopStreamTasks(STQ* pTq) { +int32_t tqStopStreamTasksAsync(STQ* pTq) { SStreamMeta* pMeta = pTq->pStreamMeta; - int32_t vgId = TD_VID(pTq->pVnode); - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + int32_t vgId = pMeta->vgId; - tqDebug("vgId:%d stop all %d stream task(s)", vgId, numOfTasks); - if (numOfTasks == 0) { - return TSDB_CODE_SUCCESS; + SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); + if (pRunReq == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("vgId:%d failed to create msg to stop tasks, code:%s", vgId, terrstr()); + return -1; } - SArray* pTaskList = NULL; - streamMetaWLock(pMeta); - pTaskList = taosArrayDup(pMeta->pTaskList, NULL); - streamMetaWUnLock(pMeta); + tqDebug("vgId:%d create msg to stop tasks", vgId); - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId); - if (pTask == NULL) { - continue; - } + pRunReq->head.vgId = vgId; + pRunReq->streamId = 0; + pRunReq->taskId = 0; + pRunReq->reqType = STREAM_EXEC_T_STOP_ALL_TASKS; - streamTaskStop(pTask); - streamMetaReleaseTask(pMeta, pTask); - } - - taosArrayDestroy(pTaskList); + SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; + tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg); return 0; } @@ -228,16 +266,15 @@ bool taskReadyForDataFromWal(SStreamTask* pTask) { } // not in ready state, do not handle the data 
from wal - char* p = NULL; - int32_t status = streamTaskGetStatus(pTask, &p); - if (streamTaskGetStatus(pTask, &p) != TASK_STATUS__READY) { - tqTrace("s-task:%s not ready for submit block in wal, status:%s", pTask->id.idStr, p); + SStreamTaskState* pState = streamTaskGetStatus(pTask); + if (pState->state != TASK_STATUS__READY) { + tqTrace("s-task:%s not ready for submit block in wal, status:%s", pTask->id.idStr, pState->name); return false; } // fill-history task has entered into the last phase, no need to anything if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) { - ASSERT(status == TASK_STATUS__READY); + ASSERT(pState->state == TASK_STATUS__READY); // the maximum version of data in the WAL has reached already, the step2 is done tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr, pTask->dataRange.range.maxVer); @@ -350,10 +387,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { taosThreadMutexLock(&pTask->lock); - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); - if (status != TASK_STATUS__READY) { - tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, p); + SStreamTaskState* pState = streamTaskGetStatus(pTask); + if (pState->state != TASK_STATUS__READY) { + tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pState->name); taosThreadMutexUnlock(&pTask->lock); streamMetaReleaseTask(pStreamMeta, pTask); continue; diff --git a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c index f966e90b9a..dda5173ad9 100644 --- a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c +++ b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c @@ -196,7 +196,7 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) { int32_t code = 0; STQ* pTq = pWriter->pTq; - taosWLockLatch(&pTq->pStreamMeta->lock); + streamMetaWLock(pTq->pStreamMeta); tqDebug("vgId:%d, vnode stream-task snapshot writer closed", TD_VID(pTq->pVnode)); if (rollback) { tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); @@ -212,14 +212,14 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) { taosMemoryFree(pWriter); goto _err; } - taosWUnLockLatch(&pTq->pStreamMeta->lock); + streamMetaWUnLock(pTq->pStreamMeta); taosMemoryFree(pWriter); return code; _err: tqError("vgId:%d, vnode stream-task snapshot writer failed to close since %s", TD_VID(pWriter->pTq->pVnode), tstrerror(code)); - taosWUnLockLatch(&pTq->pStreamMeta->lock); + streamMetaWUnLock(pTq->pStreamMeta); return code; } @@ -240,13 +240,13 @@ int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t tDecoderClear(&decoder); int64_t key[2] = {taskId.streamId, taskId.taskId}; - taosWLockLatch(&pTq->pStreamMeta->lock); + streamMetaWLock(pTq->pStreamMeta); if (tdbTbUpsert(pTq->pStreamMeta->pTaskDb, key, sizeof(int64_t) << 1, (uint8_t*)pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr), pTq->pStreamMeta->txn) < 0) { - taosWUnLockLatch(&pTq->pStreamMeta->lock); + streamMetaWUnLock(pTq->pStreamMeta); return -1; } - taosWUnLockLatch(&pTq->pStreamMeta->lock); + streamMetaWUnLock(pTq->pStreamMeta); } else if (pHdr->type == SNAP_DATA_STREAM_TASK_CHECKPOINT) { // do nothing } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 110cf79b4e..d18455d221 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -35,20 +35,8 @@ 
int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { } void tqUpdateNodeStage(STQ* pTq, bool isLeader) { - SSyncState state = syncGetState(pTq->pVnode->sync); - SStreamMeta* pMeta = pTq->pStreamMeta; - int64_t stage = pMeta->stage; - - pMeta->stage = state.term; - pMeta->role = (isLeader)? NODE_ROLE_LEADER:NODE_ROLE_FOLLOWER; - if (isLeader) { - tqInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb", pMeta->vgId, - state.term, stage, isLeader); - streamMetaStartHb(pMeta); - } else { - tqInfo("vgId:%d update meta stage:%" PRId64 " prev:%" PRId64 " leader:%d", pMeta->vgId, state.term, stage, - isLeader); - } + SSyncState state = syncGetState(pTq->pVnode->sync); + streamMetaUpdateStageRole(pTq->pStreamMeta, state.term, isLeader); } static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, STqOffsetVal pOffset) { diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 64c4d2af5d..48eca5c349 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -25,7 +25,6 @@ typedef struct STaskUpdateEntry { int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) { int32_t vgId = pMeta->vgId; - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); if (numOfTasks == 0) { tqDebug("vgId:%d no stream tasks existed to run", vgId); @@ -42,7 +41,62 @@ int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) { tqDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks); pRunReq->head.vgId = vgId; pRunReq->streamId = 0; - pRunReq->taskId = restart ? STREAM_EXEC_RESTART_ALL_TASKS_ID : STREAM_EXEC_START_ALL_TASKS_ID; + pRunReq->taskId = 0; + pRunReq->reqType = restart ? STREAM_EXEC_T_RESTART_ALL_TASKS : STREAM_EXEC_T_START_ALL_TASKS; + + SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; + tmsgPutToQueue(cb, STREAM_QUEUE, &msg); + return 0; +} + +int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId) { + int32_t vgId = pMeta->vgId; + + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + if (numOfTasks == 0) { + tqDebug("vgId:%d no stream tasks existed to run", vgId); + return 0; + } + + SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); + if (pRunReq == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("vgId:%d failed to create msg to start task:0x%x, code:%s", vgId, taskId, terrstr()); + return -1; + } + + tqDebug("vgId:%d start task:0x%x async", vgId, taskId); + pRunReq->head.vgId = vgId; + pRunReq->streamId = streamId; + pRunReq->taskId = taskId; + pRunReq->reqType = STREAM_EXEC_T_START_ONE_TASK; + + SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; + tmsgPutToQueue(cb, STREAM_QUEUE, &msg); + return 0; +} + +int32_t tqUpdateNodeEpsetAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId) { + int32_t vgId = pMeta->vgId; + + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + if (numOfTasks == 0) { + tqDebug("vgId:%d no stream tasks existed to run", vgId); + return 0; + } + + SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); + if (pRunReq == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("vgId:%d failed to create msg to start task:0x%x, code:%s", vgId, taskId, terrstr()); + return -1; + } + + tqDebug("vgId:%d update s-task:0x%x nodeEpset async", vgId, taskId); + pRunReq->head.vgId = vgId; + 
pRunReq->streamId = streamId; + pRunReq->taskId = taskId; + pRunReq->reqType = STREAM_EXEC_T_UPDATE_TASK_EPSET; SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; tmsgPutToQueue(cb, STREAM_QUEUE, &msg); @@ -54,6 +108,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t len = pMsg->contLen - sizeof(SMsgHead); SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; + int64_t st = taosGetTimestampMs(); SStreamTaskNodeUpdateMsg req = {0}; @@ -75,7 +130,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask == NULL || *ppTask == NULL) { - tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", pMeta->vgId, + tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; streamMetaWUnLock(pMeta); @@ -85,42 +140,38 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM } SStreamTask* pTask = *ppTask; + const char* idstr = pTask->id.idStr; if (pMeta->updateInfo.transId != req.transId) { pMeta->updateInfo.transId = req.transId; - tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", pTask->id.idStr, req.transId); + tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", idstr, req.transId); // info needs to be kept till the new trans to update the nodeEp arrived. taosHashClear(pMeta->updateInfo.pTasks); } else { - tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", pTask->id.idStr, req.transId); + tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", idstr, req.transId); } STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId}; - void* exist = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry)); - if (exist != NULL) { - tqDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", pTask->id.idStr, vgId, - req.transId); + + void* pReqTask = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry)); + if (pReqTask != NULL) { + tqDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", idstr, vgId, req.transId); rsp.code = TSDB_CODE_SUCCESS; streamMetaWUnLock(pMeta); taosArrayDestroy(req.pNodeList); return rsp.code; } - streamMetaWUnLock(pMeta); - // the following two functions should not be executed within the scope of meta lock to avoid deadlock streamTaskUpdateEpsetInfo(pTask, req.pNodeList); streamTaskResetStatus(pTask); - // continue after lock the meta again - streamMetaWLock(pMeta); - SStreamTask** ppHTask = NULL; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); if (ppHTask == NULL || *ppHTask == NULL) { tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already", - pMeta->vgId, req.taskId); + vgId, req.taskId); CLEAR_RELATED_FILLHISTORY_TASK(pTask); } else { tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr); @@ -128,17 +179,23 @@ int32_t 
tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM } } - { + if (restored) { + tqDebug("s-task:%s vgId:%d start to save task", idstr, vgId); streamMetaSaveTask(pMeta, pTask); if (ppHTask != NULL) { streamMetaSaveTask(pMeta, *ppHTask); } +#if 0 if (streamMetaCommit(pMeta) < 0) { // persist to disk } +#endif + } else { + tqDebug("s-task:%s vgId:%d not save since restore not finish", idstr, vgId); } + tqDebug("s-task:%s vgId:%d start to stop task after save task", idstr, vgId); streamTaskStop(pTask); // keep the already handled info @@ -146,10 +203,15 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM if (ppHTask != NULL) { streamTaskStop(*ppHTask); - tqDebug("s-task:%s task nodeEp update completed, streamTask and related fill-history task closed", pTask->id.idStr); + + int64_t now = taosGetTimestampMs(); + tqDebug("s-task:%s vgId:%d task nodeEp update completed, streamTask/fill-history closed, elapsed:%" PRId64 " ms", + idstr, vgId, now - st); taosHashPut(pMeta->updateInfo.pTasks, &(*ppHTask)->id, sizeof(pTask->id), NULL, 0); } else { - tqDebug("s-task:%s task nodeEp update completed, streamTask closed", pTask->id.idStr); + int64_t now = taosGetTimestampMs(); + tqDebug("s-task:%s vgId:%d, task nodeEp update completed, streamTask closed, elapsed time:%" PRId64 "ms", idstr, + vgId, now - st); } rsp.code = 0; @@ -166,65 +228,18 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamMetaWUnLock(pMeta); } else { if (!restored) { - tqDebug("vgId:%d vnode restore not completed, not restart the tasks, clear the start after nodeUpdate flag", - vgId); + tqDebug("vgId:%d vnode restore not completed, not start the tasks, clear the start after nodeUpdate flag", vgId); pMeta->startInfo.tasksWillRestart = 0; streamMetaWUnLock(pMeta); } else { - tqDebug("vgId:%d all %d task(s) nodeEp updated and closed", vgId, numOfTasks); -#if 1 + tqDebug("vgId:%d all %d task(s) nodeEp updated and closed, transId:%d", vgId, numOfTasks, req.transId); +#if 0 + // for test purpose, to trigger the leader election + taosMSleep(5000); +#endif + tqStreamTaskStartAsync(pMeta, cb, true); streamMetaWUnLock(pMeta); -#else - streamMetaWUnLock(pMeta); - - // For debug purpose. - // the following procedure consume many CPU resource, result in the re-election of leader - // with high probability. So we employ it as a test case for the stream processing framework, with - // checkpoint/restart/nodeUpdate etc. 
- while (1) { - int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1); - if (startVal == 0) { - break; - } - - tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId); - taosMsleep(500); - } - - while (streamMetaTaskInTimer(pMeta)) { - tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); - taosMsleep(100); - } - - streamMetaWLock(pMeta); - - int32_t code = streamMetaReopen(pMeta); - if (code != 0) { - tqError("vgId:%d failed to reopen stream meta", vgId); - streamMetaWUnLock(pMeta); - taosArrayDestroy(req.pNodeList); - return -1; - } - - streamMetaInitBackend(pMeta); - - if (streamMetaLoadAllTasks(pTq->pStreamMeta) < 0) { - tqError("vgId:%d failed to load stream tasks", vgId); - streamMetaWUnLock(pMeta); - taosArrayDestroy(req.pNodeList); - return -1; - } - - if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) { - tqInfo("vgId:%d start all stream tasks after all being updated", vgId); - resetStreamTaskStatus(pTq->pStreamMeta); - tqStartStreamTaskAsync(pTq, false); - } else { - tqInfo("vgId:%d, follower node not start stream tasks", vgId); - } - streamMetaWUnLock(pMeta); -#endif } } @@ -379,6 +394,7 @@ int32_t tqStreamTaskProcessScanHistoryFinishReq(SStreamMeta* pMeta, SRpcMsg* pMs } int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) { + int32_t code = TSDB_CODE_SUCCESS; char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -390,6 +406,12 @@ int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMs tDecodeCompleteHistoryDataMsg(&decoder, &req); tDecoderClear(&decoder); + if (pMeta->role == NODE_ROLE_FOLLOWER) { + tqError("s-task:0x%x (vgId:%d) not handle the scan-history finish rsp, since it becomes follower", + req.upstreamTaskId, pMeta->vgId); + return TASK_DOWNSTREAM_NOT_LEADER; + } + SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId); if (pTask == NULL) { tqError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed", pMeta->vgId, @@ -406,11 +428,11 @@ int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMs "s-task:%s scan-history finish rsp received from downstream task:0x%x, all downstream tasks rsp scan-history " "completed msg", pTask->id.idStr, req.downstreamId); - streamProcessScanHistoryFinishRsp(pTask); + code = streamProcessScanHistoryFinishRsp(pTask); } streamMetaReleaseTask(pMeta, pTask); - return 0; + return code; } int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { @@ -449,11 +471,11 @@ int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage, &rsp.oldStage); streamMetaReleaseTask(pMeta, pTask); - char* p = NULL; - streamTaskGetStatus(pTask, &p); + SStreamTaskState* pState = streamTaskGetStatus(pTask); tqDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), check_status:%d", - pTask->id.idStr, p, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); + pTask->id.idStr, pState->name, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, + rsp.status); } else { rsp.status = TASK_DOWNSTREAM_NOT_READY; tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64 @@ -465,12 +487,22 @@ int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, 
SRpcMsg* pMsg) { return streamSendCheckRsp(pMeta, &req, &rsp, &pMsg->info, taskId); } +static void setParam(SStreamTask* pTask, int64_t* initTs, bool* hasHTask, STaskId* pId) { + *initTs = pTask->execInfo.init; + + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + *hasHTask = true; + pId->streamId = pTask->hTaskInfo.id.streamId; + pId->taskId = pTask->hTaskInfo.id.taskId; + } +} + int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) { char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t len = pMsg->contLen - sizeof(SMsgHead); int32_t vgId = pMeta->vgId; + int32_t code = TSDB_CODE_SUCCESS; - int32_t code; SStreamTaskCheckRsp rsp; SDecoder decoder; @@ -487,20 +519,62 @@ int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLe tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d", rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status); + int64_t initTs = 0; + int64_t now = taosGetTimestampMs(); + STaskId id = {.streamId = rsp.streamId, .taskId = rsp.upstreamTaskId}; + STaskId fId = {0}; + bool hasHistoryTask = false; + + // todo extract method if (!isLeader) { - streamMetaUpdateTaskDownstreamStatus(pMeta, rsp.streamId, rsp.upstreamTaskId, 0, taosGetTimestampMs(), false); - tqError("vgId:%d not leader, task:0x%x not handle the check rsp, downstream:0x%x (vgId:%d)", vgId, - rsp.upstreamTaskId, rsp.downstreamTaskId, rsp.downstreamNodeId); + // this task may have been stopped, so acquire task may failed. Retrieve it directly from the task hash map. + streamMetaRLock(pMeta); + + SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (ppTask != NULL) { + setParam(*ppTask, &initTs, &hasHistoryTask, &fId); + streamMetaRUnLock(pMeta); + + if (hasHistoryTask) { + streamMetaAddTaskLaunchResult(pMeta, fId.streamId, fId.taskId, initTs, now, false); + } + + tqError("vgId:%d not leader, task:0x%x not handle the check rsp, downstream:0x%x (vgId:%d)", vgId, + rsp.upstreamTaskId, rsp.downstreamTaskId, rsp.downstreamNodeId); + } else { + streamMetaRUnLock(pMeta); + + tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped", + rsp.streamId, rsp.upstreamTaskId, vgId); + code = terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + streamMetaAddTaskLaunchResult(pMeta, rsp.streamId, rsp.upstreamTaskId, initTs, now, false); return code; } SStreamTask* pTask = streamMetaAcquireTask(pMeta, rsp.streamId, rsp.upstreamTaskId); if (pTask == NULL) { - streamMetaUpdateTaskDownstreamStatus(pMeta, rsp.streamId, rsp.upstreamTaskId, 0, taosGetTimestampMs(), false); + streamMetaRLock(pMeta); + + SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (ppTask != NULL) { + setParam(*ppTask, &initTs, &hasHistoryTask, &fId); + streamMetaRUnLock(pMeta); + + if (hasHistoryTask) { + streamMetaAddTaskLaunchResult(pMeta, fId.streamId, fId.taskId, initTs, now, false); + } + } else { + streamMetaRUnLock(pMeta); + } + + streamMetaAddTaskLaunchResult(pMeta, rsp.streamId, rsp.upstreamTaskId, initTs, now, false); tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped", rsp.streamId, rsp.upstreamTaskId, vgId); - terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST; - return -1; + + code = terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST; + return code; } code = streamProcessCheckRsp(pTask, &rsp); @@ -539,8 +613,8 @@ int32_t 
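// --- Reviewer sketch, not part of the patch --------------------------------
// When the check rsp cannot be handled (node is not leader, or the task is
// already gone), the hunk above reads the task straight from the hash map
// under a read lock, copies out its init timestamp plus the related
// fill-history id via setParam, then records a launch result for both ids.
// Simplified model; types are hypothetical and locking is elided.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t streamId; int32_t taskId; } TaskId;
typedef struct { int64_t initTs; bool hasHist; TaskId histId; } TaskSnap;

static void add_launch_result(TaskId id, int64_t initTs, int64_t now, bool ok) {
  printf("task:0x%x launch %s\n", id.taskId, ok ? "ok" : "failed");
  (void)initTs; (void)now;
}

// Mark the task and, if present, its fill-history twin as failed to launch.
static void mark_launch_failed(const TaskSnap *s, TaskId id, int64_t now) {
  if (s->hasHist) add_launch_result(s->histId, s->initTs, now, false);
  add_launch_result(id, s->initTs, now, false);
}
// ----------------------------------------------------------------------------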
tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg) return code; } -int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, - bool restored) { +int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen, + bool isLeader, bool restored) { int32_t code = 0; int32_t vgId = pMeta->vgId; @@ -592,18 +666,19 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* // only handled in the leader node if (isLeader) { tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks); - SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId); - if (p != NULL && restored && p->info.fillHistory == 0) { - EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; - streamTaskHandleEvent(p->status.pSM, event); - } else if (!restored) { - tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId); + if (restored) { + SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId); + if (p != NULL && (p->info.fillHistory == 0)) { + tqStreamOneTaskStartAsync(pMeta, cb, streamId, taskId); + } + if (p != NULL) { + streamMetaReleaseTask(pMeta, p); + } + } else { + tqWarn("s-task:0x%x not launched since vnode(vgId:%d) not ready", taskId, vgId); } - if (p != NULL) { - streamMetaReleaseTask(pMeta, p); - } } else { tqDebug("vgId:%d not leader, not launch stream task s-task:0x%x", vgId, taskId); } @@ -648,79 +723,25 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen return 0; } -int32_t startStreamTasks(SStreamMeta* pMeta) { - int32_t code = TSDB_CODE_SUCCESS; +int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks) { int32_t vgId = pMeta->vgId; + *numOfTasks = taosArrayGetSize(pMeta->pTaskList); - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - tqDebug("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks); - if (numOfTasks == 0) { + tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, *numOfTasks); + if (*numOfTasks == 0) { return TSDB_CODE_SUCCESS; } - SArray* pTaskList = NULL; - streamMetaWLock(pMeta); - pTaskList = taosArrayDup(pMeta->pTaskList, NULL); - taosHashClear(pMeta->startInfo.pReadyTaskSet); - taosHashClear(pMeta->startInfo.pFailedTaskSet); - pMeta->startInfo.startTs = taosGetTimestampMs(); - streamMetaWUnLock(pMeta); - - // broadcast the check downstream tasks msg - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId); - if (pTask == NULL) { - continue; - } - - // fill-history task can only be launched by related stream tasks. - if (pTask->info.fillHistory == 1) { - streamMetaReleaseTask(pMeta, pTask); - continue; - } - - if (pTask->status.downstreamReady == 1) { - if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - tqDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task", - pTask->id.idStr); - streamLaunchFillHistoryTask(pTask); - } - - streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, pTask->execInfo.init, - pTask->execInfo.start, true); - streamMetaReleaseTask(pMeta, pTask); - continue; - } - - EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? 
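// --- Reviewer sketch, not part of the patch --------------------------------
// The deploy hunk above now launches the task asynchronously, and only when
// the vnode has finished restore; fill-history tasks are still launched by
// their related stream task. Reduced model with hypothetical helpers
// standing in for tqStreamOneTaskStartAsync.
#include <stdbool.h>
#include <stdio.h>

typedef struct { int taskId; bool fillHistory; } DeployedTask;

static void start_async(int taskId) { printf("queue async start 0x%x\n", taskId); }

static void on_deploy(DeployedTask *t, bool isLeader, bool restored) {
  if (!isLeader) return;  // only the leader launches stream tasks
  if (!restored) {
    printf("task 0x%x not launched, vnode not ready\n", t->taskId);
    return;
  }
  if (!t->fillHistory) start_async(t->taskId);
}
// ----------------------------------------------------------------------------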
TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; - int32_t ret = streamTaskHandleEvent(pTask->status.pSM, event); - if (ret != TSDB_CODE_SUCCESS) { - code = ret; - } - - streamMetaReleaseTask(pMeta, pTask); - } - - taosArrayDestroy(pTaskList); - return code; -} - -int32_t resetStreamTaskStatus(SStreamMeta* pMeta) { - int32_t vgId = pMeta->vgId; - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - - tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks); - if (numOfTasks == 0) { - return TSDB_CODE_SUCCESS; - } - - for (int32_t i = 0; i < numOfTasks; ++i) { + for (int32_t i = 0; i < (*numOfTasks); ++i) { SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); streamTaskResetStatus(*pTask); + +// if ((*pTask)->info.fillHistory == 1) { +// streamResetParamForScanHistory(*pTask); +// } } return 0; @@ -731,16 +752,18 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { int32_t code = 0; int64_t st = taosGetTimestampMs(); - while (1) { - int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1); - if (startVal == 0) { - break; - } - - tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId); - taosMsleep(500); + streamMetaWLock(pMeta); + if (pMeta->startInfo.taskStarting == 1) { + pMeta->startInfo.restartCount += 1; + tqDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId, + pMeta->startInfo.restartCount); + streamMetaWUnLock(pMeta); + return TSDB_CODE_SUCCESS; } + pMeta->startInfo.taskStarting = 1; + streamMetaWUnLock(pMeta); + terrno = 0; tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d", vgId, pMeta->updateInfo.transId); @@ -764,12 +787,19 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { return code; } - if (isLeader && !tsDisableStream) { - resetStreamTaskStatus(pMeta); - streamMetaWUnLock(pMeta); - tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId); + { + STaskStartInfo* pStartInfo = &pMeta->startInfo; + taosHashClear(pStartInfo->pReadyTaskSet); + taosHashClear(pStartInfo->pFailedTaskSet); + pStartInfo->readyTs = 0; + } - startStreamTasks(pMeta); + if (isLeader && !tsDisableStream) { + int32_t numOfTasks = 0; + tqStreamTaskResetStatus(pMeta, &numOfTasks); + streamMetaWUnLock(pMeta); + + streamMetaStartAllTasks(pMeta); } else { streamMetaResetStartInfo(&pMeta->startInfo); streamMetaWUnLock(pMeta); @@ -783,18 +813,44 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) { SStreamTaskRunReq* pReq = pMsg->pCont; - int32_t taskId = pReq->taskId; + int32_t type = pReq->reqType; int32_t vgId = pMeta->vgId; - if (taskId == STREAM_EXEC_START_ALL_TASKS_ID) { - startStreamTasks(pMeta); + if (type == STREAM_EXEC_T_START_ONE_TASK) { + streamMetaStartOneTask(pMeta, pReq->streamId, pReq->taskId); return 0; - } else if (taskId == STREAM_EXEC_RESTART_ALL_TASKS_ID) { + } else if (type == STREAM_EXEC_T_START_ALL_TASKS) { + streamMetaStartAllTasks(pMeta); + return 0; + } else if (type == STREAM_EXEC_T_RESTART_ALL_TASKS) { restartStreamTasks(pMeta, isLeader); + return 0; + } else if (type == STREAM_EXEC_T_STOP_ALL_TASKS) { + streamMetaStopAllTasks(pMeta); + return 0; + } else if (type == 
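// --- Reviewer sketch, not part of the patch --------------------------------
// restartStreamTasks above replaces the old CAS busy-wait with a counter:
// if a start procedure is already running, bump restartCount and return; the
// running procedure consumes the pending restart in its completion callback
// (tqStartTaskCompleteCallback). Minimal model with a plain mutex standing
// in for the stream-meta write lock.
#include <pthread.h>

typedef struct {
  pthread_mutex_t lock;
  int taskStarting;  // 1 while a start procedure is running
  int restartCount;  // queued restart requests
} StartInfo;

static int request_restart(StartInfo *s) {
  pthread_mutex_lock(&s->lock);
  if (s->taskStarting) {  // another thread is starting: queue and leave
    s->restartCount++;
    pthread_mutex_unlock(&s->lock);
    return 0;
  }
  s->taskStarting = 1;  // this thread owns the start procedure
  pthread_mutex_unlock(&s->lock);
  // ... stop, reset and restart all tasks, then run the completion callback ...
  return 0;
}

// Completion callback: hand one queued restart, if any, to a fresh procedure.
static int on_start_complete(StartInfo *s) {
  pthread_mutex_lock(&s->lock);
  s->taskStarting = 0;
  int pending = (s->restartCount > 0) ? s->restartCount-- : 0;
  pthread_mutex_unlock(&s->lock);
  return pending ? request_restart(s) : 0;
}
// ----------------------------------------------------------------------------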
STREAM_EXEC_T_RESUME_TASK) { // task resume to run after idle for a while + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); + + if (pTask != NULL) { + char* pStatus = NULL; + if (streamTaskReadyToRun(pTask, &pStatus)) { + int64_t execTs = pTask->status.lastExecTs; + int32_t idle = taosGetTimestampMs() - execTs; + tqDebug("s-task:%s task resume to run after idle for:%dms from:%" PRId64, pTask->id.idStr, idle, execTs); + + streamResumeTask(pTask); + } else { + int8_t status = streamTaskSetSchedStatusInactive(pTask); + tqDebug("vgId:%d s-task:%s ignore run req since not in ready state, status:%s, sched-status:%d", vgId, + pTask->id.idStr, pStatus, status); + } + streamMetaReleaseTask(pMeta, pTask); + } + return 0; } - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, taskId); + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); if (pTask != NULL) { // even in halt status, the data in inputQ must be processed char* p = NULL; if (streamTaskReadyToRun(pTask, &p)) { @@ -811,7 +867,160 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead return 0; } else { // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec. // todo add one function to handle this - tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, taskId); + tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, pReq->taskId); return -1; } } + +int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) { + STaskStartInfo* pStartInfo = &pMeta->startInfo; + int32_t vgId = pMeta->vgId; + + streamMetaWLock(pMeta); + if (pStartInfo->taskStarting == 1) { + tqDebug("vgId:%d already in start tasks procedure in other thread, restartCounter:%d, do nothing", vgId, + pMeta->startInfo.restartCount); + } else { // not in starting procedure + if (pStartInfo->restartCount > 0) { + pStartInfo->restartCount -= 1; + tqDebug("vgId:%d role:%d need to restart all tasks again, restartCounter:%d", vgId, pMeta->role, + pStartInfo->restartCount); + + streamMetaWUnLock(pMeta); + restartStreamTasks(pMeta, (pMeta->role == NODE_ROLE_LEADER)); + return TSDB_CODE_SUCCESS; + } else { + tqDebug("vgId:%d start all tasks completed in callbackFn", pMeta->vgId); + } + } + + streamMetaWUnLock(pMeta); + return TSDB_CODE_SUCCESS; +} + +int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { + SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg->pCont; + + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); + if (pTask == NULL) { + tqError("vgId:%d process task-reset req, failed to acquire task:0x%x, it may have been dropped already", + pMeta->vgId, pReq->taskId); + return TSDB_CODE_SUCCESS; + } + + tqDebug("s-task:%s receive task-reset msg from mnode, reset status and ready for data processing", pTask->id.idStr); + + // clear flag set during do checkpoint, and open inputQ for all upstream tasks + if (streamTaskGetStatus(pTask)->state == TASK_STATUS__CK) { + streamTaskClearCheckInfo(pTask, true); + streamTaskSetStatusReady(pTask); + } + + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_SUCCESS; +} + +int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg){ + SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg; + + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); + if (pTask == NULL) { + tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been 
dropped already", pMeta->vgId, + pReq->taskId); + // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active + return TSDB_CODE_SUCCESS; + } + + tqDebug("s-task:%s receive pause msg from mnode", pTask->id.idStr); + streamTaskPause(pTask, pMeta); + + SStreamTask* pHistoryTask = NULL; + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId); + if (pHistoryTask == NULL) { + tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64 + ", it may have been dropped already", + pMeta->vgId, pTask->hTaskInfo.id.taskId); + streamMetaReleaseTask(pMeta, pTask); + + // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active + return TSDB_CODE_SUCCESS; + } + + tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr); + + streamTaskPause(pHistoryTask, pMeta); + streamMetaReleaseTask(pMeta, pHistoryTask); + } + + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_SUCCESS; +} + +static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t sversion, int8_t igUntreated, bool fromVnode) { + SStreamMeta *pMeta = fromVnode ? ((STQ*)handle)->pStreamMeta : handle; + int32_t vgId = pMeta->vgId; + if (pTask == NULL) { + return -1; + } + + streamTaskResume(pTask); + ETaskStatus status = streamTaskGetStatus(pTask)->state; + + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__SINK) { + if (status == TASK_STATUS__UNINIT) { + } + streamMetaReleaseTask(pMeta, pTask); + return 0; + } + + if (status == TASK_STATUS__READY || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) { + // no lock needs to secure the access of the version + if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) { + // discard all the data when the stream task is suspended. + walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion); + tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64 + ", schedStatus:%d", + vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus); + } else { // from the previous paused version and go on + tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", + vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus); + } + + if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && status == TASK_STATUS__SCAN_HISTORY) { + streamStartScanHistoryAsync(pTask, igUntreated); + } else if (level == TASK_LEVEL__SOURCE && (streamQueueGetNumOfItems(pTask->inputq.queue) == 0)) { + tqScanWalAsync((STQ*)handle, false); + } else { + streamSchedExec(pTask); + } + } else if (status == TASK_STATUS__UNINIT) { + // todo: fill-history task init ? + if (pTask->info.fillHistory == 0) { + EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; + streamTaskHandleEvent(pTask->status.pSM, event); + } + } + + streamMetaReleaseTask(pMeta, pTask); + return 0; +} + +int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* msg, bool fromVnode){ + SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg; + SStreamMeta *pMeta = fromVnode ? 
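// --- Reviewer sketch, not part of the patch --------------------------------
// The resume path is applied to the stream task first and then, if present,
// to its fill-history twin; a source task either skips the WAL forward to
// the vnode's current version (igUntreated, via walReaderSetSkipToVersion)
// or continues from the previously paused version. Reduced model with
// hypothetical types.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Task { const char *id; struct Task *hist; int64_t pausedVer; } Task;

static int resume_one(Task *t, int64_t vnodeVer, bool igUntreated) {
  if (!t) return 0;
  int64_t startVer = igUntreated ? vnodeVer : t->pausedVer;  // discard or keep
  printf("s-task:%s resume from ver:%lld\n", t->id, (long long)startVer);
  return 0;  // ... schedule execution from startVer ...
}

static int on_resume_req(Task *t, int64_t vnodeVer, bool igUntreated) {
  int code = resume_one(t, vnodeVer, igUntreated);
  if (code == 0 && t) code = resume_one(t->hist, vnodeVer, igUntreated);
  return code;
}
// ----------------------------------------------------------------------------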
((STQ*)handle)->pStreamMeta : handle; + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); + int32_t code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode); + if (code != 0) { + return code; + } + + STaskId* pHTaskId = &pTask->hTaskInfo.id; + SStreamTask* pHistoryTask = streamMetaAcquireTask(pMeta, pHTaskId->streamId, pHTaskId->taskId); + if (pHistoryTask) { + code = tqProcessTaskResumeImpl(handle, pHistoryTask, sversion, pReq->igUntreated, fromVnode); + } + + return code; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 1799823011..9b29329cf3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -885,9 +885,17 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr int32_t code = 0; rocksdb_writebatch_t *wb = NULL; SArray *pTmpColArray = NULL; - int num_keys = TARRAY_SIZE(remainCols); - int16_t *aCols = taosMemoryMalloc(num_keys * sizeof(int16_t)); - int16_t *slotIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + + SIdxKey *idxKey = taosArrayGet(remainCols, 0); + if (idxKey->key.cid != PRIMARYKEY_TIMESTAMP_COL_ID) { + SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = PRIMARYKEY_TIMESTAMP_COL_ID}; + + taosArrayInsert(remainCols, 0, &(SIdxKey){0, *key}); + } + + int num_keys = TARRAY_SIZE(remainCols); + int16_t *aCols = taosMemoryMalloc(num_keys * sizeof(int16_t)); + int16_t *slotIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); for (int i = 0; i < num_keys; ++i) { SIdxKey *idxKey = taosArrayGet(remainCols, i); @@ -1031,6 +1039,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA taosMemoryFree(values_list_sizes); if (TARRAY_SIZE(remainCols) > 0) { + // tsdbTrace("tsdb/cache: vgId: %d, load %" PRId64 " from raw", TD_VID(pTsdb->pVnode), uid); code = tsdbCacheLoadFromRaw(pTsdb, uid, pLastArray, remainCols, pr, ltype); } @@ -1090,6 +1099,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache } } + // tsdbTrace("tsdb/cache: vgId: %d, load %" PRId64 " from rocks", TD_VID(pTsdb->pVnode), uid); code = tsdbCacheLoadFromRocks(pTsdb, uid, pLastArray, remainCols, pr, ltype); taosThreadMutexUnlock(&pTsdb->lruMutex); @@ -1173,8 +1183,10 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen); if (h) { SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h); - if (pLastCol->dirty && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { + if (pLastCol->dirty) { pLastCol->dirty = 0; + } + if (pLastCol->ts <= eKey && pLastCol->ts >= sKey) { erase = true; } taosLRUCacheRelease(pTsdb->lruCache, h, erase); @@ -1187,8 +1199,10 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[num_keys + i], klen); if (h) { SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h); - if (pLastCol->dirty && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { + if (pLastCol->dirty) { pLastCol->dirty = 0; + } + if (pLastCol->ts <= eKey && pLastCol->ts >= sKey) { erase = true; } taosLRUCacheRelease(pTsdb->lruCache, h, erase); @@ -1866,7 +1880,7 @@ static int32_t lastIterOpen(SFSLastIter *iter, STFileSet *pFileSet, STsdb *pTsdb .suid = suid, .pTsdb = pTsdb, .timewindow = (STimeWindow){.skey = lastTs, .ekey = TSKEY_MAX}, - .verRange = 
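// --- Reviewer sketch, not part of the patch --------------------------------
// The tsdbCacheDel hunks above decouple clearing the dirty flag from
// deciding whether to erase the LRU entry: dirty is now always cleared,
// while erase depends only on whether the cached timestamp falls inside
// [sKey, eKey] (previously both conditions had to hold together). Reduced
// model of the new decision.
#include <stdbool.h>
#include <stdint.h>

typedef struct { bool dirty; int64_t ts; } CachedCol;

// Returns true when the entry should be erased for a delete of [sKey, eKey].
static bool on_cache_del(CachedCol *c, int64_t sKey, int64_t eKey) {
  if (c->dirty) c->dirty = false;           // never write back a stale value
  return (c->ts >= sKey && c->ts <= eKey);  // erase iff the row was deleted
}
// ----------------------------------------------------------------------------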
(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, + .verRange = (SVersionRange){.minVer = 0, .maxVer = INT64_MAX}, .strictTimeRange = false, .pSchema = pTSchema, .pCurrentFileset = pFileSet, @@ -1941,6 +1955,7 @@ typedef struct SFSNextRowIter { SArray *pIndexList; int32_t iBrinIndex; SBrinBlock brinBlock; + SBrinBlock *pBrinBlock; int32_t iBrinRecord; SBrinRecord brinRecord; SBlockData blockData; @@ -1975,9 +1990,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie if (SFSNEXTROW_FILESET == state->state) { _next_fileset: - if (--state->iFileSet < 0) { - clearLastFileSet(state); + clearLastFileSet(state); + if (--state->iFileSet < 0) { *ppRow = NULL; return code; } else { @@ -2129,6 +2144,11 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie pBrinBlk = taosArrayGet(state->pIndexList, state->iBrinIndex); } + if (!state->pBrinBlock) { + state->pBrinBlock = &state->brinBlock; + } else { + tBrinBlockClear(&state->brinBlock); + } code = tsdbDataFileReadBrinBlock(state->pr->pFileReader, pBrinBlk, &state->brinBlock); if (code != TSDB_CODE_SUCCESS) { goto _err; @@ -2406,6 +2426,16 @@ int32_t clearNextRowFromFS(void *iter) { state->pBlockData = NULL; } + if (state->pBrinBlock) { + tBrinBlockDestroy(state->pBrinBlock); + state->pBrinBlock = NULL; + } + + if (state->pIndexList) { + taosArrayDestroy(state->pIndexList); + state->pIndexList = NULL; + } + if (state->pTSRow) { taosMemoryFree(state->pTSRow); state->pTSRow = NULL; @@ -2743,6 +2773,13 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC break; } SLastCol *pCol = taosArrayGet(pColArray, iCol); + if (slotIds[iCol] > pTSchema->numOfCols - 1) { + if (!setNoneCol) { + noneCol = iCol; + setNoneCol = true; + } + continue; + } if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { continue; } @@ -2825,7 +2862,9 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC taosArraySet(pColArray, iCol, &lastCol); int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ); - taosArrayRemove(aColArray, aColIndex); + if (aColIndex >= 0) { + taosArrayRemove(aColArray, aColIndex); + } } else if (!COL_VAL_IS_VALUE(tColVal) && !COL_VAL_IS_VALUE(pColVal) && !setNoneCol) { noneCol = iCol; setNoneCol = true; @@ -2917,6 +2956,9 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, break; } SLastCol *pCol = taosArrayGet(pColArray, iCol); + if (slotIds[iCol] > pTSchema->numOfCols - 1) { + continue; + } if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { continue; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 55ae25aee4..37413ef920 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -15,92 +15,6 @@ #include "tsdb.h" -typedef enum { MEMORY_DATA_ITER = 0, STT_DATA_ITER } EDataIterT; - -#define USE_STREAM_COMPRESSION 0 - -typedef struct { - SRBTreeNode n; - SRowInfo r; - EDataIterT type; - union { - struct { - int32_t iTbDataP; - STbDataIter iter; - }; // memory data iter - struct { - int32_t iStt; - SArray *aSttBlk; - int32_t iSttBlk; - SBlockData bData; - int32_t iRow; - }; // stt file data iter - }; -} SDataIter; - -typedef struct { - STsdb *pTsdb; - /* commit data */ - int64_t commitID; - int32_t minutes; - int8_t precision; - int32_t minRow; - int32_t maxRow; - int8_t cmprAlg; - int8_t sttTrigger; - SArray *aTbDataP; // memory - STsdbFS fs; // 
disk - // -------------- - TSKEY nextKey; // reset by each table commit - int32_t commitFid; - int32_t expLevel; - TSKEY minKey; - TSKEY maxKey; - // commit file data - struct { - SDataFReader *pReader; - SArray *aBlockIdx; // SArray - int32_t iBlockIdx; - SBlockIdx *pBlockIdx; - SMapData mBlock; // SMapData - SBlockData bData; - } dReader; - struct { - SDataIter *pIter; - SRBTree rbt; - SDataIter dataIter; - SDataIter aDataIter[TSDB_STT_TRIGGER_ARRAY_SIZE]; - int8_t toLastOnly; - }; - struct { - SDataFWriter *pWriter; - SArray *aBlockIdx; // SArray - SArray *aSttBlk; // SArray - SMapData mBlock; // SMapData - SBlockData bData; -#if USE_STREAM_COMPRESSION - SDiskDataBuilder *pBuilder; -#else - SBlockData bDatal; -#endif - } dWriter; - SSkmInfo skmTable; - SSkmInfo skmRow; - /* commit del */ - SDelFReader *pDelFReader; - SDelFWriter *pDelFWriter; - SArray *aDelIdx; // SArray - SArray *aDelIdxN; // SArray - SArray *aDelData; // SArray -} SCommitter; - -static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter, SCommitInfo *pInfo); -static int32_t tsdbCommitData(SCommitter *pCommitter); -static int32_t tsdbCommitDel(SCommitter *pCommitter); -static int32_t tsdbCommitCache(SCommitter *pCommitter); -static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno); -static int32_t tsdbNextCommitRow(SCommitter *pCommitter); - int32_t tRowInfoCmprFn(const void *p1, const void *p2) { SRowInfo *pInfo1 = (SRowInfo *)p1; SRowInfo *pInfo2 = (SRowInfo *)p2; @@ -150,1545 +64,3 @@ _exit: } return code; } - -int32_t tsdbPrepareCommit(STsdb *pTsdb) { - taosThreadMutexLock(&pTsdb->mutex); - ASSERT(pTsdb->imem == NULL); - pTsdb->imem = pTsdb->mem; - pTsdb->mem = NULL; - taosThreadMutexUnlock(&pTsdb->mutex); - - return 0; -} - -int32_t tsdbCommit(STsdb *pTsdb, SCommitInfo *pInfo) { - if (!pTsdb) return 0; - - int32_t code = 0; - int32_t lino = 0; - SCommitter commith; - SMemTable *pMemTable = pTsdb->imem; - - // check - if (pMemTable->nRow == 0 && pMemTable->nDel == 0) { - taosThreadMutexLock(&pTsdb->mutex); - pTsdb->imem = NULL; - taosThreadMutexUnlock(&pTsdb->mutex); - - tsdbUnrefMemTable(pMemTable, NULL, true); - goto _exit; - } - - // start commit - code = tsdbStartCommit(pTsdb, &commith, pInfo); - TSDB_CHECK_CODE(code, lino, _exit); - - // commit impl - code = tsdbCommitData(&commith); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbCommitDel(&commith); - TSDB_CHECK_CODE(code, lino, _exit); - - // end commit - code = tsdbEndCommit(&commith, 0); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbEndCommit(&commith, code); - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitDelStart(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - STsdb *pTsdb = pCommitter->pTsdb; - SMemTable *pMemTable = pTsdb->imem; - - if ((pCommitter->aDelIdx = taosArrayInit(0, sizeof(SDelIdx))) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - if ((pCommitter->aDelData = taosArrayInit(0, sizeof(SDelData))) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - if ((pCommitter->aDelIdxN = taosArrayInit(0, sizeof(SDelIdx))) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - SDelFile *pDelFileR = pCommitter->fs.pDelFile; - if (pDelFileR) { - code = tsdbDelFReaderOpen(&pCommitter->pDelFReader, pDelFileR, pTsdb); - TSDB_CHECK_CODE(code, lino, _exit); - - code = 
tsdbReadDelIdx(pCommitter->pDelFReader, pCommitter->aDelIdx); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // prepare new - SDelFile wDelFile = {.commitID = pCommitter->commitID, .size = 0, .offset = 0}; - code = tsdbDelFWriterOpen(&pCommitter->pDelFWriter, &wDelFile, pTsdb); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } else { - tsdbDebug("vgId:%d, commit del start", TD_VID(pTsdb->pVnode)); - } - return code; -} - -static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDelIdx *pDelIdx) { - int32_t code = 0; - int32_t lino = 0; - SDelData *pDelData; - tb_uid_t suid; - tb_uid_t uid; - - if (pTbData) { - suid = pTbData->suid; - uid = pTbData->uid; - - if (pTbData->pHead == NULL) { - pTbData = NULL; - } - } - - if (pDelIdx) { - suid = pDelIdx->suid; - uid = pDelIdx->uid; - - code = tsdbReadDelDatav1(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData, INT64_MAX); - TSDB_CHECK_CODE(code, lino, _exit); - } else { - taosArrayClear(pCommitter->aDelData); - } - - if (pTbData == NULL && pDelIdx == NULL) goto _exit; - - SDelIdx delIdx = {.suid = suid, .uid = uid}; - - // memory - pDelData = pTbData ? pTbData->pHead : NULL; - for (; pDelData; pDelData = pDelData->pNext) { - if (taosArrayPush(pCommitter->aDelData, pDelData) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - // write - code = tsdbWriteDelData(pCommitter->pDelFWriter, pCommitter->aDelData, &delIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - // put delIdx - if (taosArrayPush(pCommitter->aDelIdxN, &delIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitDelEnd(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - STsdb *pTsdb = pCommitter->pTsdb; - - code = tsdbWriteDelIdx(pCommitter->pDelFWriter, pCommitter->aDelIdxN); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbUpdateDelFileHdr(pCommitter->pDelFWriter); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbFSUpsertDelFile(&pCommitter->fs, &pCommitter->pDelFWriter->fDel); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbDelFWriterClose(&pCommitter->pDelFWriter, 1); - TSDB_CHECK_CODE(code, lino, _exit); - - if (pCommitter->pDelFReader) { - code = tsdbDelFReaderClose(&pCommitter->pDelFReader); - TSDB_CHECK_CODE(code, lino, _exit); - } - - taosArrayDestroy(pCommitter->aDelIdx); - taosArrayDestroy(pCommitter->aDelData); - taosArrayDestroy(pCommitter->aDelIdxN); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -int32_t tsdbUpdateTableSchema(SMeta *pMeta, int64_t suid, int64_t uid, SSkmInfo *pSkmInfo) { - int32_t code = 0; - int32_t lino = 0; - - if (suid) { - if (pSkmInfo->suid == suid) { - pSkmInfo->uid = uid; - goto _exit; - } - } else { - if (pSkmInfo->uid == uid) goto _exit; - } - - pSkmInfo->suid = suid; - pSkmInfo->uid = uid; - tDestroyTSchema(pSkmInfo->pTSchema); - code = metaGetTbTSchemaEx(pMeta, suid, uid, -1, &pSkmInfo->pTSchema); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - return code; -} - -static int32_t tsdbCommitterUpdateRowSchema(SCommitter *pCommitter, int64_t suid, int64_t uid, int32_t sver) 
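// --- Reviewer sketch, not part of the patch --------------------------------
// tsdbCommitterUpdateRowSchema here (removed in this refactor) caches the
// decoded row schema and only re-fetches it from meta when (suid, uid, sver)
// no longer match; child tables of the same super table share one schema.
// Generic memoization shape with hypothetical types.
#include <stdint.h>

typedef struct { int version; } TSchema;
typedef struct { int64_t suid; int64_t uid; TSchema *schema; } SchemaCache;

static TSchema *fetch_schema(int64_t suid, int64_t uid, int sver) {
  static TSchema s;                 // stand-in for metaGetTbTSchemaEx
  s.version = sver; (void)suid; (void)uid;
  return &s;
}

static TSchema *get_row_schema(SchemaCache *c, int64_t suid, int64_t uid, int sver) {
  if (c->schema && c->suid == suid &&
      (suid != 0 || c->uid == uid) &&  // normal tables must match on uid
      c->schema->version == sver) {
    return c->schema;                  // cache hit: skip the meta lookup
  }
  c->suid = suid; c->uid = uid;
  c->schema = fetch_schema(suid, uid, sver);
  return c->schema;
}
// ----------------------------------------------------------------------------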
{ - int32_t code = 0; - int32_t lino = 0; - - if (pCommitter->skmRow.pTSchema) { - if (pCommitter->skmRow.suid == suid) { - if (suid == 0) { - if (pCommitter->skmRow.uid == uid && sver == pCommitter->skmRow.pTSchema->version) goto _exit; - } else { - if (sver == pCommitter->skmRow.pTSchema->version) goto _exit; - } - } - } - - pCommitter->skmRow.suid = suid; - pCommitter->skmRow.uid = uid; - tDestroyTSchema(pCommitter->skmRow.pTSchema); - code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, sver, &pCommitter->skmRow.pTSchema); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - return code; -} - -static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - ASSERT(pCommitter->dReader.pBlockIdx); - - pCommitter->dReader.iBlockIdx++; - if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) { - pCommitter->dReader.pBlockIdx = - (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx); - - code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock); - TSDB_CHECK_CODE(code, lino, _exit); - - ASSERT(pCommitter->dReader.mBlock.nItem > 0); - } else { - pCommitter->dReader.pBlockIdx = NULL; - } - -_exit: - return code; -} - -static int32_t tDataIterCmprFn(const SRBTreeNode *n1, const SRBTreeNode *n2) { - SDataIter *pIter1 = (SDataIter *)((uint8_t *)n1 - offsetof(SDataIter, n)); - SDataIter *pIter2 = (SDataIter *)((uint8_t *)n2 - offsetof(SDataIter, n)); - - return tRowInfoCmprFn(&pIter1->r, &pIter2->r); -} - -static int32_t tsdbOpenCommitIter(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - pCommitter->pIter = NULL; - tRBTreeCreate(&pCommitter->rbt, tDataIterCmprFn); - - // memory - TSDBKEY tKey = {.ts = pCommitter->minKey, .version = VERSION_MIN}; - SDataIter *pIter = &pCommitter->dataIter; - pIter->type = MEMORY_DATA_ITER; - pIter->iTbDataP = 0; - for (; pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP); pIter->iTbDataP++) { - STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP); - tsdbTbDataIterOpen(pTbData, &tKey, 0, &pIter->iter); - TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { - pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow)); - pRow = NULL; - } - - if (pRow == NULL) continue; - - pIter->r.suid = pTbData->suid; - pIter->r.uid = pTbData->uid; - pIter->r.row = *pRow; - break; - } - ASSERT(pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP)); - tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter); - - // disk - pCommitter->toLastOnly = 0; - SDataFReader *pReader = pCommitter->dReader.pReader; - if (pReader) { - if (pReader->pSet->nSttF >= pCommitter->sttTrigger) { - int8_t iIter = 0; - for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) { - pIter = &pCommitter->aDataIter[iIter]; - pIter->type = STT_DATA_ITER; - pIter->iStt = iStt; - - code = tsdbReadSttBlk(pCommitter->dReader.pReader, iStt, pIter->aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - if (taosArrayGetSize(pIter->aSttBlk) == 0) continue; - - pIter->iSttBlk = 0; - SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, 0); - code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, iStt, pSttBlk, &pIter->bData); - TSDB_CHECK_CODE(code, lino, _exit); - - pIter->iRow = 0; - pIter->r.suid = pIter->bData.suid; - pIter->r.uid = pIter->bData.uid ? 
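// --- Reviewer sketch, not part of the patch --------------------------------
// The commit iterator being removed here merges rows from the memtable and
// every stt file through a red-black tree ordered by (suid, uid, ts,
// version): each source is one iterator node, the tree minimum yields the
// next row, and an advanced iterator is re-inserted. Same k-way-merge shape,
// with a plain array scan standing in for tRBTreeMin.
#include <stdint.h>
#include <stdio.h>

typedef struct { const int64_t *rows; int n, i; } Iter;  // one sorted source

static Iter *pick_min(Iter *its, int k) {
  Iter *best = NULL;
  for (int j = 0; j < k; j++)
    if (its[j].i < its[j].n && (!best || its[j].rows[its[j].i] < best->rows[best->i]))
      best = &its[j];
  return best;
}

static void merge_all(Iter *its, int k) {
  for (Iter *m = pick_min(its, k); m; m = pick_min(its, k)) {
    printf("%lld\n", (long long)m->rows[m->i]);  // emit next row in order
    m->i++;                                      // advance and "re-insert"
  }
}
// ----------------------------------------------------------------------------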
pIter->bData.uid : pIter->bData.aUid[0]; - pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0); - - tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter); - iIter++; - } - } else { - for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) { - SSttFile *pSttFile = pReader->pSet->aSttF[iStt]; - if (pSttFile->size > pSttFile->offset) { - pCommitter->toLastOnly = 1; - break; - } - } - } - } - - code = tsdbNextCommitRow(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - STsdb *pTsdb = pCommitter->pTsdb; - SDFileSet *pRSet = NULL; - - // memory - pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision); - pCommitter->expLevel = tsdbFidLevel(pCommitter->commitFid, &pCommitter->pTsdb->keepCfg, taosGetTimestampSec()); - tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey, - &pCommitter->maxKey); -#if 0 - ASSERT(pCommitter->minKey <= pCommitter->nextKey && pCommitter->maxKey >= pCommitter->nextKey); -#endif - - pCommitter->nextKey = TSKEY_MAX; - - // Reader - SDFileSet tDFileSet = {.fid = pCommitter->commitFid}; - pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &tDFileSet, tDFileSetCmprFn, TD_EQ); - if (pRSet) { - code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet); - TSDB_CHECK_CODE(code, lino, _exit); - - // data - code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - pCommitter->dReader.iBlockIdx = 0; - if (taosArrayGetSize(pCommitter->dReader.aBlockIdx) > 0) { - pCommitter->dReader.pBlockIdx = (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, 0); - code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock); - TSDB_CHECK_CODE(code, lino, _exit); - } else { - pCommitter->dReader.pBlockIdx = NULL; - } - tBlockDataReset(&pCommitter->dReader.bData); - } else { - pCommitter->dReader.pBlockIdx = NULL; - } - - // Writer - SHeadFile fHead = {.commitID = pCommitter->commitID}; - SDataFile fData = {.commitID = pCommitter->commitID}; - SSmaFile fSma = {.commitID = pCommitter->commitID}; - SSttFile fStt = {.commitID = pCommitter->commitID}; - SDFileSet wSet = {.fid = pCommitter->commitFid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma}; - if (pRSet) { - ASSERT(pRSet->nSttF <= pCommitter->sttTrigger); - fData = *pRSet->pDataF; - fSma = *pRSet->pSmaF; - wSet.diskId = pRSet->diskId; - if (pRSet->nSttF < pCommitter->sttTrigger) { - for (int32_t iStt = 0; iStt < pRSet->nSttF; iStt++) { - wSet.aSttF[iStt] = pRSet->aSttF[iStt]; - } - wSet.nSttF = pRSet->nSttF + 1; - } else { - wSet.nSttF = 1; - } - } else { - SDiskID did = {0}; - if (tfsAllocDisk(pTsdb->pVnode->pTfs, pCommitter->expLevel, &did) < 0) { - code = terrno; - TSDB_CHECK_CODE(code, lino, _exit); - } - code = tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did); - TSDB_CHECK_CODE(code, lino, _exit); - wSet.diskId = did; - wSet.nSttF = 1; - } - wSet.aSttF[wSet.nSttF - 1] = &fStt; - code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet); - TSDB_CHECK_CODE(code, lino, _exit); - - taosArrayClear(pCommitter->dWriter.aBlockIdx); - taosArrayClear(pCommitter->dWriter.aSttBlk); - 
tMapDataReset(&pCommitter->dWriter.mBlock); - tBlockDataReset(&pCommitter->dWriter.bData); -#if USE_STREAM_COMPRESSION - tDiskDataBuilderClear(pCommitter->dWriter.pBuilder); -#else - tBlockDataReset(&pCommitter->dWriter.bDatal); -#endif - - // open iter - code = tsdbOpenCommitIter(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -int32_t tsdbWriteDataBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SMapData *mDataBlk, int8_t cmprAlg) { - int32_t code = 0; - int32_t lino = 0; - - if (pBlockData->nRow == 0) return code; - - SDataBlk dataBlk; - tDataBlkReset(&dataBlk); - - // info - dataBlk.nRow += pBlockData->nRow; - for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { - TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]}; - - if (iRow == 0) { - if (tsdbKeyCmprFn(&dataBlk.minKey, &key) > 0) { - dataBlk.minKey = key; - } - } else { - if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) { - dataBlk.hasDup = 1; - } - } - - if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&dataBlk.maxKey, &key) < 0) { - dataBlk.maxKey = key; - } - - dataBlk.minVer = TMIN(dataBlk.minVer, key.version); - dataBlk.maxVer = TMAX(dataBlk.maxVer, key.version); - } - - // write - dataBlk.nSubBlock++; - code = tsdbWriteBlockData(pWriter, pBlockData, &dataBlk.aSubBlock[dataBlk.nSubBlock - 1], - ((dataBlk.nSubBlock == 1) && !dataBlk.hasDup) ? &dataBlk.smaInfo : NULL, cmprAlg, 0); - TSDB_CHECK_CODE(code, lino, _exit); - - // put SDataBlk - code = tMapDataPutItem(mDataBlk, &dataBlk, tPutDataBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - // clear - tBlockDataClear(pBlockData); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -int32_t tsdbWriteSttBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SArray *aSttBlk, int8_t cmprAlg) { - int32_t code = 0; - int32_t lino = 0; - SSttBlk sstBlk; - - if (pBlockData->nRow == 0) return code; - - // info - sstBlk.suid = pBlockData->suid; - sstBlk.nRow = pBlockData->nRow; - sstBlk.minKey = TSKEY_MAX; - sstBlk.maxKey = TSKEY_MIN; - sstBlk.minVer = VERSION_MAX; - sstBlk.maxVer = VERSION_MIN; - for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { - sstBlk.minKey = TMIN(sstBlk.minKey, pBlockData->aTSKEY[iRow]); - sstBlk.maxKey = TMAX(sstBlk.maxKey, pBlockData->aTSKEY[iRow]); - sstBlk.minVer = TMIN(sstBlk.minVer, pBlockData->aVersion[iRow]); - sstBlk.maxVer = TMAX(sstBlk.maxVer, pBlockData->aVersion[iRow]); - } - sstBlk.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0]; - sstBlk.maxUid = pBlockData->uid ? 
pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1]; - - // write - code = tsdbWriteBlockData(pWriter, pBlockData, &sstBlk.bInfo, NULL, cmprAlg, 1); - TSDB_CHECK_CODE(code, lino, _exit); - - // push SSttBlk - if (taosArrayPush(aSttBlk, &sstBlk) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - // clear - tBlockDataClear(pBlockData); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitSttBlk(SDataFWriter *pWriter, SDiskDataBuilder *pBuilder, SArray *aSttBlk) { - int32_t code = 0; - int32_t lino = 0; - - if (pBuilder->nRow == 0) return code; - - // gnrt - const SDiskData *pDiskData; - const SBlkInfo *pBlkInfo; - code = tGnrtDiskData(pBuilder, &pDiskData, &pBlkInfo); - TSDB_CHECK_CODE(code, lino, _exit); - - SSttBlk sttBlk = {.suid = pBuilder->suid, - .minUid = pBlkInfo->minUid, - .maxUid = pBlkInfo->maxUid, - .minKey = pBlkInfo->minKey, - .maxKey = pBlkInfo->maxKey, - .minVer = pBlkInfo->minVer, - .maxVer = pBlkInfo->maxVer, - .nRow = pBuilder->nRow}; - // write - code = tsdbWriteDiskData(pWriter, pDiskData, &sttBlk.bInfo, NULL); - TSDB_CHECK_CODE(code, lino, _exit); - - // push - if (taosArrayPush(aSttBlk, &sttBlk) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - // clear - tDiskDataBuilderClear(pBuilder); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - // write aBlockIdx - code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - // write aSttBlk - code = tsdbWriteSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - // update file header - code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter); - TSDB_CHECK_CODE(code, lino, _exit); - - // upsert SDFileSet - code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet); - TSDB_CHECK_CODE(code, lino, _exit); - - // close and sync - code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1); - TSDB_CHECK_CODE(code, lino, _exit); - - if (pCommitter->dReader.pReader) { - code = tsdbDataFReaderClose(&pCommitter->dReader.pReader); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) { - int32_t code = 0; - int32_t lino = 0; - - while (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) < 0) { - SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx; - code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - code = tsdbCommitterNextTableData(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return 
code; -} - -static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter); -static int32_t tsdbCommitFileData(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - STsdb *pTsdb = pCommitter->pTsdb; - SMemTable *pMemTable = pTsdb->imem; - - // commit file data start - code = tsdbCommitFileDataStart(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - // impl - code = tsdbCommitFileDataImpl(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - // commit file data end - code = tsdbCommitFileDataEnd(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - tsdbDataFReaderClose(&pCommitter->dReader.pReader); - tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0); - } - return code; -} - -// ---------------------------------------------------------------------------- -static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter, SCommitInfo *pInfo) { - int32_t code = 0; - int32_t lino = 0; - - memset(pCommitter, 0, sizeof(*pCommitter)); - ASSERT(pTsdb->imem && "last tsdb commit incomplete"); - - pCommitter->pTsdb = pTsdb; - pCommitter->commitID = pInfo->info.state.commitID; - pCommitter->minutes = pTsdb->keepCfg.days; - pCommitter->precision = pTsdb->keepCfg.precision; - pCommitter->minRow = pInfo->info.config.tsdbCfg.minRows; - pCommitter->maxRow = pInfo->info.config.tsdbCfg.maxRows; - pCommitter->cmprAlg = pInfo->info.config.tsdbCfg.compression; - pCommitter->sttTrigger = pInfo->info.config.sttTrigger; - pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem); - if (pCommitter->aTbDataP == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - code = tsdbFSCopy(pTsdb, &pCommitter->fs); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitDataStart(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - // reader - pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pCommitter->dReader.aBlockIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - code = tBlockDataCreate(&pCommitter->dReader.bData); - TSDB_CHECK_CODE(code, lino, _exit); - - // merger - for (int32_t iStt = 0; iStt < TSDB_STT_TRIGGER_ARRAY_SIZE; iStt++) { - SDataIter *pIter = &pCommitter->aDataIter[iStt]; - pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk)); - if (pIter->aSttBlk == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - code = tBlockDataCreate(&pIter->bData); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // writer - pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pCommitter->dWriter.aBlockIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - pCommitter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk)); - if (pCommitter->dWriter.aSttBlk == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - code = tBlockDataCreate(&pCommitter->dWriter.bData); - TSDB_CHECK_CODE(code, lino, _exit); - -#if USE_STREAM_COMPRESSION - code = tDiskDataBuilderCreate(&pCommitter->dWriter.pBuilder); -#else - code = tBlockDataCreate(&pCommitter->dWriter.bDatal); -#endif - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d 
since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static void tsdbCommitDataEnd(SCommitter *pCommitter) { - // reader - taosArrayDestroy(pCommitter->dReader.aBlockIdx); - tMapDataClear(&pCommitter->dReader.mBlock); - tBlockDataDestroy(&pCommitter->dReader.bData); - - // merger - for (int32_t iStt = 0; iStt < TSDB_STT_TRIGGER_ARRAY_SIZE; iStt++) { - SDataIter *pIter = &pCommitter->aDataIter[iStt]; - taosArrayDestroy(pIter->aSttBlk); - tBlockDataDestroy(&pIter->bData); - } - - // writer - taosArrayDestroy(pCommitter->dWriter.aBlockIdx); - taosArrayDestroy(pCommitter->dWriter.aSttBlk); - tMapDataClear(&pCommitter->dWriter.mBlock); - tBlockDataDestroy(&pCommitter->dWriter.bData); -#if USE_STREAM_COMPRESSION - tDiskDataBuilderDestroy(pCommitter->dWriter.pBuilder); -#else - tBlockDataDestroy(&pCommitter->dWriter.bDatal); -#endif - tDestroyTSchema(pCommitter->skmTable.pTSchema); - tDestroyTSchema(pCommitter->skmRow.pTSchema); -} - -static int32_t tsdbCommitData(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - STsdb *pTsdb = pCommitter->pTsdb; - SMemTable *pMemTable = pTsdb->imem; - - // check - if (pMemTable->nRow == 0) goto _exit; - - // start ==================== - code = tsdbCommitDataStart(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - // impl ==================== - pCommitter->nextKey = pMemTable->minKey; - while (pCommitter->nextKey < TSKEY_MAX) { - code = tsdbCommitFileData(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // end ==================== - tsdbCommitDataEnd(pCommitter); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitDel(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - STsdb *pTsdb = pCommitter->pTsdb; - SMemTable *pMemTable = pTsdb->imem; - - if (pMemTable->nDel == 0) { - goto _exit; - } - - // start - code = tsdbCommitDelStart(pCommitter); - if (code) { - TSDB_CHECK_CODE(code, lino, _exit); - } - - // impl - int32_t iDelIdx = 0; - int32_t nDelIdx = taosArrayGetSize(pCommitter->aDelIdx); - int32_t iTbData = 0; - int32_t nTbData = taosArrayGetSize(pCommitter->aTbDataP); - STbData *pTbData; - SDelIdx *pDelIdx; - - ASSERT(nTbData > 0); - - pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData); - pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL; - while (true) { - if (pTbData == NULL && pDelIdx == NULL) break; - - if (pTbData && pDelIdx) { - int32_t c = tTABLEIDCmprFn(pTbData, pDelIdx); - - if (c == 0) { - goto _commit_mem_and_disk_del; - } else if (c < 0) { - goto _commit_mem_del; - } else { - goto _commit_disk_del; - } - } else if (pTbData) { - goto _commit_mem_del; - } else { - goto _commit_disk_del; - } - - _commit_mem_del: - code = tsdbCommitTableDel(pCommitter, pTbData, NULL); - TSDB_CHECK_CODE(code, lino, _exit); - - iTbData++; - pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL; - continue; - - _commit_disk_del: - code = tsdbCommitTableDel(pCommitter, NULL, pDelIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - iDelIdx++; - pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL; - continue; - - _commit_mem_and_disk_del: - code = tsdbCommitTableDel(pCommitter, pTbData, pDelIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - iTbData++; - pTbData = (iTbData < nTbData) ? 
(STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL; - iDelIdx++; - pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL; - continue; - } - - // end - code = tsdbCommitDelEnd(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } else { - tsdbDebug("vgId:%d, commit del done, nDel:%" PRId64, TD_VID(pTsdb->pVnode), pMemTable->nDel); - } - return code; -} - -static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno) { - int32_t code = 0; - int32_t lino = 0; - STsdb *pTsdb = pCommitter->pTsdb; - - if (eno) { - code = eno; - TSDB_CHECK_CODE(code, lino, _exit); - } else { - code = tsdbFSPrepareCommit(pCommitter->pTsdb, &pCommitter->fs); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - tsdbFSDestroy(&pCommitter->fs); - taosArrayDestroy(pCommitter->aTbDataP); - pCommitter->aTbDataP = NULL; - if (code || eno) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } else { - tsdbInfo("vgId:%d, tsdb end commit", TD_VID(pTsdb->pVnode)); - } - return code; -} - -// ================================================================================ - -static FORCE_INLINE SRowInfo *tsdbGetCommitRow(SCommitter *pCommitter) { - return (pCommitter->pIter) ? &pCommitter->pIter->r : NULL; -} - -static int32_t tsdbNextCommitRow(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - if (pCommitter->pIter) { - SDataIter *pIter = pCommitter->pIter; - if (pCommitter->pIter->type == MEMORY_DATA_ITER) { // memory - tsdbTbDataIterNext(&pIter->iter); - TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter); - while (true) { - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { - pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow)); - pRow = NULL; - } - - if (pRow) { - pIter->r.suid = pIter->iter.pTbData->suid; - pIter->r.uid = pIter->iter.pTbData->uid; - pIter->r.row = *pRow; - break; - } - - pIter->iTbDataP++; - if (pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP)) { - STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP); - TSDBKEY keyFrom = {.ts = pCommitter->minKey, .version = VERSION_MIN}; - tsdbTbDataIterOpen(pTbData, &keyFrom, 0, &pIter->iter); - pRow = tsdbTbDataIterGet(&pIter->iter); - continue; - } else { - pCommitter->pIter = NULL; - break; - } - } - } else if (pCommitter->pIter->type == STT_DATA_ITER) { // last file - pIter->iRow++; - if (pIter->iRow < pIter->bData.nRow) { - pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow]; - pIter->r.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow); - } else { - pIter->iSttBlk++; - if (pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk)) { - SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk); - - code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, pIter->iStt, pSttBlk, &pIter->bData); - if (code) goto _exit; - - pIter->iRow = 0; - pIter->r.suid = pIter->bData.suid; - pIter->r.uid = pIter->bData.uid ? 
pIter->bData.uid : pIter->bData.aUid[0]; - pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0); - } else { - pCommitter->pIter = NULL; - } - } - } else { - ASSERT(0); - } - - // compare with min in RB Tree - pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt); - if (pCommitter->pIter && pIter) { - int32_t c = tRowInfoCmprFn(&pCommitter->pIter->r, &pIter->r); - if (c > 0) { - tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter); - pCommitter->pIter = NULL; - } else { - ASSERT(c); - } - } - } - - if (pCommitter->pIter == NULL) { - pCommitter->pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt); - if (pCommitter->pIter) { - tRBTreeDrop(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter); - } - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitAheadBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) { - int32_t code = 0; - int32_t lino = 0; - - SBlockData *pBlockData = &pCommitter->dWriter.bData; - SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter); - TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid}; - - tBlockDataClear(pBlockData); - while (pRowInfo) { - code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row)); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, pCommitter->skmRow.pTSchema, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbNextCommitRow(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo) { - if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) { - pRowInfo = NULL; - } else { - TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row); - if (tsdbKeyCmprFn(&tKey, &pDataBlk->minKey) >= 0) pRowInfo = NULL; - } - } - - if (pBlockData->nRow >= pCommitter->maxRow) { - code = - tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitMergeBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) { - int32_t code = 0; - int32_t lino = 0; - - SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter); - TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid}; - SBlockData *pBDataR = &pCommitter->dReader.bData; - SBlockData *pBDataW = &pCommitter->dWriter.bData; - - code = tsdbReadDataBlock(pCommitter->dReader.pReader, pDataBlk, pBDataR); - TSDB_CHECK_CODE(code, lino, _exit); - - tBlockDataClear(pBDataW); - int32_t iRow = 0; - TSDBROW row = tsdbRowFromBlockData(pBDataR, 0); - TSDBROW *pRow = &row; - - while (pRow && pRowInfo) { - int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row); - if (c < 0) { - code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - iRow++; - if (iRow < pBDataR->nRow) { - row = tsdbRowFromBlockData(pBDataR, iRow); - } else { - pRow = NULL; - } - } else if (c > 0) { - code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row)); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tBlockDataAppendRow(pBDataW, &pRowInfo->row, 
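// --- Reviewer sketch, not part of the patch --------------------------------
// tsdbCommitMergeBlock interleaves rows from the on-disk block and the
// commit iterator in key order, flushing the write buffer whenever it
// reaches maxRow (duplicate keys are asserted away). Reduced two-way merge
// with a flush threshold; MAX_ROW is an arbitrary stand-in for maxRow.
#include <stdio.h>

#define MAX_ROW 4

static void flush(int *buf, int *n) {
  if (*n == 0) return;
  printf("flush %d rows\n", *n);
  *n = 0; (void)buf;
}

static void merge_block(const int *disk, int nd, const int *mem, int nm) {
  int buf[MAX_ROW], n = 0, i = 0, j = 0;
  while (i < nd || j < nm) {
    int take = (j >= nm || (i < nd && disk[i] < mem[j])) ? disk[i++] : mem[j++];
    buf[n++] = take;
    if (n >= MAX_ROW) flush(buf, &n);  // mirrors nRow >= pCommitter->maxRow
  }
  flush(buf, &n);                      // final partial block
}
// ----------------------------------------------------------------------------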
pCommitter->skmRow.pTSchema, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbNextCommitRow(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo) { - if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) { - pRowInfo = NULL; - } else { - TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row); - if (tsdbKeyCmprFn(&tKey, &pDataBlk->maxKey) > 0) pRowInfo = NULL; - } - } - } else { - ASSERT(0 && "dup rows not allowed"); - } - - if (pBDataW->nRow >= pCommitter->maxRow) { - code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - while (pRow) { - code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - iRow++; - if (iRow < pBDataR->nRow) { - row = tsdbRowFromBlockData(pBDataR, iRow); - } else { - pRow = NULL; - } - - if (pBDataW->nRow >= pCommitter->maxRow) { - code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbMergeTableData(SCommitter *pCommitter, TABLEID id) { - int32_t code = 0; - int32_t lino = 0; - - SBlockIdx *pBlockIdx = pCommitter->dReader.pBlockIdx; - - ASSERT(pBlockIdx == NULL || tTABLEIDCmprFn(pBlockIdx, &id) >= 0); - if (pBlockIdx && pBlockIdx->suid == id.suid && pBlockIdx->uid == id.uid) { - int32_t iBlock = 0; - SDataBlk block; - SDataBlk *pDataBlk = █ - SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter); - - ASSERT(pRowInfo->suid == id.suid && pRowInfo->uid == id.uid); - - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk); - while (pDataBlk && pRowInfo) { - SDataBlk tBlock = {.minKey = TSDBROW_KEY(&pRowInfo->row), .maxKey = TSDBROW_KEY(&pRowInfo->row)}; - int32_t c = tDataBlkCmprFn(pDataBlk, &tBlock); - - if (c < 0) { - code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - iBlock++; - if (iBlock < pCommitter->dReader.mBlock.nItem) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk); - } else { - pDataBlk = NULL; - } - } else if (c > 0) { - code = tsdbCommitAheadBlock(pCommitter, pDataBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL; - } else { - code = tsdbCommitMergeBlock(pCommitter, pDataBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - iBlock++; - if (iBlock < pCommitter->dReader.mBlock.nItem) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk); - } else { - pDataBlk = NULL; - } - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL; - } - } - - while (pDataBlk) { - code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - iBlock++; - if (iBlock < pCommitter->dReader.mBlock.nItem) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk); - } else { - pDataBlk = NULL; - } 
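Reviewer note: the removed commit path above is, at its core, a two-way merge of sorted inputs keyed by timestamp and version: on-disk block rows (read via tsdbReadDataBlock) against incoming commit rows (tsdbGetCommitRow), with the write buffer flushed whenever it reaches maxRow. A minimal standalone sketch of that pattern; flushBuf, mergeSorted and MAX_ROW are hypothetical stand-ins for tsdbWriteDataBlock, the merge loops and pCommitter->maxRow:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_ROW 4 /* stand-in for pCommitter->maxRow */

/* hypothetical stand-in for tsdbWriteDataBlock(): drain the write buffer */
static void flushBuf(int64_t *buf, int *n) {
  for (int i = 0; i < *n; i++) printf("%lld ", (long long)buf[i]);
  printf("<- block of %d rows\n", *n);
  *n = 0;
}

/* two-way merge of a sorted on-disk block with sorted in-memory commit rows;
 * equal keys are ruled out upstream (see the "dup rows not allowed" ASSERT) */
static void mergeSorted(const int64_t *disk, int nd, const int64_t *mem, int nm) {
  int64_t buf[MAX_ROW];
  int     n = 0, i = 0, j = 0;
  while (i < nd || j < nm) {
    int64_t k = (j >= nm || (i < nd && disk[i] < mem[j])) ? disk[i++] : mem[j++];
    buf[n++] = k;
    if (n >= MAX_ROW) flushBuf(buf, &n); /* mirrors the nRow >= maxRow checks */
  }
  if (n > 0) flushBuf(buf, &n); /* trailing partial block, as in the final write */
}

int main(void) {
  int64_t disk[] = {1, 4, 7, 10}, mem[] = {2, 3, 8};
  mergeSorted(disk, 4, mem, 3);
  return 0;
}
```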
- } - - code = tsdbCommitterNextTableData(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbInitSttBlockBuilderIfNeed(SCommitter *pCommitter, TABLEID id) { - int32_t code = 0; - int32_t lino = 0; - -#if USE_STREAM_COMPRESSION - SDiskDataBuilder *pBuilder = pCommitter->dWriter.pBuilder; - if (pBuilder->suid || pBuilder->uid) { - if (!TABLE_SAME_SCHEMA(pBuilder->suid, pBuilder->uid, id.suid, id.uid)) { - code = tsdbCommitSttBlk(pCommitter->dWriter.pWriter, pBuilder, pCommitter->dWriter.aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - tDiskDataBuilderClear(pBuilder); - } - } - - if (!pBuilder->suid && !pBuilder->uid) { - ASSERT(pCommitter->skmTable.suid == id.suid); - ASSERT(pCommitter->skmTable.uid == id.uid); - code = tDiskDataBuilderInit(pBuilder, pCommitter->skmTable.pTSchema, &id, pCommitter->cmprAlg, 0); - TSDB_CHECK_CODE(code, lino, _exit); - } -#else - SBlockData *pBData = &pCommitter->dWriter.bDatal; - if (pBData->suid || pBData->uid) { - if (!TABLE_SAME_SCHEMA(pBData->suid, pBData->uid, id.suid, id.uid)) { - code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBData, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - - tBlockDataReset(pBData); - } - } - - if (!pBData->suid && !pBData->uid) { - ASSERT(pCommitter->skmTable.suid == id.suid); - ASSERT(pCommitter->skmTable.uid == id.uid); - TABLEID tid = {.suid = id.suid, .uid = id.suid ? 0 : id.uid}; - code = tBlockDataInit(pBData, &tid, pCommitter->skmTable.pTSchema, NULL, 0); - TSDB_CHECK_CODE(code, lino, _exit); - } -#endif - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbAppendLastBlock(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - SBlockData *pBData = &pCommitter->dWriter.bData; - TABLEID id = {.suid = pBData->suid, .uid = pBData->uid}; - - code = tsdbInitSttBlockBuilderIfNeed(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - - for (int32_t iRow = 0; iRow < pBData->nRow; iRow++) { - TSDBROW row = tsdbRowFromBlockData(pBData, iRow); - -#if USE_STREAM_COMPRESSION - code = tDiskDataAddRow(pCommitter->dWriter.pBuilder, &row, NULL, &id); - TSDB_CHECK_CODE(code, lino, _exit); - - if (pCommitter->dWriter.pBuilder->nRow >= pCommitter->maxRow) { - code = tsdbCommitSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.pBuilder, pCommitter->dWriter.aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbInitSttBlockBuilderIfNeed(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - } -#else - code = tBlockDataAppendRow(&pCommitter->dWriter.bDatal, &row, NULL, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - if (pCommitter->dWriter.bDatal.nRow >= pCommitter->maxRow) { - code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk, - pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } -#endif - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitTableData(SCommitter *pCommitter, TABLEID id) { - int32_t code = 0; - int32_t lino = 0; - - SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo && 
(pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) { - pRowInfo = NULL; - } - - if (pRowInfo == NULL) goto _exit; - - if (pCommitter->toLastOnly) { - code = tsdbInitSttBlockBuilderIfNeed(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - - while (pRowInfo) { - STSchema *pTSchema = NULL; - if (pRowInfo->row.type == TSDBROW_ROW_FMT) { - code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row)); - TSDB_CHECK_CODE(code, lino, _exit); - pTSchema = pCommitter->skmRow.pTSchema; - } - -#if USE_STREAM_COMPRESSION - code = tDiskDataAddRow(pCommitter->dWriter.pBuilder, &pRowInfo->row, pTSchema, &id); -#else - code = tBlockDataAppendRow(&pCommitter->dWriter.bDatal, &pRowInfo->row, pTSchema, id.uid); -#endif - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbNextCommitRow(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) { - pRowInfo = NULL; - } - -#if USE_STREAM_COMPRESSION - if (pCommitter->dWriter.pBuilder->nRow >= pCommitter->maxRow) { - code = tsdbCommitSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.pBuilder, pCommitter->dWriter.aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbInitSttBlockBuilderIfNeed(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - } -#else - if (pCommitter->dWriter.bDatal.nRow >= pCommitter->maxRow) { - code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk, - pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } -#endif - } - } else { - SBlockData *pBData = &pCommitter->dWriter.bData; - ASSERT(pBData->nRow == 0); - - while (pRowInfo) { - STSchema *pTSchema = NULL; - if (pRowInfo->row.type == TSDBROW_ROW_FMT) { - code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row)); - TSDB_CHECK_CODE(code, lino, _exit); - pTSchema = pCommitter->skmRow.pTSchema; - } - - code = tBlockDataAppendRow(pBData, &pRowInfo->row, pTSchema, id.uid); - TSDB_CHECK_CODE(code, lino, _exit); - - code = tsdbNextCommitRow(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - - pRowInfo = tsdbGetCommitRow(pCommitter); - if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) { - pRowInfo = NULL; - } - - if (pBData->nRow >= pCommitter->maxRow) { - code = - tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - if (pBData->nRow) { - if (pBData->nRow > pCommitter->minRow) { - code = - tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg); - TSDB_CHECK_CODE(code, lino, _exit); - } else { - code = tsdbAppendLastBlock(pCommitter); - TSDB_CHECK_CODE(code, lino, _exit); - } - } - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter) { - int32_t code = 0; - int32_t lino = 0; - - SRowInfo *pRowInfo; - TABLEID id = {0}; - while ((pRowInfo = tsdbGetCommitRow(pCommitter)) != NULL) { - ASSERT(pRowInfo->suid != id.suid || pRowInfo->uid != id.uid); - id.suid = pRowInfo->suid; - id.uid = pRowInfo->uid; - - code = tsdbMoveCommitData(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - - // start - tMapDataReset(&pCommitter->dWriter.mBlock); - - // impl - code = 
tsdbUpdateTableSchema(pCommitter->pTsdb->pVnode->pMeta, id.suid, id.uid, &pCommitter->skmTable); - TSDB_CHECK_CODE(code, lino, _exit); - code = tBlockDataInit(&pCommitter->dReader.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0); - TSDB_CHECK_CODE(code, lino, _exit); - code = tBlockDataInit(&pCommitter->dWriter.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0); - TSDB_CHECK_CODE(code, lino, _exit); - - /* merge with data in .data file */ - code = tsdbMergeTableData(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - - /* handle remain table data */ - code = tsdbCommitTableData(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - - // end - if (pCommitter->dWriter.mBlock.nItem > 0) { - SBlockIdx blockIdx = {.suid = id.suid, .uid = id.uid}; - code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx); - TSDB_CHECK_CODE(code, lino, _exit); - - if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } - } - - id.suid = INT64_MAX; - id.uid = INT64_MAX; - code = tsdbMoveCommitData(pCommitter, id); - TSDB_CHECK_CODE(code, lino, _exit); - -#if USE_STREAM_COMPRESSION - code = tsdbCommitSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.pBuilder, pCommitter->dWriter.aSttBlk); -#else - code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk, - pCommitter->cmprAlg); -#endif - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pCommitter->pTsdb->pVnode), __func__, lino, - tstrerror(code)); - } - return code; -} - -int32_t tsdbFinishCommit(STsdb *pTsdb) { - int32_t code = 0; - int32_t lino = 0; - SMemTable *pMemTable = pTsdb->imem; - - // lock - taosThreadMutexLock(&pTsdb->mutex); - - code = tsdbFSCommit(pTsdb); - if (code) { - taosThreadMutexUnlock(&pTsdb->mutex); - TSDB_CHECK_CODE(code, lino, _exit); - } - - pTsdb->imem = NULL; - - // unlock - taosThreadMutexUnlock(&pTsdb->mutex); - if (pMemTable) { - tsdbUnrefMemTable(pMemTable, NULL, true); - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } else { - tsdbInfo("vgId:%d, tsdb finish commit", TD_VID(pTsdb->pVnode)); - } - return code; -} - -int32_t tsdbRollbackCommit(STsdb *pTsdb) { - int32_t code = 0; - int32_t lino = 0; - - code = tsdbFSRollback(pTsdb); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } else { - tsdbInfo("vgId:%d, tsdb rollback commit", TD_VID(pTsdb->pVnode)); - } - return code; -} diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 732f46467e..a796f5121c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -702,7 +702,7 @@ _exit: } // EXPOSED APIS ==================================================================================== -int32_t tsdbFSCommit(STsdb *pTsdb) { +static int32_t tsdbFSCommit(STsdb *pTsdb) { int32_t code = 0; int32_t lino = 0; STsdbFS fs = {0}; @@ -738,7 +738,7 @@ _exit: return code; } -int32_t tsdbFSRollback(STsdb *pTsdb) { +static int32_t tsdbFSRollback(STsdb *pTsdb) { int32_t code = 0; int32_t lino = 0; @@ -833,312 +833,3 @@ int32_t tsdbFSClose(STsdb *pTsdb) { return code; } - -int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) { - int32_t code = 0; 
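Reviewer note: tsdbFinishCommit() above follows a detach-under-lock pattern: the imem pointer swap is protected by the vnode mutex, while the potentially expensive tsdbUnrefMemTable() runs only after unlock. A minimal sketch under that reading; Db and MemTable are hypothetical miniatures of STsdb and SMemTable:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int refs;
} MemTable;

typedef struct {
  pthread_mutex_t mutex;
  MemTable       *imem; /* immutable memtable that was just committed */
} Db;

static void memTableUnref(MemTable *mt) {
  if (--mt->refs == 0) {
    printf("memtable freed\n");
    free(mt); /* heavy teardown stays outside the critical section */
  }
}

static void finishCommit(Db *db) {
  pthread_mutex_lock(&db->mutex);
  MemTable *old = db->imem; /* detach under the lock... */
  db->imem = NULL;
  pthread_mutex_unlock(&db->mutex);
  if (old) memTableUnref(old); /* ...release after unlocking */
}

int main(void) {
  Db db = {.mutex = PTHREAD_MUTEX_INITIALIZER, .imem = malloc(sizeof(MemTable))};
  db.imem->refs = 1;
  finishCommit(&db);
  return 0;
}
```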
- int32_t lino = 0; - - pFS->pDelFile = NULL; - if (pFS->aDFileSet) { - taosArrayClear(pFS->aDFileSet); - } else { - pFS->aDFileSet = taosArrayInit(taosArrayGetSize(pTsdb->fs.aDFileSet), sizeof(SDFileSet)); - if (pFS->aDFileSet == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - if (pTsdb->fs.pDelFile) { - pFS->pDelFile = (SDelFile *)taosMemoryMalloc(sizeof(SDelFile)); - if (pFS->pDelFile == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - - *pFS->pDelFile = *pTsdb->fs.pDelFile; - } - - for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) { - SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet); - SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid}; - - // head - fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile)); - if (fSet.pHeadF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - *fSet.pHeadF = *pSet->pHeadF; - - // data - fSet.pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile)); - if (fSet.pDataF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - *fSet.pDataF = *pSet->pDataF; - - // sma - fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile)); - if (fSet.pSmaF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - *fSet.pSmaF = *pSet->pSmaF; - - // stt - for (fSet.nSttF = 0; fSet.nSttF < pSet->nSttF; fSet.nSttF++) { - fSet.aSttF[fSet.nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile)); - if (fSet.aSttF[fSet.nSttF] == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - *fSet.aSttF[fSet.nSttF] = *pSet->aSttF[fSet.nSttF]; - } - - if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile) { - int32_t code = 0; - - if (pDelFile) { - if (pFS->pDelFile == NULL) { - pFS->pDelFile = (SDelFile *)taosMemoryMalloc(sizeof(SDelFile)); - if (pFS->pDelFile == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - } - *pFS->pDelFile = *pDelFile; - } else { - if (pFS->pDelFile) { - taosMemoryFree(pFS->pDelFile); - pFS->pDelFile = NULL; - } - } - -_exit: - return code; -} - -int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) { - int32_t code = 0; - int32_t idx = taosArraySearchIdx(pFS->aDFileSet, pSet, tDFileSetCmprFn, TD_GE); - - if (idx < 0) { - idx = taosArrayGetSize(pFS->aDFileSet); - } else { - SDFileSet *pDFileSet = (SDFileSet *)taosArrayGet(pFS->aDFileSet, idx); - int32_t c = tDFileSetCmprFn(pSet, pDFileSet); - if (c == 0) { - *pDFileSet->pHeadF = *pSet->pHeadF; - *pDFileSet->pDataF = *pSet->pDataF; - *pDFileSet->pSmaF = *pSet->pSmaF; - // stt - if (pSet->nSttF > pDFileSet->nSttF) { - ASSERT(pSet->nSttF == pDFileSet->nSttF + 1); - - pDFileSet->aSttF[pDFileSet->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile)); - if (pDFileSet->aSttF[pDFileSet->nSttF] == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - *pDFileSet->aSttF[pDFileSet->nSttF] = *pSet->aSttF[pSet->nSttF - 1]; - pDFileSet->nSttF++; - } else if (pSet->nSttF < pDFileSet->nSttF) { - ASSERT(pSet->nSttF == 1); - for (int32_t iStt = 1; iStt < pDFileSet->nSttF; iStt++) { - taosMemoryFree(pDFileSet->aSttF[iStt]); - } - - 
*pDFileSet->aSttF[0] = *pSet->aSttF[0]; - pDFileSet->nSttF = 1; - } else { - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - *pDFileSet->aSttF[iStt] = *pSet->aSttF[iStt]; - } - } - - pDFileSet->diskId = pSet->diskId; - goto _exit; - } - } - - ASSERT(pSet->nSttF == 1); - SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid, .nSttF = 1}; - - // head - fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile)); - if (fSet.pHeadF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - *fSet.pHeadF = *pSet->pHeadF; - - // data - fSet.pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile)); - if (fSet.pDataF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - *fSet.pDataF = *pSet->pDataF; - - // sma - fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile)); - if (fSet.pSmaF == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - *fSet.pSmaF = *pSet->pSmaF; - - // stt - fSet.aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile)); - if (fSet.aSttF[0] == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - *fSet.aSttF[0] = *pSet->aSttF[0]; - - if (taosArrayInsert(pFS->aDFileSet, idx, &fSet) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - -_exit: - return code; -} - -int32_t tsdbFSPrepareCommit(STsdb *pTsdb, STsdbFS *pFSNew) { - int32_t code = 0; - int32_t lino = 0; - char tfname[TSDB_FILENAME_LEN]; - - tsdbGetCurrentFName(pTsdb, NULL, tfname); - - // gnrt CURRENT.t - code = tsdbSaveFSToFile(pFSNew, tfname); - TSDB_CHECK_CODE(code, lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -int32_t tsdbFSRef(STsdb *pTsdb, STsdbFS *pFS) { - int32_t code = 0; - int32_t nRef; - - pFS->aDFileSet = taosArrayInit(taosArrayGetSize(pTsdb->fs.aDFileSet), sizeof(SDFileSet)); - if (pFS->aDFileSet == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - - pFS->pDelFile = pTsdb->fs.pDelFile; - if (pFS->pDelFile) { - nRef = atomic_fetch_add_32(&pFS->pDelFile->nRef, 1); - ASSERT(nRef > 0); - } - - SDFileSet fSet; - for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) { - SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet); - fSet = *pSet; - - nRef = atomic_fetch_add_32(&pSet->pHeadF->nRef, 1); - ASSERT(nRef > 0); - - nRef = atomic_fetch_add_32(&pSet->pDataF->nRef, 1); - ASSERT(nRef > 0); - - nRef = atomic_fetch_add_32(&pSet->pSmaF->nRef, 1); - ASSERT(nRef > 0); - - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - nRef = atomic_fetch_add_32(&pSet->aSttF[iStt]->nRef, 1); - ASSERT(nRef > 0); - } - - if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - } - -_exit: - return code; -} - -void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS) { - int32_t nRef; - char fname[TSDB_FILENAME_LEN]; - - if (pFS->pDelFile) { - nRef = atomic_sub_fetch_32(&pFS->pDelFile->nRef, 1); - ASSERT(nRef >= 0); - if (nRef == 0) { - tsdbDelFileName(pTsdb, pFS->pDelFile, fname); - (void)taosRemoveFile(fname); - taosMemoryFree(pFS->pDelFile); - } - } - - for (int32_t iSet = 0; iSet < taosArrayGetSize(pFS->aDFileSet); iSet++) { - SDFileSet *pSet = (SDFileSet *)taosArrayGet(pFS->aDFileSet, iSet); - - // head - nRef = atomic_sub_fetch_32(&pSet->pHeadF->nRef, 1); - ASSERT(nRef >= 0); - if (nRef == 0) { - tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - (void)taosRemoveFile(fname); - taosMemoryFree(pSet->pHeadF); - } - - // data - 
nRef = atomic_sub_fetch_32(&pSet->pDataF->nRef, 1); - ASSERT(nRef >= 0); - if (nRef == 0) { - tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - (void)taosRemoveFile(fname); - taosMemoryFree(pSet->pDataF); - } - - // sma - nRef = atomic_sub_fetch_32(&pSet->pSmaF->nRef, 1); - ASSERT(nRef >= 0); - if (nRef == 0) { - tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - (void)taosRemoveFile(fname); - taosMemoryFree(pSet->pSmaF); - } - - // stt - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - nRef = atomic_sub_fetch_32(&pSet->aSttF[iStt]->nRef, 1); - ASSERT(nRef >= 0); - if (nRef == 0) { - tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - (void)taosRemoveFile(fname); - taosMemoryFree(pSet->aSttF[iStt]); - /* code */ - } - } - } - - taosArrayDestroy(pFS->aDFileSet); -} diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c index b2ccc3e859..1d2fd60df2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c @@ -805,6 +805,12 @@ int64_t tsdbFSAllocEid(STFileSystem *fs) { return cid; } +void tsdbFSUpdateEid(STFileSystem *fs, int64_t cid) { + taosThreadMutexLock(&fs->tsdb->mutex); + fs->neid = TMAX(fs->neid, cid); + taosThreadMutexUnlock(&fs->tsdb->mutex); +} + int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype) { int32_t code = 0; int32_t lino; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.h b/source/dnode/vnode/src/tsdb/tsdbFS2.h index 714bf5bf16..7099ce8b26 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.h +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.h @@ -52,6 +52,7 @@ int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ev int32_t tsdbFSDestroyRefRangedSnapshot(TFileSetRangeArray **fsrArr); // txn int64_t tsdbFSAllocEid(STFileSystem *fs); +void tsdbFSUpdateEid(STFileSystem *fs, int64_t cid); int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype); int32_t tsdbFSEditCommit(STFileSystem *fs); int32_t tsdbFSEditAbort(STFileSystem *fs); diff --git a/source/dnode/vnode/src/tsdb/tsdbFSetRAW.c b/source/dnode/vnode/src/tsdb/tsdbFSetRAW.c index 03c12502d5..ddb8e7e27e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSetRAW.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSetRAW.c @@ -14,6 +14,7 @@ */ #include "tsdbFSetRAW.h" +#include "tsdbFS2.h" // SFSetRAWWriter ================================================== typedef struct SFSetRAWWriter { @@ -76,7 +77,7 @@ static int32_t tsdbFSetRAWWriteFileDataBegin(SFSetRAWWriter *writer, STsdbDataRA .szPage = writer->config->szPage, .fid = bHdr->file.fid, .did = writer->config->did, - .cid = writer->config->cid, + .cid = bHdr->file.cid, .level = writer->config->level, .file = @@ -84,7 +85,7 @@ static int32_t tsdbFSetRAWWriteFileDataBegin(SFSetRAWWriter *writer, STsdbDataRA .type = bHdr->file.type, .fid = bHdr->file.fid, .did = writer->config->did, - .cid = writer->config->cid, + .cid = bHdr->file.cid, .size = bHdr->file.size, .minVer = bHdr->file.minVer, .maxVer = bHdr->file.maxVer, @@ -94,6 +95,7 @@ static int32_t tsdbFSetRAWWriteFileDataBegin(SFSetRAWWriter *writer, STsdbDataRA }, }; + tsdbFSUpdateEid(config.tsdb->pFS, config.cid); writer->ctx->offset = 0; writer->ctx->file = config.file; diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index cc77474e79..4657b60f62 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -181,6 
+181,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid pDelData->sKey = sKey; pDelData->eKey = eKey; pDelData->pNext = NULL; + taosWLockLatch(&pTbData->lock); if (pTbData->pHead == NULL) { ASSERT(pTbData->pTail == NULL); pTbData->pHead = pTbData->pTail = pDelData; @@ -188,6 +189,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid pTbData->pTail->pNext = pDelData; pTbData->pTail = pDelData; } + taosWUnLockLatch(&pTbData->lock); pMemTable->nDel++; pMemTable->minVer = TMIN(pMemTable->minVer, version); @@ -401,6 +403,7 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid SL_NODE_BACKWARD(pTbData->sl.pHead, iLevel) = NULL; SL_NODE_FORWARD(pTbData->sl.pTail, iLevel) = NULL; } + taosInitRWLatch(&pTbData->lock); taosWLockLatch(&pMemTable->latch); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 427d5dd5d7..8a7ec981e8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -22,8 +22,8 @@ #include "tsdbUtil2.h" #include "tsimplehash.h" -#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) -#define getCurrentKeyInSttBlock(_r) ((_r)->currentKey) +#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) +#define getCurrentKeyInSttBlock(_r) ((_r)->currentKey) typedef struct { bool overlapWithNeighborBlock; @@ -41,7 +41,7 @@ static int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader); static int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, - SRowMerger* pMerger, SVersionRange* pVerRange, const char* id); + SRowMerger* pMerger, SVersionRange* pVerRange, const char* id); static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, STsdbReader* pReader); static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pTSRow, STableBlockScanInfo* pScanInfo); @@ -67,12 +67,13 @@ static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond static int32_t doBuildDataBlock(STsdbReader* pReader); static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader); static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo); -static bool hasDataInSttBlock(STableBlockScanInfo *pInfo); +static bool hasDataInSttBlock(STableBlockScanInfo* pInfo); static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter); static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order); static void resetTableListIndex(SReaderStatus* pStatus); static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t* pMinKey); static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo); +static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader); static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } @@ -120,7 +121,7 @@ static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInf } else if (pTCol->colId < pSupInfo->colId[j]) { // do nothing i += 1; } else { - return TSDB_CODE_INVALID_PARA; + return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; } } @@ -676,6 +677,10 
@@ static int32_t doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int } static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) { + if (pBlockIter->blockList == NULL) { + return NULL; + } + size_t num = TARRAY_SIZE(pBlockIter->blockList); if (num == 0) { ASSERT(pBlockIter->numOfBlocks == num); @@ -882,6 +887,21 @@ static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo } } +static void blockInfoToRecord(SBrinRecord* record, SFileDataBlockInfo* pBlockInfo){ + record->uid = pBlockInfo->uid; + record->firstKey = pBlockInfo->firstKey; + record->lastKey = pBlockInfo->lastKey; + record->minVer = pBlockInfo->minVer; + record->maxVer = pBlockInfo->maxVer; + record->blockOffset = pBlockInfo->blockOffset; + record->smaOffset = pBlockInfo->smaOffset; + record->blockSize = pBlockInfo->blockSize; + record->blockKeySize = pBlockInfo->blockKeySize; + record->smaSize = pBlockInfo->smaSize; + record->numRow = pBlockInfo->numRow; + record->count = pBlockInfo->count; +} + static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SDataBlockIter* pBlockIter = &pStatus->blockIter; @@ -899,7 +919,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) { bool asc = ASCENDING_TRAVERSE(pReader->info.order); int32_t step = asc ? 1 : -1; - SBrinRecord* pRecord = &pBlockInfo->record; + SBrinRecord tmp; + blockInfoToRecord(&tmp, pBlockInfo); + SBrinRecord* pRecord = &tmp; // no data exists, return directly. if (pBlockData->nRow == 0 || pBlockData->aTSKEY == 0) { @@ -1092,14 +1114,16 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBrinRecord* pRecord = &pBlockInfo->record; + SBrinRecord tmp; + blockInfoToRecord(&tmp, pBlockInfo); + SBrinRecord* pRecord = &tmp; code = tsdbDataFileReadBlockDataByColumn(pReader->pFileReader, pRecord, pBlockData, pSchema, &pSup->colId[1], pSup->numOfCols - 1); if (code != TSDB_CODE_SUCCESS) { tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 ", rows:%d, code:%s %s", - pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.firstKey, - pBlockInfo->record.lastKey, pBlockInfo->record.numRow, tstrerror(code), pReader->idStr); + pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->firstKey, + pBlockInfo->lastKey, pBlockInfo->numRow, tstrerror(code), pReader->idStr); return code; } @@ -1120,10 +1144,10 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI * This is an two rectangles overlap cases. 
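 * (the two rectangles: the query's time window crossed with its version range,
 * and the block's key/version extent; overlap on only part of an edge means the
 * block cannot be returned whole and must be loaded and filtered row by row)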
*/ static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SFileDataBlockInfo* pBlock) { - return (pWindow->ekey < pBlock->record.lastKey && pWindow->ekey >= pBlock->record.firstKey) || - (pWindow->skey > pBlock->record.firstKey && pWindow->skey <= pBlock->record.lastKey) || - (pVerRange->minVer > pBlock->record.minVer && pVerRange->minVer <= pBlock->record.maxVer) || - (pVerRange->maxVer < pBlock->record.maxVer && pVerRange->maxVer >= pBlock->record.minVer); + return (pWindow->ekey < pBlock->lastKey && pWindow->ekey >= pBlock->firstKey) || + (pWindow->skey > pBlock->firstKey && pWindow->skey <= pBlock->lastKey) || + (pVerRange->minVer > pBlock->minVer && pVerRange->minVer <= pBlock->maxVer) || + (pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer); } static bool getNeighborBlockOfSameTable(SDataBlockIter* pBlockIter, SFileDataBlockInfo* pBlockInfo, @@ -1138,11 +1162,11 @@ static bool getNeighborBlockOfSameTable(SDataBlockIter* pBlockIter, SFileDataBlo return false; } - int32_t step = asc ? 1 : -1; + int32_t step = asc ? 1 : -1; STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pTableBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step); SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex); - memcpy(pRecord, &p->record, sizeof(SBrinRecord)); + blockInfoToRecord(pRecord, p); *nextIndex = pBlockInfo->tbBlockIdx + step; return true; @@ -1211,9 +1235,9 @@ static int32_t setFileBlockActiveInBlockIter(STsdbReader* pReader, SDataBlockIte static bool overlapWithNeighborBlock2(SFileDataBlockInfo* pBlock, SBrinRecord* pRec, int32_t order) { // it is the last block in current file, no chance to overlap with neighbor blocks. if (ASCENDING_TRAVERSE(order)) { - return pBlock->record.lastKey == pRec->firstKey; + return pBlock->lastKey == pRec->firstKey; } else { - return pBlock->record.firstKey == pRec->lastKey; + return pBlock->firstKey == pRec->lastKey; } } @@ -1223,9 +1247,9 @@ static int64_t getBoarderKeyInFiles(SFileDataBlockInfo* pBlock, STableBlockScanI int64_t key = 0; if (pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA) { int64_t keyInStt = pScanInfo->sttKeyInfo.nextProcKey; - key = ascScan ? TMIN(pBlock->record.firstKey, keyInStt) : TMAX(pBlock->record.lastKey, keyInStt); + key = ascScan ? TMIN(pBlock->firstKey, keyInStt) : TMAX(pBlock->lastKey, keyInStt); } else { - key = ascScan ? pBlock->record.firstKey : pBlock->record.lastKey; + key = ascScan ? 
pBlock->firstKey : pBlock->lastKey; } return key; @@ -1241,8 +1265,8 @@ static bool bufferDataInFileBlockGap(TSDBKEY keyInBuf, SFileDataBlockInfo* pBloc } static bool keyOverlapFileBlock(TSDBKEY key, SFileDataBlockInfo* pBlock, SVersionRange* pVerRange) { - return (key.ts >= pBlock->record.firstKey && key.ts <= pBlock->record.lastKey) && - (pBlock->record.maxVer >= pVerRange->minVer) && (pBlock->record.minVer <= pVerRange->maxVer); + return (key.ts >= pBlock->firstKey && key.ts <= pBlock->lastKey) && + (pBlock->maxVer >= pVerRange->minVer) && (pBlock->minVer <= pVerRange->maxVer); } static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pBlockInfo, @@ -1258,18 +1282,20 @@ static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pInfo->overlapWithNeighborBlock = overlapWithNeighborBlock2(pBlockInfo, &rec, pReader->info.order); } + SBrinRecord pRecord; + blockInfoToRecord(&pRecord, pBlockInfo); // has duplicated ts of different version in this block - pInfo->hasDupTs = (pBlockInfo->record.numRow > pBlockInfo->record.count) || (pBlockInfo->record.count <= 0); - pInfo->overlapWithDelInfo = overlapWithDelSkyline(pScanInfo, &pBlockInfo->record, pReader->info.order); + pInfo->hasDupTs = (pBlockInfo->numRow > pBlockInfo->count) || (pBlockInfo->count <= 0); + pInfo->overlapWithDelInfo = overlapWithDelSkyline(pScanInfo, &pRecord, pReader->info.order); ASSERT(pScanInfo->sttKeyInfo.status != STT_FILE_READER_UNINIT); if (pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA) { int64_t nextProcKeyInStt = pScanInfo->sttKeyInfo.nextProcKey; pInfo->overlapWithSttBlock = - !(pBlockInfo->record.lastKey < nextProcKeyInStt || pBlockInfo->record.firstKey > nextProcKeyInStt); + !(pBlockInfo->lastKey < nextProcKeyInStt || pBlockInfo->firstKey > nextProcKeyInStt); } - pInfo->moreThanCapcity = pBlockInfo->record.numRow > pReader->resBlockInfo.capacity; + pInfo->moreThanCapcity = pBlockInfo->numRow > pReader->resBlockInfo.capacity; pInfo->partiallyRequired = dataBlockPartiallyRequired(&pReader->info.window, &pReader->info.verRange, pBlockInfo); pInfo->overlapWithKeyInBuf = keyOverlapFileBlock(keyInBuf, pBlockInfo, &pReader->info.verRange); } @@ -1316,17 +1342,17 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* return TSDB_CODE_SUCCESS; } - int64_t st = taosGetTimestampUs(); + int64_t st = taosGetTimestampUs(); SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; - int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader); + int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader); double el = (taosGetTimestampUs() - st) / 1000.0; updateComposedBlockInfo(pReader, el, pBlockScanInfo); tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%" PRId64 ", brange:%" PRId64 " - %" PRId64 ", uid:%" PRIu64 ", %s", - pReader, el, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, - pBlockScanInfo->uid, pReader->idStr); + pReader, el, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pBlockScanInfo->uid, + pReader->idStr); pReader->cost.buildmemBlock += el; return code; @@ -1360,7 +1386,8 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB static bool nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, SVersionRange* pVerRange) { - int32_t step = ASCENDING_TRAVERSE(pSttBlockReader->order) ? 
1 : -1; + int32_t order = pSttBlockReader->order; + int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1; while (1) { bool hasVal = tMergeTreeNext(&pSttBlockReader->mergeTree); @@ -1377,21 +1404,21 @@ static bool nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockSc pSttBlockReader->currentKey = key; pScanInfo->sttKeyInfo.nextProcKey = key; - if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, pSttBlockReader->order, - pVerRange)) { + if (pScanInfo->delSkyline != NULL && TARRAY_SIZE(pScanInfo->delSkyline) > 0) { + if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, order, pVerRange)) { + pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; + return true; + } + } else { pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; return true; } } } -static void doPinSttBlock(SSttBlockReader* pSttBlockReader) { - tMergeTreePinSttBlock(&pSttBlockReader->mergeTree); -} +static void doPinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreePinSttBlock(&pSttBlockReader->mergeTree); } -static void doUnpinSttBlock(SSttBlockReader* pSttBlockReader) { - tMergeTreeUnpinSttBlock(&pSttBlockReader->mergeTree); -} +static void doUnpinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreeUnpinSttBlock(&pSttBlockReader->mergeTree); } static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, STsdbReader* pReader, @@ -1468,7 +1495,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; int64_t tsLast = INT64_MIN; - if (hasDataInSttBlock(pBlockScanInfo)) { + if (hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { tsLast = getCurrentKeyInSttBlock(pSttBlockReader); } @@ -1487,7 +1514,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* int64_t minKey = 0; if (pReader->info.order == TSDB_ORDER_ASC) { minKey = INT64_MAX; // chosen the minimum value - if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) { + if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { minKey = tsLast; } @@ -1500,7 +1527,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* } } else { minKey = INT64_MIN; - if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) { + if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { minKey = tsLast; } @@ -1530,8 +1557,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* if (code != TSDB_CODE_SUCCESS) { return code; } - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, - pReader->idStr); + doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); } if (minKey == k.ts) { @@ -1580,8 +1606,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* if (code != TSDB_CODE_SUCCESS) { return code; } - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, - pReader->idStr); + doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); } if (minKey == key) { @@ -1606,91 +1631,6 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* return code; } -static int32_t doMergeFileBlockAndLastBlock(SSttBlockReader* pSttBlockReader, 
STsdbReader* pReader, - STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, - bool mergeBlockData) { - SRowMerger* pMerger = &pReader->status.merger; - SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - - int64_t tsLastBlock = getCurrentKeyInSttBlock(pSttBlockReader); - bool copied = false; - int32_t code = TSDB_CODE_SUCCESS; - SRow* pTSRow = NULL; - TSDBROW* pRow = tMergeTreeGetRow(&pSttBlockReader->mergeTree); - - // create local variable to hold the row value - TSDBROW fRow = {.iRow = pRow->iRow, .type = TSDBROW_COL_FMT, .pBlockData = pRow->pBlockData}; - - tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", ts:%" PRId64 " %s", pRow->pBlockData, pRow->iRow, pSttBlockReader->uid, - fRow.pBlockData->aTSKEY[fRow.iRow], pReader->idStr); - - // only stt block exists - if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) { - code = tryCopyDistinctRowFromSttBlock(&fRow, pSttBlockReader, pBlockScanInfo, tsLastBlock, pReader, &copied); - if (code) { - return code; - } - - if (copied) { - pBlockScanInfo->lastProcKey = tsLastBlock; - return TSDB_CODE_SUCCESS; - } else { - code = tsdbRowMergerAdd(pMerger, &fRow, NULL); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree); - tsdbRowMergerAdd(pMerger, pRow1, NULL); - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLastBlock, pMerger, &pReader->info.verRange, - pReader->idStr); - - code = tsdbRowMergerGetRow(pMerger, &pTSRow); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); - - taosMemoryFree(pTSRow); - tsdbRowMergerClear(pMerger); - - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } else { // not merge block data - code = tsdbRowMergerAdd(pMerger, &fRow, NULL); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLastBlock, pMerger, &pReader->info.verRange, - pReader->idStr); - - // merge with block data if ts == key - if (tsLastBlock == pBlockData->aTSKEY[pDumpInfo->rowIndex]) { - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader); - } - - code = tsdbRowMergerGetRow(pMerger, &pTSRow); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); - - taosMemoryFree(pTSRow); - tsdbRowMergerClear(pMerger); - - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - - return TSDB_CODE_SUCCESS; -} - static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader* pSttBlockReader, int64_t key, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) { SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; @@ -1714,6 +1654,9 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader* } else if ((!dataInDataFile) && dataInSttFile) { // no data ile block exists return mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader); + } else if (pBlockScanInfo->cleanSttBlocks && pReader->info.execMode == READER_EXEC_ROWS) { + // opt model for count data in stt file, which is not overlap with data blocks in files. 
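// (because clean stt rows cannot interleave with this data block, only the
// file-block rows need row-level merging here; stt rows are consumed on their own path)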
+ return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); } else { // row in both stt file blocks and data file blocks TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); @@ -1735,7 +1678,7 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader* // the following for key == tsLast // ASC: file block ------> stt block // DESC: stt block ------> file block - SRow* pTSRow = NULL; + SRow* pTSRow = NULL; if (ASCENDING_TRAVERSE(pReader->info.order)) { code = tsdbRowMergerAdd(pMerger, &fRow, NULL); if (code != TSDB_CODE_SUCCESS) { @@ -1793,7 +1736,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader); int64_t tsLast = INT64_MIN; - if (hasDataInSttBlock(pBlockScanInfo)) { + if (hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { tsLast = getCurrentKeyInSttBlock(pSttBlockReader); } @@ -1842,7 +1785,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* minKey = key; } - if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) { + if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { minKey = tsLast; } } else { @@ -1859,7 +1802,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* minKey = key; } - if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) { + if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) { minKey = tsLast; } } @@ -1884,8 +1827,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* return code; } - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, - pReader->idStr); + doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); } if (minKey == ik.ts) { @@ -1943,8 +1885,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* return code; } - doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, - pReader->idStr); + doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); } if (minKey == key) { @@ -2033,32 +1974,39 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea return TSDB_CODE_SUCCESS; } -static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo, - STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) { +static bool isValidFileBlockRow(SBlockData* pBlockData, int32_t rowIndex, STableBlockScanInfo* pBlockScanInfo, bool asc, + STsdbReaderInfo* pInfo) { // it is an multi-table data block if (pBlockData->aUid != NULL) { - uint64_t uid = pBlockData->aUid[pDumpInfo->rowIndex]; + uint64_t uid = pBlockData->aUid[rowIndex]; if (uid != pBlockScanInfo->uid) { // move to next row return false; } } // check for version and time range - int64_t ver = pBlockData->aVersion[pDumpInfo->rowIndex]; - if (ver > pReader->info.verRange.maxVer || ver < pReader->info.verRange.minVer) { + int64_t ver = pBlockData->aVersion[rowIndex]; + if (ver > pInfo->verRange.maxVer || ver < pInfo->verRange.minVer) { return false; } - int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex]; - if (ts > pReader->info.window.ekey || ts < pReader->info.window.skey) { + int64_t ts = pBlockData->aTSKEY[rowIndex]; + if (ts > pInfo->window.ekey || ts < pInfo->window.skey) { 
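/* the row's timestamp lies outside the query time window, skip it */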
return false; } - if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, ts, ver, pReader->info.order, - &pReader->info.verRange)) { + if ((asc && (ts <= pBlockScanInfo->lastProcKey)) || ((!asc) && (ts >= pBlockScanInfo->lastProcKey))) { return false; } + if (pBlockScanInfo->delSkyline != NULL && TARRAY_SIZE(pBlockScanInfo->delSkyline) > 0) { + bool dropped = hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, ts, ver, + pInfo->order, &pInfo->verRange); + if (dropped) { + return false; + } + } + return true; } @@ -2112,7 +2060,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan }; SSttDataInfoForTable info = {.pTimeWindowList = taosArrayInit(4, sizeof(STimeWindow))}; - int32_t code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info); + int32_t code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info); if (code != TSDB_CODE_SUCCESS) { return false; } @@ -2130,7 +2078,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan pScanInfo->sttWindow.ekey = INT64_MIN; // calculate the time window for data in stt files - for(int32_t i = 0; i < taosArrayGetSize(info.pTimeWindowList); ++i) { + for (int32_t i = 0; i < taosArrayGetSize(info.pTimeWindowList); ++i) { STimeWindow* pWindow = taosArrayGet(info.pTimeWindowList, i); if (pScanInfo->sttWindow.skey > pWindow->skey) { pScanInfo->sttWindow.skey = pWindow->skey; @@ -2141,13 +2089,19 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan } } - pScanInfo->sttKeyInfo.status = taosArrayGetSize(info.pTimeWindowList)? STT_FILE_HAS_DATA:STT_FILE_NO_DATA; - pScanInfo->sttKeyInfo.nextProcKey = ASCENDING_TRAVERSE(pReader->info.order)? pScanInfo->sttWindow.skey:pScanInfo->sttWindow.ekey; + pScanInfo->sttKeyInfo.status = taosArrayGetSize(info.pTimeWindowList) ? STT_FILE_HAS_DATA : STT_FILE_NO_DATA; + pScanInfo->sttKeyInfo.nextProcKey = + ASCENDING_TRAVERSE(pReader->info.order) ? 
pScanInfo->sttWindow.skey : pScanInfo->sttWindow.ekey; hasData = true; - } else { + } else { // not clean stt blocks + INIT_TIMEWINDOW(&pScanInfo->sttWindow); //reset the time window + pScanInfo->sttBlockReturned = false; hasData = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, &pReader->info.verRange); } } else { + pScanInfo->cleanSttBlocks = false; + INIT_TIMEWINDOW(&pScanInfo->sttWindow); //reset the time window + pScanInfo->sttBlockReturned = false; hasData = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, &pReader->info.verRange); } @@ -2160,9 +2114,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan return hasData; } -static bool hasDataInSttBlock(STableBlockScanInfo *pInfo) { - return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA; -} +static bool hasDataInSttBlock(STableBlockScanInfo* pInfo) { return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA; } bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) { if ((pBlockData->nRow > 0) && (pBlockData->nRow != pDumpInfo->totalRows)) { @@ -2217,10 +2169,11 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc } } -int32_t mergeRowsInSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) { - bool copied = false; - SRow* pTSRow = NULL; - int64_t tsLastBlock = getCurrentKeyInSttBlock(pSttBlockReader); +int32_t mergeRowsInSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pBlockScanInfo, + STsdbReader* pReader) { + bool copied = false; + SRow* pTSRow = NULL; + int64_t tsLastBlock = getCurrentKeyInSttBlock(pSttBlockReader); SRowMerger* pMerger = &pReader->status.merger; TSDBROW* pRow = tMergeTreeGetRow(&pSttBlockReader->mergeTree); @@ -2358,17 +2311,14 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); SSttBlockReader* pSttBlockReader = pReader->status.fileIter.pSttBlockReader; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBrinRecord* pRecord = NULL; STableBlockScanInfo* pBlockScanInfo = NULL; if (pBlockInfo == NULL) { return 0; } - pRecord = &pBlockInfo->record; - if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pBlockInfo->uid, sizeof(pBlockInfo->uid))) { - setBlockAllDumped(pDumpInfo, pRecord->lastKey, pReader->info.order); + setBlockAllDumped(pDumpInfo, pBlockInfo->lastKey, pReader->info.order); return code; } @@ -2377,13 +2327,12 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { goto _end; } - pRecord = &pBlockInfo->record; TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader); // it is a clean block, load it directly int64_t cap = pReader->resBlockInfo.capacity; - if (isCleanFileDataBlock(pReader, pBlockInfo, pBlockScanInfo, keyInBuf) && (pRecord->numRow <= cap)) { - if (((asc && (pRecord->firstKey < keyInBuf.ts)) || (!asc && (pRecord->lastKey > keyInBuf.ts))) && + if (isCleanFileDataBlock(pReader, pBlockInfo, pBlockScanInfo, keyInBuf) && (pBlockInfo->numRow <= cap)) { + if (((asc && (pBlockInfo->firstKey < keyInBuf.ts)) || (!asc && (pBlockInfo->lastKey > keyInBuf.ts))) && (pBlockScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA)) { code = copyBlockDataToSDataBlock(pReader); if (code) { @@ -2391,7 +2340,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { } // record the last key value - pBlockScanInfo->lastProcKey = asc ? pRecord->lastKey : pRecord->firstKey; + pBlockScanInfo->lastProcKey = asc ? 
pBlockInfo->lastKey : pBlockInfo->firstKey; goto _end; } } @@ -2404,7 +2353,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { { while (pBlockData->nRow > 0 && pBlockData->uid == pBlockScanInfo->uid) { // find the first qualified row in data block - if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) { + if (isValidFileBlockRow(pBlockData, pDumpInfo->rowIndex, pBlockScanInfo, asc, &pReader->info)) { hasBlockData = true; break; } @@ -2419,7 +2368,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { bool loadNeighbor = false; code = loadNeighborIfOverlap(pBlockInfo, pBlockScanInfo, pReader, &loadNeighbor); if ((!loadNeighbor) || (code != 0)) { - setBlockAllDumped(pDumpInfo, pRecord->lastKey, pReader->info.order); + setBlockAllDumped(pDumpInfo, pBlockInfo->lastKey, pReader->info.order); break; } } @@ -2438,7 +2387,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { // currently loaded file data block is consumed if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) { - setBlockAllDumped(pDumpInfo, pRecord->lastKey, pReader->info.order); + setBlockAllDumped(pDumpInfo, pBlockInfo->lastKey, pReader->info.order); break; } @@ -2551,16 +2500,16 @@ static void prepareDurationForNextFileSet(STsdbReader* pReader) { pReader->status.bProcMemFirstFileset = false; } - int32_t fid = pReader->status.pCurrentFileset->fid; + int32_t fid = pReader->status.pCurrentFileset->fid; STimeWindow winFid = {0}; tsdbFidKeyRange(fid, pReader->pTsdb->keepCfg.days, pReader->pTsdb->keepCfg.precision, &winFid.skey, &winFid.ekey); if (ASCENDING_TRAVERSE(pReader->info.order)) { pReader->status.bProcMemPreFileset = !(pReader->status.memTableMaxKey < pReader->status.prevFilesetStartKey || - (winFid.skey-1) < pReader->status.memTableMinKey); + (winFid.skey - 1) < pReader->status.memTableMinKey); } else { - pReader->status.bProcMemPreFileset = !( pReader->status.memTableMaxKey < (winFid.ekey+1) || - pReader->status.prevFilesetEndKey < pReader->status.memTableMinKey); + pReader->status.bProcMemPreFileset = !(pReader->status.memTableMaxKey < (winFid.ekey + 1) || + pReader->status.prevFilesetEndKey < pReader->status.memTableMinKey); } if (pReader->status.bProcMemPreFileset) { @@ -2794,11 +2743,11 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { static bool notOverlapWithSttFiles(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo, bool asc) { ASSERT(pScanInfo->sttKeyInfo.status != STT_FILE_READER_UNINIT); - if(pScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA) { + if (pScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA) { return true; } else { int64_t keyInStt = pScanInfo->sttKeyInfo.nextProcKey; - return (asc && pBlockInfo->record.lastKey < keyInStt) || (!asc && pBlockInfo->record.firstKey > keyInStt); + return (asc && pBlockInfo->lastKey < keyInStt) || (!asc && pBlockInfo->firstKey > keyInStt); } } @@ -2812,7 +2761,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { int32_t code = TSDB_CODE_SUCCESS; if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pBlockInfo->uid, sizeof(pBlockInfo->uid))) { - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order); return code; } @@ -2847,20 +2796,20 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { if (notOverlapWithSttFiles(pBlockInfo, pScanInfo, asc)) { // whole block is required, return it 
directly SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; - pInfo->rows = pBlockInfo->record.numRow; + pInfo->rows = pBlockInfo->numRow; pInfo->id.uid = pScanInfo->uid; pInfo->dataLoad = 0; - pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey}; + pInfo->window = (STimeWindow){.skey = pBlockInfo->firstKey, .ekey = pBlockInfo->lastKey}; setComposedBlockFlag(pReader, false); - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order); // update the last key for the corresponding table pScanInfo->lastProcKey = asc ? pInfo->window.ekey : pInfo->window.skey; tsdbDebug("%p uid:%" PRIu64 " clean file block retrieved from file, global index:%d, " "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", - pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow, - pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); + pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->numRow, + pBlockInfo->firstKey, pBlockInfo->lastKey, pReader->idStr); } else { SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); @@ -2889,10 +2838,10 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { // data in stt now overlaps with current active file data block, need to composed with file data block. int64_t lastKeyInStt = getCurrentKeyInSttBlock(pSttBlockReader); - if ((lastKeyInStt >= pBlockInfo->record.firstKey && asc) || - (lastKeyInStt <= pBlockInfo->record.lastKey && (!asc))) { + if ((lastKeyInStt >= pBlockInfo->firstKey && asc) || + (lastKeyInStt <= pBlockInfo->lastKey && (!asc))) { tsdbDebug("%p lastKeyInStt:%" PRId64 ", overlap with file block, brange:%" PRId64 "-%" PRId64 " %s", pReader, - lastKeyInStt, pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); + lastKeyInStt, pBlockInfo->firstKey, pBlockInfo->lastKey, pReader->idStr); break; } } @@ -2915,7 +2864,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { static int32_t buildBlockFromBufferSeqForPreFileset(STsdbReader* pReader, int64_t endKey) { SReaderStatus* pStatus = &pReader->status; - tsdbDebug("seq load data blocks from cache that preceeds fileset %d, %s", pReader->status.pCurrentFileset->fid, pReader->idStr); + tsdbDebug("seq load data blocks from cache that preceeds fileset %d, %s", pReader->status.pCurrentFileset->fid, + pReader->idStr); while (1) { if (pReader->code != TSDB_CODE_SUCCESS) { @@ -3008,8 +2958,8 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) lastKey = pScanInfo->lastProcKey; } - pDumpInfo->totalRows = pBlockInfo->record.numRow; - pDumpInfo->rowIndex = ASCENDING_TRAVERSE(pReader->info.order) ? 0 : pBlockInfo->record.numRow - 1; + pDumpInfo->totalRows = pBlockInfo->numRow; + pDumpInfo->rowIndex = ASCENDING_TRAVERSE(pReader->info.order) ? 0 : pBlockInfo->numRow - 1; } else { pDumpInfo->totalRows = 0; pDumpInfo->rowIndex = 0; @@ -3091,6 +3041,17 @@ static ERetrieveType doReadDataFromSttFiles(STsdbReader* pReader) { return TSDB_READ_RETURN; } + if (pReader->status.bProcMemPreFileset) { + code = buildFromPreFilesetBuffer(pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + if (pResBlock->info.rows > 0) { + pReader->status.processingMemPreFileSet = true; + return TSDB_READ_RETURN; + } + } + if (pBlockIter->numOfBlocks > 0) { // there are data blocks existed. 
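Reviewer note: the buildFromPreFilesetBuffer() call added above preserves global timestamp order by draining memtable rows that fall before the current fileset first. A sketch of the ascending-order gating predicate; the function and parameter names are illustrative, mirroring the bProcMemPreFileset computation in prepareDurationForNextFileSet():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative reduction of the ascending-order bProcMemPreFileset check:
 * memory rows must be drained first when the memtable key range overlaps the
 * gap between the previous fileset and the start of the current one */
static bool mustDrainPreFilesetBuffer(int64_t memMinKey, int64_t memMaxKey,
                                      int64_t prevFilesetStartKey,
                                      int64_t filesetStartKey) {
  return !(memMaxKey < prevFilesetStartKey || filesetStartKey - 1 < memMinKey);
}

int main(void) {
  /* memtable rows 100..200 sit before a fileset starting at 300: drain first */
  printf("%d\n", mustDrainPreFilesetBuffer(100, 200, 50, 300)); /* prints 1 */
  /* memtable rows 400..500 start inside the fileset: no pre-fileset pass */
  printf("%d\n", mustDrainPreFilesetBuffer(400, 500, 50, 300)); /* prints 0 */
  return 0;
}
```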
return TSDB_READ_CONTINUE; } else { // all blocks in data file are checked, let's check the data in last files @@ -3219,7 +3180,7 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order, SVersionRange* pVerRange) { - if (pDelList == NULL || (taosArrayGetSize(pDelList) == 0)) { + if (pDelList == NULL || (TARRAY_SIZE(pDelList) == 0)) { return false; } @@ -3327,16 +3288,22 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter); TSDBKEY key = TSDBROW_KEY(pRow); - + int32_t order = pReader->info.order; if (outOfTimeWindow(key.ts, &pReader->info.window)) { pIter->hasVal = false; return NULL; } // it is a valid data version - if ((key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer) && - (!hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, pReader->info.order, &pReader->info.verRange))) { - return pRow; + if (key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer) { + if (pDelList == NULL || TARRAY_SIZE(pDelList) == 0) { + return pRow; + } else { + bool dropped = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange); + if (!dropped) { + return pRow; + } + } } while (1) { @@ -3353,9 +3320,15 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p return NULL; } - if (key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer && - (!hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, pReader->info.order, &pReader->info.verRange))) { - return pRow; + if (key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer) { + if (pDelList == NULL || TARRAY_SIZE(pDelList) == 0) { + return pRow; + } else { + bool dropped = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange); + if (!dropped) { + return pRow; + } + } } } } @@ -3827,7 +3800,6 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e pBlockScanInfo->lastProcKey = row.pBlockData->aTSKEY[row.iRow]; } - // no data in buffer, return immediately if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) { break; @@ -3925,7 +3897,7 @@ static int32_t doOpenReaderImpl(STsdbReader* pReader) { int32_t code = TSDB_CODE_SUCCESS; if (pStatus->fileIter.numOfFiles == 0) { pStatus->loadFromFile = false; -// } else if (READER_EXEC_DATA == pReader->info.readMode) { + // } else if (READER_EXEC_DATA == pReader->info.readMode) { // DO NOTHING } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -4072,7 +4044,7 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi } pReader->flag = READER_STATUS_SUSPEND; - pReader->info.execMode = pCond->notLoadData? READER_EXEC_ROWS : READER_EXEC_DATA; + pReader->info.execMode = pCond->notLoadData ? 
READER_EXEC_ROWS : READER_EXEC_DATA; pReader->pIgnoreTables = pIgnoreTables; tsdbDebug("%p total numOfTable:%d, window:%" PRId64 " - %" PRId64 ", verRange:%" PRId64 " - %" PRId64 @@ -4137,7 +4109,7 @@ void tsdbReaderClose2(STsdbReader* pReader) { } SReadCostSummary* pCost = &pReader->cost; - SFilesetIter* pFilesetIter = &pReader->status.fileIter; + SFilesetIter* pFilesetIter = &pReader->status.fileIter; if (pFilesetIter->pSttBlockReader != NULL) { SSttBlockReader* pLReader = pFilesetIter->pSttBlockReader; tMergeTreeClose(&pLReader->mergeTree); @@ -4178,35 +4150,21 @@ void tsdbReaderClose2(STsdbReader* pReader) { taosMemoryFreeClear(pReader); } -int32_t tsdbReaderSuspend2(STsdbReader* pReader) { - // save reader's base state & reset top state to be reconstructed from base state - int32_t code = 0; - SReaderStatus* pStatus = &pReader->status; - STableBlockScanInfo* pBlockScanInfo = NULL; +static int32_t doSuspendCurrentReader(STsdbReader* pCurrentReader) { + SReaderStatus* pStatus = &pCurrentReader->status; if (pStatus->loadFromFile) { - SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); - if (pBlockInfo != NULL) { - pBlockScanInfo = getTableBlockScanInfo(pStatus->pTableMap, pBlockInfo->uid, pReader->idStr); - if (pBlockScanInfo == NULL) { - goto _err; - } - } else { - pBlockScanInfo = *pStatus->pTableIter; - } + tsdbDataFileReaderClose(&pCurrentReader->pFileReader); - tsdbDataFileReaderClose(&pReader->pFileReader); - - SReadCostSummary* pCost = &pReader->cost; - pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost); - pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); + SReadCostSummary* pCost = &pCurrentReader->cost; + pStatus->pLDataIterArray = destroySttBlockReader(pStatus->pLDataIterArray, &pCost->sttCost); + pStatus->pLDataIterArray = taosArrayInit(4, POINTER_BYTES); } // resetDataBlockScanInfo excluding lastKey STableBlockScanInfo** p = NULL; - int32_t step = ASCENDING_TRAVERSE(pReader->info.order)? 1:-1; - + int32_t step = ASCENDING_TRAVERSE(pCurrentReader->info.order) ? 
1 : -1; int32_t iter = 0; while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) { STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p; @@ -4217,6 +4175,26 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { pStatus->uidList.currentIndex = 0; initReaderStatus(pStatus); + return TSDB_CODE_SUCCESS; +} + +int32_t tsdbReaderSuspend2(STsdbReader* pReader) { + // save reader's base state & reset top state to be reconstructed from base state + int32_t code = 0; + pReader->status.suspendInvoked = true; // record the suspend status + + if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { + if (pReader->step == EXTERNAL_ROWS_PREV) { + doSuspendCurrentReader(pReader->innerReader[0]); + } else if (pReader->step == EXTERNAL_ROWS_MAIN) { + doSuspendCurrentReader(pReader); + } else { + doSuspendCurrentReader(pReader->innerReader[1]); + } + } else { + doSuspendCurrentReader(pReader); + } + tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); pReader->pReadSnap = NULL; if (pReader->bFilesetDelimited) { @@ -4225,16 +4203,16 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } pReader->flag = READER_STATUS_SUSPEND; + if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { + clearSharedPtr(pReader->innerReader[0]); + clearSharedPtr(pReader->innerReader[1]); + } + #if SUSPEND_RESUME_TEST - tsem_post(&pReader->resumeAfterSuspend); + tsem_post(&pReader->resumeAfterSuspend); #endif - tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? pBlockScanInfo->uid : 0, - pReader->idStr); - return code; - -_err: - tsdbError("failed to suspend data reader, code:%s %s", tstrerror(code), pReader->idStr); + tsdbDebug("reader: %p suspended in this query %s, step:%d", pReader, pReader->idStr, pReader->step); return code; } @@ -4265,8 +4243,7 @@ int32_t tsdbReaderResume2(STsdbReader* pReader) { int32_t code = 0; STableBlockScanInfo** pBlockScanInfo = pReader->status.pTableIter; - // restore reader's state - // task snapshot + // restore reader's state, task snapshot int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap); if (numOfTables > 0) { qTrace("tsdb/reader: %p, take snapshot", pReader); @@ -4291,7 +4268,14 @@ int32_t tsdbReaderResume2(STsdbReader* pReader) { pNextReader->resBlockInfo.capacity = 1; setSharedPtr(pNextReader, pReader); - code = doOpenReaderImpl(pPrevReader); + if (pReader->step == 0 || pReader->step == EXTERNAL_ROWS_PREV) { + code = doOpenReaderImpl(pPrevReader); + } else if (pReader->step == EXTERNAL_ROWS_MAIN) { + code = doOpenReaderImpl(pReader); + } else { + code = doOpenReaderImpl(pNextReader); + } + if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4309,13 +4293,13 @@ _err: } static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SReaderStatus* pStatus = &pReader->status; SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; - int32_t fid = pReader->status.pCurrentFileset->fid; - STimeWindow win = {0}; + int32_t fid = pReader->status.pCurrentFileset->fid; + STimeWindow win = {0}; tsdbFidKeyRange(fid, pReader->pTsdb->keepCfg.days, pReader->pTsdb->keepCfg.precision, &win.skey, &win.ekey); int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? win.skey : win.ekey; @@ -4325,6 +4309,7 @@ static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) { } else { tsdbDebug("finished pre-fileset %d buffer processing. 
%s", fid, pReader->idStr); pStatus->bProcMemPreFileset = false; + pStatus->processingMemPreFileSet = false; if (pReader->notifyFn) { STsdReaderNotifyInfo info = {0}; info.duration.filesetId = fid; @@ -4337,8 +4322,8 @@ static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) { static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; - int32_t code = TSDB_CODE_SUCCESS; - SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; + int32_t code = TSDB_CODE_SUCCESS; + SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; if (pStatus->loadFromFile) { if (pStatus->bProcMemPreFileset) { @@ -4353,12 +4338,12 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { return code; } - tsdbTrace("block from file rows: %"PRId64", will process pre-file set buffer: %d. %s", - pBlock->info.rows, pStatus->bProcMemFirstFileset, pReader->idStr); + tsdbTrace("block from file rows: %" PRId64 ", will process pre-file set buffer: %d. %s", pBlock->info.rows, + pStatus->bProcMemFirstFileset, pReader->idStr); if (pStatus->bProcMemPreFileset) { if (pBlock->info.rows > 0) { - if (pReader->notifyFn) { - int32_t fid = pReader->status.pCurrentFileset->fid; + if (pReader->notifyFn && !pReader->status.processingMemPreFileSet) { + int32_t fid = pReader->status.pCurrentFileset->fid; STsdReaderNotifyInfo info = {0}; info.duration.filesetId = fid; pReader->notifyFn(TSD_READER_NOTIFY_NEXT_DURATION_BLOCK, &info, pReader->notifyParam); @@ -4367,7 +4352,7 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { pStatus->bProcMemPreFileset = false; } } - + if (pBlock->info.rows <= 0) { resetTableListIndex(&pReader->status); int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN; @@ -4382,8 +4367,8 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { static int32_t doTsdbNextDataBlockFilesFirst(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; - int32_t code = TSDB_CODE_SUCCESS; - SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; + int32_t code = TSDB_CODE_SUCCESS; + SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock; if (pStatus->loadFromFile) { code = buildBlockFromFiles(pReader); @@ -4615,7 +4600,9 @@ int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock, // int64_t st = taosGetTimestampUs(); TARRAY2_CLEAR(&pSup->colAggArray, 0); - code = tsdbDataFileReadBlockSma(pReader->pFileReader, &pFBlock->record, &pSup->colAggArray); + SBrinRecord pRecord; + blockInfoToRecord(&pRecord, pFBlock); + code = tsdbDataFileReadBlockSma(pReader->pFileReader, &pRecord, &pSup->colAggArray); if (code != TSDB_CODE_SUCCESS) { tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pFBlock->uid, tstrerror(code), pReader->idStr); @@ -4646,7 +4633,7 @@ int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock, } // do fill all null column value SMA info - doFillNullColSMA(pSup, pFBlock->record.numRow, numOfCols, pTsAgg); + doFillNullColSMA(pSup, pFBlock->numRow, numOfCols, pTsAgg); size_t size = pSup->colAggArray.size; @@ -4867,7 +4854,7 @@ int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT while (true) { if (hasNext) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); - int32_t numOfRows = pBlockInfo->record.numRow; + int32_t numOfRows = pBlockInfo->numRow; pTableBlockInfo->totalRows += numOfRows; @@ -4879,7 +4866,7 @@ int32_t 
tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT pTableBlockInfo->minRows = numOfRows; } - pTableBlockInfo->totalSize += pBlockInfo->record.blockSize; + pTableBlockInfo->totalSize += pBlockInfo->blockSize; int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows, numOfBuckets); pTableBlockInfo->blockRowsHisto[bucketIndex]++; @@ -4918,7 +4905,7 @@ static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t int64_t minKey = INT64_MAX; void* pHashIter = tSimpleHashIterate(pStatus->pTableMap, NULL, &iter); - while (pHashIter!= NULL) { + while (pHashIter != NULL) { STableBlockScanInfo* pBlockScanInfo = *(STableBlockScanInfo**)pHashIter; STbData* d = NULL; @@ -5122,7 +5109,6 @@ void tsdbUntakeReadSnap2(STsdbReader* pReader, STsdbReadSnap* pSnap, bool proact tsdbUnrefMemTable(pSnap->pIMem, pSnap->pINode, proactive); } - tsdbFSUnref(pTsdb, &pSnap->fs); if (pSnap->pNode) taosMemoryFree(pSnap->pNode); if (pSnap->pINode) taosMemoryFree(pSnap->pINode); @@ -5143,9 +5129,7 @@ void tsdbReaderSetId2(STsdbReader* pReader, const char* idstr) { void tsdbReaderSetCloseFlag(STsdbReader* pReader) { /*pReader->code = TSDB_CODE_TSC_QUERY_CANCELLED;*/ } -void tsdbSetFilesetDelimited(STsdbReader* pReader) { - pReader->bFilesetDelimited = true; -} +void tsdbSetFilesetDelimited(STsdbReader* pReader) { pReader->bFilesetDelimited = true; } void tsdbReaderSetNotifyCb(STsdbReader* pReader, TsdReaderNotifyCbFn notifyFn, void* param) { pReader->notifyFn = notifyFn; diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c index a223a2dc2d..33604d21de 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c @@ -22,9 +22,8 @@ #include "tsdbUtil2.h" #include "tsimplehash.h" -#define INIT_TIMEWINDOW(_w) do { (_w)->skey = INT64_MAX; (_w)->ekey = INT64_MIN;} while(0); - -static bool overlapWithDelSkylineWithoutVer(STableBlockScanInfo* pBlockScanInfo, const SBrinRecord* pRecord, int32_t order); +static bool overlapWithDelSkylineWithoutVer(STableBlockScanInfo* pBlockScanInfo, const SBrinRecord* pRecord, + int32_t order); static int32_t initBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables) { int32_t num = numOfTables / pBuf->numPerBucket; @@ -160,6 +159,9 @@ SSHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf INIT_TIMEWINDOW(&pScanInfo->sttWindow); INIT_TIMEWINDOW(&pScanInfo->filesetWindow); + pScanInfo->cleanSttBlocks = false; + pScanInfo->sttBlockReturned = false; + pUidList->tableUidList[j] = idList[j].uid; if (ASCENDING_TRAVERSE(pTsdbReader->info.order)) { @@ -354,6 +356,21 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v return pLeftBlock->offset > pRightBlock->offset ? 
1 : -1; } +static void recordToBlockInfo(SFileDataBlockInfo* pBlockInfo, SBrinRecord* record) { + pBlockInfo->uid = record->uid; + pBlockInfo->firstKey = record->firstKey; + pBlockInfo->lastKey = record->lastKey; + pBlockInfo->minVer = record->minVer; + pBlockInfo->maxVer = record->maxVer; + pBlockInfo->blockOffset = record->blockOffset; + pBlockInfo->smaOffset = record->smaOffset; + pBlockInfo->blockSize = record->blockSize; + pBlockInfo->blockKeySize = record->blockKeySize; + pBlockInfo->smaSize = record->smaSize; + pBlockInfo->numRow = record->numRow; + pBlockInfo->count = record->count; +} + int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks, SArray* pTableList) { bool asc = ASCENDING_TRAVERSE(pReader->info.order); @@ -411,8 +428,9 @@ int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int3 pTableScanInfo->pBlockIdxList = taosArrayInit(numOfBlocks, sizeof(STableDataBlockIdx)); } for (int32_t i = 0; i < numOfBlocks; ++i) { - SFileDataBlockInfo blockInfo = {.uid = sup.pDataBlockInfo[0][i].uid, .tbBlockIdx = i}; - blockInfo.record = *(SBrinRecord*)taosArrayGet(sup.pDataBlockInfo[0][i].pInfo->pBlockList, i); + SFileDataBlockInfo blockInfo = {.tbBlockIdx = i}; + SBrinRecord* record = (SBrinRecord*)taosArrayGet(sup.pDataBlockInfo[0][i].pInfo->pBlockList, i); + recordToBlockInfo(&blockInfo, record); taosArrayPush(pBlockIter->blockList, &blockInfo); STableDataBlockIdx tableDataBlockIdx = {.globalIndex = i}; @@ -445,11 +463,12 @@ int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int3 int32_t pos = tMergeTreeGetChosenIndex(pTree); int32_t index = sup.indexPerTable[pos]++; - SFileDataBlockInfo blockInfo = {.uid = sup.pDataBlockInfo[pos][index].uid, .tbBlockIdx = index}; - blockInfo.record = *(SBrinRecord*)taosArrayGet(sup.pDataBlockInfo[pos][index].pInfo->pBlockList, index); + SFileDataBlockInfo blockInfo = {.tbBlockIdx = index}; + SBrinRecord* record = (SBrinRecord*)taosArrayGet(sup.pDataBlockInfo[pos][index].pInfo->pBlockList, index); + recordToBlockInfo(&blockInfo, record); taosArrayPush(pBlockIter->blockList, &blockInfo); - STableBlockScanInfo *pTableScanInfo = sup.pDataBlockInfo[pos][index].pInfo; + STableBlockScanInfo* pTableScanInfo = sup.pDataBlockInfo[pos][index].pInfo; if (pTableScanInfo->pBlockIdxList == NULL) { size_t szTableDataBlocks = taosArrayGetSize(pTableScanInfo->pBlockList); pTableScanInfo->pBlockIdxList = taosArrayInit(szTableDataBlocks, sizeof(STableDataBlockIdx)); @@ -504,7 +523,7 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_ int32_t code = 0; STombRecord record = {0}; - uint64_t uid = pReader->status.uidList.tableUidList[*j]; + uint64_t uid = pReader->status.uidList.tableUidList[*j]; STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr); if (pScanInfo->pFileDelData == NULL) { pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData)); } @@ -595,12 +614,12 @@ static int32_t doLoadTombDataFromTombBlk(const TTombBlkArray* pTombBlkArray, STs return code; } -// uint64_t uid = pReader->status.uidList.tableUidList[j]; + // uint64_t uid = pReader->status.uidList.tableUidList[j]; -// STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr); -// if (pScanInfo->pFileDelData == NULL) { -// pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData)); -// } + // STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid,
pReader->idStr); + // if (pScanInfo->pFileDelData == NULL) { + // pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData)); + // } ETombBlkCheckEnum ret = 0; code = doCheckTombBlock(&block, pReader, numOfTables, &j, &ret); @@ -650,6 +669,7 @@ void loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piMemT SDelData* p = NULL; if (pMemTbData != NULL) { + taosRLockLatch(&pMemTbData->lock); p = pMemTbData->pHead; while (p) { if (p->version <= ver) { @@ -658,6 +678,7 @@ void loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piMemT p = p->pNext; } + taosRUnLockLatch(&pMemTbData->lock); } if (piMemTbData != NULL) { @@ -681,7 +702,7 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo } int32_t i = 0; - while((i < TARRAY2_SIZE(pStatisBlkArray)) && (pStatisBlkArray->data[i].maxTbid.suid < suid)) { + while ((i < TARRAY2_SIZE(pStatisBlkArray)) && (pStatisBlkArray->data[i].maxTbid.suid < suid)) { ++i; } @@ -689,7 +710,7 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo return 0; } - SStatisBlk *p = &pStatisBlkArray->data[i]; + SStatisBlk* p = &pStatisBlkArray->data[i]; STbStatisBlock* pStatisBlock = taosMemoryCalloc(1, sizeof(STbStatisBlock)); tStatisBlockInit(pStatisBlock); @@ -759,14 +780,14 @@ void doAdjustValidDataIters(SArray* pLDIterList, int32_t numOfFileObj) { if (size < numOfFileObj) { int32_t inc = numOfFileObj - size; for (int32_t k = 0; k < inc; ++k) { - SLDataIter *pIter = taosMemoryCalloc(1, sizeof(SLDataIter)); + SLDataIter* pIter = taosMemoryCalloc(1, sizeof(SLDataIter)); taosArrayPush(pLDIterList, &pIter); } } else if (size > numOfFileObj) { // remove unused LDataIter int32_t inc = size - numOfFileObj; for (int i = 0; i < inc; ++i) { - SLDataIter *pIter = taosArrayPop(pLDIterList); + SLDataIter* pIter = taosArrayPop(pLDIterList); destroyLDataIter(pIter); } } @@ -781,9 +802,9 @@ int32_t adjustSttDataIters(SArray* pSttFileBlockIterArray, STFileSet* pFileSet) taosArrayPush(pSttFileBlockIterArray, &pList); } - for(int32_t j = 0; j < numOfLevels; ++j) { + for (int32_t j = 0; j < numOfLevels; ++j) { SSttLvl* pSttLevel = pFileSet->lvlArr->data[j]; - SArray* pList = taosArrayGetP(pSttFileBlockIterArray, j); + SArray* pList = taosArrayGetP(pSttFileBlockIterArray, j); doAdjustValidDataIters(pList, TARRAY2_SIZE(pSttLevel->fobjArr)); } @@ -829,8 +850,8 @@ int32_t tsdbGetRowsInSttFiles(STFileSet* pFileSet, SArray* pSttFileBlockIterArra } // load stt blocks statis for all stt-blocks, to decide if the data of queried table exists in current stt file - TStatisBlkArray *pStatisBlkArray = NULL; - int32_t code = tsdbSttFileReadStatisBlk(pIter->pReader, (const TStatisBlkArray **)&pStatisBlkArray); + TStatisBlkArray* pStatisBlkArray = NULL; + int32_t code = tsdbSttFileReadStatisBlk(pIter->pReader, (const TStatisBlkArray**)&pStatisBlkArray); if (code != TSDB_CODE_SUCCESS) { tsdbError("failed to load stt block statistics, code:%s, %s", tstrerror(code), pstr); continue; @@ -899,11 +920,12 @@ static int32_t sortUidComparFn(const void* p1, const void* p2) { if (px1->skey == px2->skey) { return 0; } else { - return px1->skey < px2->skey? -1:1; + return px1->skey < px2->skey ? 
-1 : 1; } } -bool isCleanSttBlock(SArray* pTimewindowList, STimeWindow* pQueryWindow, STableBlockScanInfo *pScanInfo, int32_t order) { +bool isCleanSttBlock(SArray* pTimewindowList, STimeWindow* pQueryWindow, STableBlockScanInfo* pScanInfo, + int32_t order) { // check if it overlap with del skyline taosArraySort(pTimewindowList, sortUidComparFn); @@ -935,7 +957,7 @@ bool isCleanSttBlock(SArray* pTimewindowList, STimeWindow* pQueryWindow, STableB } static bool doCheckDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBrinRecord* pRecord, - int32_t startIndex) { + int32_t startIndex) { size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline); for (int32_t i = startIndex; i < num; i += 1) { diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h index 3679015e9c..16ee90823b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h @@ -26,6 +26,12 @@ extern "C" { #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) +#define INIT_TIMEWINDOW(_w) \ + do { \ + (_w)->skey = INT64_MAX; \ + (_w)->ekey = INT64_MIN; \ + } while (0) + typedef enum { READER_STATUS_SUSPEND = 0x1, READER_STATUS_NORMAL = 0x2, @@ -175,9 +181,22 @@ typedef struct SFilesetIter { typedef struct SFileDataBlockInfo { // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it - uint64_t uid; - int32_t tbBlockIdx; - SBrinRecord record; + // int64_t suid; + int64_t uid; + int64_t firstKey; +// int64_t firstKeyVer; + int64_t lastKey; +// int64_t lastKeyVer; + int64_t minVer; + int64_t maxVer; + int64_t blockOffset; + int64_t smaOffset; + int32_t blockSize; + int32_t blockKeySize; + int32_t smaSize; + int32_t numRow; + int32_t count; + int32_t tbBlockIdx; } SFileDataBlockInfo; typedef struct SDataBlockIter { @@ -197,6 +216,7 @@ typedef struct SFileBlockDumpInfo { } SFileBlockDumpInfo; typedef struct SReaderStatus { + bool suspendInvoked; bool loadFromFile; // check file stage bool composedDataBlock; // the returned data block is a composed block or not SSHashObj* pTableMap; // SHash @@ -210,7 +230,7 @@ typedef struct SReaderStatus { SArray* pLDataIterArray; SRowMerger merger; SColumnInfoData* pPrimaryTsCol; // primary time stamp output col info data - // the following for preceeds fileset memory processing + // the following is for preceding fileset memory processing // TODO: refactor into separate struct bool bProcMemPreFileset; int64_t memTableMaxKey; @@ -218,6 +238,7 @@ int64_t prevFilesetStartKey; int64_t prevFilesetEndKey; bool bProcMemFirstFileset; + bool processingMemPreFileSet; STableUidList procMemUidList; STableBlockScanInfo** pProcMemTableIter; } SReaderStatus; diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index e6f419362c..babf8c75fb 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -433,635 +433,6 @@ _exit: return code; } -// SDataFWriter ==================================================== -int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) { - int32_t code = 0; - int32_t flag; - int64_t n; - int32_t szPage = pTsdb->pVnode->config.tsdbPageSize; - SDataFWriter *pWriter = NULL; - char fname[TSDB_FILENAME_LEN]; - char hdr[TSDB_FHDR_SIZE] = {0}; - - // alloc - pWriter = taosMemoryCalloc(1, sizeof(*pWriter)); - if (pWriter == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - pWriter->pTsdb
= pTsdb; - pWriter->wSet = (SDFileSet){.diskId = pSet->diskId, - .fid = pSet->fid, - .pHeadF = &pWriter->fHead, - .pDataF = &pWriter->fData, - .pSmaF = &pWriter->fSma, - .nSttF = pSet->nSttF}; - pWriter->fHead = *pSet->pHeadF; - pWriter->fData = *pSet->pDataF; - pWriter->fSma = *pSet->pSmaF; - for (int8_t iStt = 0; iStt < pSet->nSttF; iStt++) { - pWriter->wSet.aSttF[iStt] = &pWriter->fStt[iStt]; - pWriter->fStt[iStt] = *pSet->aSttF[iStt]; - } - - // head - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pHeadFD); - if (code) goto _err; - - code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fHead.size += TSDB_FHDR_SIZE; - - // data - if (pWriter->fData.size == 0) { - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - } else { - flag = TD_FILE_READ | TD_FILE_WRITE; - } - tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pDataFD); - if (code) goto _err; - if (pWriter->fData.size == 0) { - code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fData.size += TSDB_FHDR_SIZE; - } - - // sma - if (pWriter->fSma.size == 0) { - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - } else { - flag = TD_FILE_READ | TD_FILE_WRITE; - } - tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSmaFD); - if (code) goto _err; - if (pWriter->fSma.size == 0) { - code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - pWriter->fSma.size += TSDB_FHDR_SIZE; - } - - // stt - ASSERT(pWriter->fStt[pSet->nSttF - 1].size == 0); - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - tsdbSttFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fStt[pSet->nSttF - 1], fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSttFD); - if (code) goto _err; - code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fStt[pWriter->wSet.nSttF - 1].size += TSDB_FHDR_SIZE; - - *ppWriter = pWriter; - return code; - -_err: - tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - *ppWriter = NULL; - return code; -} - -int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) { - int32_t code = 0; - STsdb *pTsdb = NULL; - - if (*ppWriter == NULL) goto _exit; - - pTsdb = (*ppWriter)->pTsdb; - if (sync) { - code = tsdbFsyncFile((*ppWriter)->pHeadFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pDataFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pSmaFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pSttFD); - if (code) goto _err; - } - - tsdbCloseFile(&(*ppWriter)->pHeadFD); - tsdbCloseFile(&(*ppWriter)->pDataFD); - tsdbCloseFile(&(*ppWriter)->pSmaFD); - tsdbCloseFile(&(*ppWriter)->pSttFD); - - for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) { - tFree((*ppWriter)->aBuf[iBuf]); - } - taosMemoryFree(*ppWriter); -_exit: - *ppWriter = NULL; - return code; - -_err: - tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t 
tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) { - int32_t code = 0; - int64_t n; - char hdr[TSDB_FHDR_SIZE]; - - // head ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutHeadFile(hdr, &pWriter->fHead); - code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // data ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutDataFile(hdr, &pWriter->fData); - code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // sma ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutSmaFile(hdr, &pWriter->fSma); - code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // stt ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutSttFile(hdr, &pWriter->fStt[pWriter->wSet.nSttF - 1]); - code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - return code; - -_err: - tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) { - int32_t code = 0; - SHeadFile *pHeadFile = &pWriter->fHead; - int64_t size; - int64_t n; - - // check - if (taosArrayGetSize(aBlockIdx) == 0) { - pHeadFile->offset = pHeadFile->size; - goto _exit; - } - - // prepare - size = 0; - for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { - size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { - n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pHeadFile->offset = pHeadFile->size; - pHeadFile->size += size; - -_exit: - // tsdbTrace("vgId:%d, write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", - // TD_VID(pWriter->pTsdb->pVnode), - // pHeadFile->offset, size, taosArrayGetSize(aBlockIdx)); - return code; - -_err: - tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx) { - int32_t code = 0; - SHeadFile *pHeadFile = &pWriter->fHead; - int64_t size; - int64_t n; - - ASSERT(mDataBlk->nItem > 0); - - // alloc - size = tPutMapData(NULL, mDataBlk); - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = tPutMapData(pWriter->aBuf[0], mDataBlk); - - // write - code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pBlockIdx->offset = pHeadFile->size; - pBlockIdx->size = size; - pHeadFile->size += size; - - tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%" PRId64 " suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64 - " size:%" PRId64 " nItem:%d", - TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid, - pBlockIdx->offset, pBlockIdx->size, mDataBlk->nItem); - return code; - -_err: - tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk) { - int32_t code = 0; - SSttFile *pSttFile = 
&pWriter->fStt[pWriter->wSet.nSttF - 1]; - int64_t size = 0; - int64_t n; - - // check - if (taosArrayGetSize(aSttBlk) == 0) { - pSttFile->offset = pSttFile->size; - goto _exit; - } - - // size - size = 0; - for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) { - size += tPutSttBlk(NULL, taosArrayGet(aSttBlk, iBlockL)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // encode - n = 0; - for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) { - n += tPutSttBlk(pWriter->aBuf[0] + n, taosArrayGet(aSttBlk, iBlockL)); - } - - // write - code = tsdbWriteFile(pWriter->pSttFD, pSttFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pSttFile->offset = pSttFile->size; - pSttFile->size += size; - -_exit: - tsdbTrace("vgId:%d, tsdb write stt block, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), - pSttFile->offset, size); - return code; - -_err: - tsdbError("vgId:%d, tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) { - int32_t code = 0; - - pSmaInfo->offset = 0; - pSmaInfo->size = 0; - - // encode - for (int32_t iColData = 0; iColData < pBlockData->nColData; iColData++) { - SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - - if ((!pColData->smaOn) || ((pColData->flag & HAS_VALUE) == 0)) continue; - - SColumnDataAgg sma = {.colId = pColData->cid}; - tColDataCalcSMA[pColData->type](pColData, &sma.sum, &sma.max, &sma.min, &sma.numOfNull); - - code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma)); - if (code) goto _err; - pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma); - } - - // write - if (pSmaInfo->size) { - code = tsdbWriteFile(pWriter->pSmaFD, pWriter->fSma.size, pWriter->aBuf[0], pSmaInfo->size); - if (code) goto _err; - - pSmaInfo->offset = pWriter->fSma.size; - pWriter->fSma.size += pSmaInfo->size; - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, - int8_t cmprAlg, int8_t toLast) { - int32_t code = 0; - - ASSERT(pBlockData->nRow > 0); - - if (toLast) { - pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size; - } else { - pBlkInfo->offset = pWriter->fData.size; - } - pBlkInfo->szBlock = 0; - pBlkInfo->szKey = 0; - - int32_t aBufN[4] = {0}; - code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN); - if (code) goto _err; - - // write ================= - STsdbFD *pFD = toLast ? 
pWriter->pSttFD : pWriter->pDataFD; - - pBlkInfo->szKey = aBufN[3] + aBufN[2]; - pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3]; - - int64_t offset = pBlkInfo->offset; - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[3], aBufN[3]); - if (code) goto _err; - offset += aBufN[3]; - - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[2], aBufN[2]); - if (code) goto _err; - offset += aBufN[2]; - - if (aBufN[1]) { - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[1], aBufN[1]); - if (code) goto _err; - offset += aBufN[1]; - } - - if (aBufN[0]) { - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[0], aBufN[0]); - if (code) goto _err; - } - - // update info - if (toLast) { - pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock; - } else { - pWriter->fData.size += pBlkInfo->szBlock; - } - - // ================= SMA ==================== - if (pSmaInfo) { - code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo); - if (code) goto _err; - } - -_exit: - tsdbTrace("vgId:%d, tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d", - TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset, - pBlkInfo->szBlock); - return code; - -_err: - tsdbError("vgId:%d, tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDiskData(SDataFWriter *pWriter, const SDiskData *pDiskData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo) { - int32_t code = 0; - int32_t lino = 0; - - STsdbFD *pFD = NULL; - if (pSmaInfo) { - pFD = pWriter->pDataFD; - pBlkInfo->offset = pWriter->fData.size; - } else { - pFD = pWriter->pSttFD; - pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size; - } - pBlkInfo->szBlock = 0; - pBlkInfo->szKey = 0; - - // hdr - int32_t n = tPutDiskDataHdr(NULL, &pDiskData->hdr); - code = tRealloc(&pWriter->aBuf[0], n); - TSDB_CHECK_CODE(code, lino, _exit); - - tPutDiskDataHdr(pWriter->aBuf[0], &pDiskData->hdr); - - code = tsdbWriteFile(pFD, pBlkInfo->offset, pWriter->aBuf[0], n); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += n; - pBlkInfo->szBlock += n; - - // uid + ver + key - if (pDiskData->pUid) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pUid, pDiskData->hdr.szUid); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szUid; - pBlkInfo->szBlock += pDiskData->hdr.szUid; - } - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pVer, pDiskData->hdr.szVer); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szVer; - pBlkInfo->szBlock += pDiskData->hdr.szVer; - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pKey, pDiskData->hdr.szKey); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szKey; - pBlkInfo->szBlock += pDiskData->hdr.szKey; - - // aBlockCol - if (pDiskData->hdr.szBlkCol) { - code = tRealloc(&pWriter->aBuf[0], pDiskData->hdr.szBlkCol); - TSDB_CHECK_CODE(code, lino, _exit); - - n = 0; - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - n += tPutBlockCol(pWriter->aBuf[0] + n, pDiskCol); - } - ASSERT(n == pDiskData->hdr.szBlkCol); - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pWriter->aBuf[0], pDiskData->hdr.szBlkCol); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskData->hdr.szBlkCol; - } - - 
// aDiskCol - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - - if (pDiskCol->pBit) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pBit, pDiskCol->bCol.szBitmap); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szBitmap; - } - - if (pDiskCol->pOff) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pOff, pDiskCol->bCol.szOffset); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szOffset; - } - - if (pDiskCol->pVal) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pVal, pDiskCol->bCol.szValue); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szValue; - } - } - - if (pSmaInfo) { - pWriter->fData.size += pBlkInfo->szBlock; - } else { - pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock; - goto _exit; - } - - pSmaInfo->offset = 0; - pSmaInfo->size = 0; - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - - if (IS_VAR_DATA_TYPE(pDiskCol->bCol.type)) continue; - if (pDiskCol->bCol.flag == HAS_NULL || pDiskCol->bCol.flag == (HAS_NULL | HAS_NONE)) continue; - if (!pDiskCol->bCol.smaOn) continue; - - code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &pDiskCol->agg)); - TSDB_CHECK_CODE(code, lino, _exit); - pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &pDiskCol->agg); - } - - if (pSmaInfo->size) { - pSmaInfo->offset = pWriter->fSma.size; - - code = tsdbWriteFile(pWriter->pSmaFD, pSmaInfo->offset, pWriter->aBuf[0], pSmaInfo->size); - TSDB_CHECK_CODE(code, lino, _exit); - - pWriter->fSma.size += pSmaInfo->size; - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) { - int32_t code = 0; - int64_t n; - int64_t size; - TdFilePtr pOutFD = NULL; - TdFilePtr PInFD = NULL; - int32_t szPage = pTsdb->pVnode->config.tsdbPageSize; - char fNameFrom[TSDB_FILENAME_LEN]; - char fNameTo[TSDB_FILENAME_LEN]; - - // head - tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom); - tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pHeadF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // data - tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom); - tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = 
taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pDataF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // sma - tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom); - tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pSmaF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // stt - for (int8_t iStt = 0; iStt < pSetFrom->nSttF; iStt++) { - tsdbSttFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->aSttF[iStt], fNameFrom); - tsdbSttFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->aSttF[iStt], fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->aSttF[iStt]->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - // SDataFReader ==================================================== int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) { int32_t code = 0; @@ -1478,173 +849,6 @@ _exit: return code; } -// SDelFWriter ==================================================== -int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) { - int32_t code = 0; - int32_t lino = 0; - char fname[TSDB_FILENAME_LEN]; - uint8_t hdr[TSDB_FHDR_SIZE] = {0}; - SDelFWriter *pDelFWriter = NULL; - int64_t n; - - // alloc - pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter)); - if (pDelFWriter == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pDelFWriter->pTsdb = pTsdb; - pDelFWriter->fDel = *pFile; - - tsdbDelFileName(pTsdb, pFile, fname); - code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE, &pDelFWriter->pWriteH); - TSDB_CHECK_CODE(code, lino, _exit); - - // update header - code = tsdbWriteFile(pDelFWriter->pWriteH, 0, hdr, TSDB_FHDR_SIZE); - TSDB_CHECK_CODE(code, lino, _exit); - - pDelFWriter->fDel.size = TSDB_FHDR_SIZE; - pDelFWriter->fDel.offset = 0; - - *ppWriter = pDelFWriter; - -_exit: - if (code) { - if (pDelFWriter) { - tsdbCloseFile(&pDelFWriter->pWriteH); - taosMemoryFree(pDelFWriter); - } - *ppWriter = NULL; - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(errno)); - } else { - *ppWriter = pDelFWriter; - } - return code; -} - -int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) { - int32_t code = 0; - SDelFWriter *pWriter = *ppWriter; - STsdb *pTsdb = pWriter->pTsdb; - - // sync - if (sync) { - code = tsdbFsyncFile(pWriter->pWriteH); - if (code) goto _err; - } - - // close - 
tsdbCloseFile(&pWriter->pWriteH); - - for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) { - tFree(pWriter->aBuf[iBuf]); - } - taosMemoryFree(pWriter); - - *ppWriter = NULL; - return code; - -_err: - tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) { - int32_t code = 0; - int64_t size; - int64_t n; - - // prepare - size = 0; - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { - size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { - n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pDelIdx->offset = pWriter->fDel.size; - pDelIdx->size = size; - pWriter->fDel.size += size; - - return code; - -_err: - tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) { - int32_t code = 0; - int64_t size; - int64_t n; - SDelIdx *pDelIdx; - - // prepare - size = 0; - for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { - size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { - n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pWriter->fDel.offset = pWriter->fDel.size; - pWriter->fDel.size += size; - - return code; - -_err: - tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) { - int32_t code = 0; - char hdr[TSDB_FHDR_SIZE] = {0}; - int64_t size = TSDB_FHDR_SIZE; - int64_t n; - - // build - tPutDelFile(hdr, &pWriter->fDel); - - // write - code = tsdbWriteFile(pWriter->pWriteH, 0, hdr, size); - if (code) goto _err; - - return code; - -_err: - tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} // SDelFReader ==================================================== struct SDelFReader { STsdb *pTsdb; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index afb8f962b3..db807d000b 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -1266,6 +1266,7 @@ _exit: return 0; } +#ifdef BUILD_NO_CALL static int32_t vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, SSubmitMsgIter *msgIter, const char *tags) { SSubmitBlkIter blkIter = {0}; @@ -1296,7 +1297,7 @@ static int32_t vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, return TSDB_CODE_SUCCESS; } - +#endif typedef struct SSubmitReqConvertCxt { SSubmitMsgIter msgIter; SSubmitBlk *pBlock; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c 
b/source/dnode/vnode/src/vnd/vnodeSync.c index 5871a60c9e..e6a80757ff 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -14,11 +14,11 @@ */ #define _DEFAULT_SOURCE -#include "tq.h" #include "sync.h" +#include "tq.h" +#include "tqCommon.h" #include "tsdb.h" #include "vnd.h" -#include "tqCommon.h" #define BATCH_ENABLE 0 @@ -220,8 +220,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle); if (!pVnode->restored) { - vGError("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, - TMSG_INFO(pMsg->msgType)); + vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, + TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_SYN_RESTORING; vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING); rpcFreeCont(pMsg->pCont); @@ -284,8 +284,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) vnodeIsMsgBlock(pMsg->msgType), msg, numOfMsgs, pMsg->info.handle); if (!pVnode->restored) { - vGError("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, - TMSG_INFO(pMsg->msgType)); + vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, + TMSG_INFO(pMsg->msgType)); vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); @@ -411,7 +411,7 @@ static int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { } static int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { - int32_t code = tmsgSendReq(pEpSet, pMsg); + int32_t code = tmsgSendSyncReq(pEpSet, pMsg); if (code != 0) { rpcFreeCont(pMsg->pCont); pMsg->pCont = NULL; @@ -477,8 +477,8 @@ static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta } static int32_t vnodeSnapshotStartRead(const SSyncFSM *pFsm, void *pParam, void **ppReader) { - SVnode *pVnode = pFsm->data; - int32_t code = vnodeSnapReaderOpen(pVnode, (SSnapshotParam *)pParam, (SVSnapReader **)ppReader); + SVnode *pVnode = pFsm->data; + int32_t code = vnodeSnapReaderOpen(pVnode, (SSnapshotParam *)pParam, (SVSnapReader **)ppReader); return code; } @@ -555,7 +555,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) walApplyVer(pVnode->pWal, commitIdx); pVnode->restored = true; - SStreamMeta* pMeta = pVnode->pTq->pStreamMeta; + SStreamMeta *pMeta = pVnode->pTq->pStreamMeta; streamMetaWLock(pMeta); if (pMeta->startInfo.tasksWillRestart) { @@ -569,9 +569,22 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) if (tsDisableStream) { vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId); } else { - vInfo("vgId:%d sync restore finished, start to launch stream tasks", pVnode->config.vgId); - resetStreamTaskStatus(pVnode->pTq->pStreamMeta); - tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false); + vInfo("vgId:%d sync restore finished, start to launch stream task(s)", pVnode->config.vgId); + int32_t numOfTasks = 0; + tqStreamTaskResetStatus(pMeta, &numOfTasks); + + if (numOfTasks > 0) { + if (pMeta->startInfo.taskStarting == 1) { + pMeta->startInfo.restartCount += 1; + tqDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId, + pMeta->startInfo.restartCount); + } else { + pMeta->startInfo.taskStarting = 1; + streamMetaWUnLock(pMeta); + tqStreamTaskStartAsync(pMeta, 
&pVnode->msgCb, false); + return; + } + } } } else { vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId); @@ -594,7 +607,7 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) { if (pVnode->pTq) { tqUpdateNodeStage(pVnode->pTq, false); - tqStopStreamTasks(pVnode->pTq); + tqStopStreamTasksAsync(pVnode->pTq); } } diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 946d3311a4..640ed2f2f9 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -105,6 +105,7 @@ typedef struct STableListInfo { int32_t* groupOffset; // keep the offset value for each group in the tableList SArray* pTableList; SHashObj* map; // speedup acquire the tableQueryInfo by table uid + SHashObj* remainGroups; // remaining groups that have not yet been processed for empty tables STableListIdInfo idInfo; // this maybe the super table or ordinary table } STableListInfo; diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index e3e504cdbc..c3f47cde9d 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -217,6 +217,13 @@ enum { TABLE_SCAN__BLOCK_ORDER = 2, }; +typedef enum ETableCountState { + TABLE_COUNT_STATE_NONE = 0, // before the scan starts + TABLE_COUNT_STATE_SCAN = 1, // current group is being scanned + TABLE_COUNT_STATE_PROCESSED = 2, // current group has been processed + TABLE_COUNT_STATE_END = 3, // finished, or no need to process +} ETableCountState; + typedef struct SAggSupporter { SSHashObj* pResultRowHashTable; // quick locate the window object for each result char* keyBuf; // window key buffer @@ -263,14 +270,16 @@ typedef struct STableScanInfo { SSDataBlock* pResBlock; SHashObj* pIgnoreTables; SSampleExecInfo sample; // sample execution info - int32_t currentGroupId; - int32_t currentTable; + int32_t tableStartIndex; // current group scan start + int32_t tableEndIndex; // current group scan end + int32_t currentGroupIndex; // current group index of groupOffset int8_t scanMode; int8_t assignBlockUid; + uint8_t countState; // empty table count state bool hasGroupByTag; bool countOnly; - // TsdReader readerAPI; bool filesetDelimited; + bool needCountEmptyTable; } STableScanInfo; typedef struct STableMergeScanInfo { @@ -298,7 +307,8 @@ typedef struct STableMergeScanInfo { SHashObj* mSkipTables; int64_t mergeLimit; SSortExecInfo sortExecInfo; - + bool needCountEmptyTable; + bool bGroupProcessed; // set when the current group has returned data, i.e. has been processed bool filesetDelimited; bool bNewFilesetEvent; bool bNextDurationBlockEvent; @@ -845,6 +855,8 @@ void resetWinRange(STimeWindow* winRange); bool checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, TSKEY ts); int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval); void resetUnCloseSessionWinInfo(SSHashObj* winMap); +void setStreamOperatorCompleted(struct SOperatorInfo* pOperator); +void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup); int32_t encodeSSessionKey(void** buf, SSessionKey* key); void* decodeSSessionKey(void* buf, SSessionKey* key); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 75fe3c51dd..9d4c20493a 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -54,10 +54,10 @@ static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColM #define SCAN_ROW_TYPE(_t) ((_t) ?
CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW) -static void setColIdForCacheReadBlock(SSDataBlock* pBlock, SNodeList* pTargets) { +static void setColIdForCacheReadBlock(SSDataBlock* pBlock, SLastRowScanPhysiNode* pScan) { SNode* pNode; int32_t idx = 0; - FOREACH(pNode, pTargets) { + FOREACH(pNode, pScan->pTargets) { if (nodeType(pNode) == QUERY_NODE_COLUMN) { SColumnNode* pCol = (SColumnNode*)pNode; SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx); @@ -65,6 +65,19 @@ static void setColIdForCacheReadBlock(SSDataBlock* pBlock, SNodeList* pTargets) } idx++; } + + for (; idx < pBlock->pDataBlock->size; ++idx) { + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx); + if (pScan->scan.pScanPseudoCols) { + FOREACH(pNode, pScan->scan.pScanPseudoCols) { + STargetNode* pTarget = (STargetNode*)pNode; + if (pColInfo->info.slotId == pTarget->slotId) { + pColInfo->info.colId = 0; + break; + } + } + } + } } SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, @@ -127,12 +140,12 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe capacity = TMIN(totalTables, 4096); pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false); - setColIdForCacheReadBlock(pInfo->pBufferredRes, pScanNode->pTargets); + setColIdForCacheReadBlock(pInfo->pBufferredRes, pScanNode); blockDataEnsureCapacity(pInfo->pBufferredRes, capacity); } else { // by tags pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE | SCAN_ROW_TYPE(pScanNode->ignoreNull); capacity = 1; // only one row output - setColIdForCacheReadBlock(pInfo->pRes, pScanNode->pTargets); + setColIdForCacheReadBlock(pInfo->pRes, pScanNode); } initResultSizeInfo(&pOperator->resultInfo, capacity); diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index ef8bf9674a..2797cb2d82 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -16,16 +16,14 @@ #include "executorInt.h" #include "filter.h" #include "function.h" -#include "index.h" #include "operator.h" -#include "os.h" #include "query.h" #include "querytask.h" #include "tdatablock.h" #include "thash.h" #include "tmsg.h" -#include "tname.h" #include "tref.h" +#include "trpc.h" typedef struct SFetchRspHandleWrapper { uint32_t exchangeId; @@ -131,14 +129,14 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn if (pRsp->completed == 1) { pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 - " execId:%d index:%d completed, blocks:%d, numOfRows:%" PRId64 ", rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 - ", total:%.2f Kb, try next %d/%" PRIzu, + " execId:%d index:%d completed, blocks:%d, numOfRows:%" PRId64 ", rowsOfSource:%" PRIu64 + ", totalRows:%" PRIu64 ", total:%.2f Kb, try next %d/%" PRIzu, GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks, pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0, i + 1, totalSources); } else { - qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 - " execId:%d blocks:%d, numOfRows:%" PRId64 ", totalRows:%" PRIu64 ", total:%.2f Kb", + qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%" PRId64 + ", totalRows:%" PRIu64 ", total:%.2f Kb", GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRsp->numOfBlocks, pRsp->numOfRows, 
pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0); } @@ -232,7 +230,7 @@ static SSDataBlock* loadRemoteData(SOperatorInfo* pOperator) { if (blockDataGetNumOfRows(pBlock) == 0) { continue; } - + SLimitInfo* pLimitInfo = &pExchangeInfo->limitInfo; if (hasLimitOffsetInfo(pLimitInfo)) { int32_t status = handleLimitOffset(pOperator, pLimitInfo, pBlock, false); @@ -261,7 +259,7 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const if (pInfo->dynamicOp) { return TSDB_CODE_SUCCESS; } - + for (int32_t i = 0; i < numOfSources; ++i) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; @@ -343,8 +341,8 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode goto _error; } - pOperator->fpSet = - createOperatorFpSet(prepareLoadRemoteData, loadRemoteData, NULL, destroyExchangeOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, loadRemoteData, NULL, destroyExchangeOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); return pOperator; _error: @@ -383,7 +381,7 @@ void doDestroyExchangeOperatorInfo(void* param) { blockDataDestroy(pExInfo->pDummyBlock); tSimpleHashCleanup(pExInfo->pHashSources); - + tsem_destroy(&pExInfo->ready); taosMemoryFreeClear(param); } @@ -411,13 +409,18 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) { pRsp->useconds = htobe64(pRsp->useconds); pRsp->numOfBlocks = htonl(pRsp->numOfBlocks); - qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%" PRId64 ", %p", pSourceDataInfo->taskId, index, pRsp->numOfBlocks, - pRsp->numOfRows, pExchangeInfo); + qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%" PRId64 ", %p", pSourceDataInfo->taskId, index, + pRsp->numOfBlocks, pRsp->numOfRows, pExchangeInfo); } else { taosMemoryFree(pMsg->pData); - pSourceDataInfo->code = code; - qDebug("%s fetch rsp received, index:%d, error:%s, %p", pSourceDataInfo->taskId, index, tstrerror(code), - pExchangeInfo); + pSourceDataInfo->code = rpcCvtErrCode(code); + if (pSourceDataInfo->code != code) { + qError("%s fetch rsp received, index:%d, error:%s, cvted error: %s, %p", pSourceDataInfo->taskId, index, + tstrerror(code), tstrerror(pSourceDataInfo->code), pExchangeInfo); + } else { + qError("%s fetch rsp received, index:%d, error:%s, %p", pSourceDataInfo->taskId, index, tstrerror(code), + pExchangeInfo); + } } pSourceDataInfo->status = EX_SOURCE_DATA_READY; @@ -450,7 +453,7 @@ int32_t buildTableScanOperatorParam(SOperatorParam** ppRes, SArray* pUidList, in return TSDB_CODE_OUT_OF_MEMORY; } pScan->tableSeq = tableSeq; - + (*ppRes)->opType = srcOpType; (*ppRes)->downstreamIdx = 0; (*ppRes)->value = pScan; @@ -459,9 +462,8 @@ int32_t buildTableScanOperatorParam(SOperatorParam** ppRes, SArray* pUidList, in return TSDB_CODE_SUCCESS; } - int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTaskInfo, int32_t sourceIndex) { - SSourceDataInfo* pDataInfo = taosArrayGet(pExchangeInfo->pSourceDataInfo, sourceIndex); + SSourceDataInfo* pDataInfo = taosArrayGet(pExchangeInfo->pSourceDataInfo, sourceIndex); if (EX_SOURCE_DATA_NOT_READY != pDataInfo->status) { return TSDB_CODE_SUCCESS; } @@ -490,7 +492,8 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas req.queryId = pTaskInfo->id.queryId; req.execId = pSource->execId; if (pDataInfo->pSrcUidList) { - int32_t code = buildTableScanOperatorParam(&req.pOpParam, pDataInfo->pSrcUidList, 
pDataInfo->srcOpType, pDataInfo->tableSeq); + int32_t code = + buildTableScanOperatorParam(&req.pOpParam, pDataInfo->pSrcUidList, pDataInfo->srcOpType, pDataInfo->tableSeq); taosArrayDestroy(pDataInfo->pSrcUidList); pDataInfo->pSrcUidList = NULL; if (TSDB_CODE_SUCCESS != code) { @@ -499,7 +502,7 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas return pTaskInfo->code; } } - + int32_t msgSize = tSerializeSResFetchReq(NULL, 0, &req); if (msgSize < 0) { pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; @@ -741,8 +744,8 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) { SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp; if (pRsp->completed == 1) { - qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64 ", rowsOfSource:%" PRIu64 - ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu, + qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64 + ", rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu, GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1, totalSources); @@ -769,7 +772,7 @@ _error: } int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasicParam* pBasicParam) { - SExchangeInfo* pExchangeInfo = pOperator->info; + SExchangeInfo* pExchangeInfo = pOperator->info; SExchangeSrcIndex* pIdx = tSimpleHashGet(pExchangeInfo->pHashSources, &pBasicParam->vgId, sizeof(pBasicParam->vgId)); if (NULL == pIdx) { qError("No exchange source for vgId: %d", pBasicParam->vgId); @@ -784,7 +787,7 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL); dataInfo.srcOpType = pBasicParam->srcOpType; dataInfo.tableSeq = pBasicParam->tableSeq; - + taosArrayPush(pExchangeInfo->pSourceDataInfo, &dataInfo); pIdx->inUseIdx = taosArrayGetSize(pExchangeInfo->pSourceDataInfo) - 1; } else { @@ -800,15 +803,14 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic return TSDB_CODE_SUCCESS; } - int32_t addDynamicExchangeSource(SOperatorInfo* pOperator) { - SExchangeInfo* pExchangeInfo = pOperator->info; - int32_t code = TSDB_CODE_SUCCESS; + SExchangeInfo* pExchangeInfo = pOperator->info; + int32_t code = TSDB_CODE_SUCCESS; SExchangeOperatorBasicParam* pBasicParam = NULL; - SExchangeOperatorParam* pParam = (SExchangeOperatorParam*)pOperator->pOperatorGetParam->value; + SExchangeOperatorParam* pParam = (SExchangeOperatorParam*)pOperator->pOperatorGetParam->value; if (pParam->multiParams) { SExchangeOperatorBatchParam* pBatch = (SExchangeOperatorBatchParam*)pOperator->pOperatorGetParam->value; - int32_t iter = 0; + int32_t iter = 0; while (NULL != (pBasicParam = tSimpleHashIterate(pBatch->pBatchs, pBasicParam, &iter))) { code = addSingleExchangeSource(pOperator, pBasicParam); if (code) { @@ -826,11 +828,11 @@ int32_t addDynamicExchangeSource(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } - int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { SExchangeInfo* pExchangeInfo = pOperator->info; - int32_t code = TSDB_CODE_SUCCESS; - if ((OPTR_IS_OPENED(pOperator) && !pExchangeInfo->dynamicOp) || (pExchangeInfo->dynamicOp && NULL == pOperator->pOperatorGetParam)) { + int32_t code = TSDB_CODE_SUCCESS; + if ((OPTR_IS_OPENED(pOperator) && !pExchangeInfo->dynamicOp) || + 
(pExchangeInfo->dynamicOp && NULL == pOperator->pOperatorGetParam)) { return TSDB_CODE_SUCCESS; } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 6674dd731b..111784316a 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -456,7 +456,7 @@ static void genTbGroupDigest(const SNode* pGroup, uint8_t* filterDigest, T_MD5_C } int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInfo* pTableListInfo, uint8_t* digest, - SStorageAPI* pAPI) { + SStorageAPI* pAPI, bool initRemainGroups) { int32_t code = TSDB_CODE_SUCCESS; SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; @@ -590,6 +590,15 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf goto end; } + if (initRemainGroups) { + pTableListInfo->remainGroups = + taosHashInit(rows, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (pTableListInfo->remainGroups == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + } + for (int i = 0; i < rows; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -631,6 +640,14 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf int32_t len = (int32_t)(pStart - (char*)keyBuf); info->groupId = calcGroupId(keyBuf, len); + if (initRemainGroups) { + // groupId ~ table uid + taosHashPut(pTableListInfo->remainGroups, &(info->groupId), sizeof(info->groupId), &(info->uid), sizeof(info->uid)); + } + } + + if (initRemainGroups) { + pTableListInfo->numOfOuputGroups = taosHashGetSize(pTableListInfo->remainGroups); } if (tsTagFilterCache) { @@ -1821,7 +1838,7 @@ static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) { STimeWindow w = {0}; w.skey = taosTimeTruncate(ts, pInterval); - w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + w.ekey = taosTimeGetIntervalEnd(w.skey, pInterval); return w; } @@ -1870,31 +1887,17 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI } void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order) { + int64_t slidingStart = 0; + if (pInterval->offset > 0) { + slidingStart = taosTimeAdd(tw->skey, -1 * pInterval->offset, pInterval->offsetUnit, pInterval->precision); + } else { + slidingStart = tw->skey; + } int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order); - if (!IS_CALENDAR_TIME_DURATION(pInterval->slidingUnit)) { - tw->skey += pInterval->sliding * factor; - tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; - return; - } - - // convert key to second - int64_t key = convertTimePrecision(tw->skey, pInterval->precision, TSDB_TIME_PRECISION_MILLI) / 1000; - - int64_t duration = pInterval->sliding; - if (pInterval->slidingUnit == 'y') { - duration *= 12; - } - - struct tm tm; - time_t t = (time_t)key; - taosLocalTime(&t, &tm, NULL); - - int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor); - tm.tm_year = mon / 12; - tm.tm_mon = mon % 12; - tw->skey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, pInterval->precision); - - tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + slidingStart = taosTimeAdd(slidingStart, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision); + tw->skey = taosTimeAdd(slidingStart, pInterval->offset, pInterval->offsetUnit, 
pInterval->precision); + int64_t slidingEnd = taosTimeAdd(slidingStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + tw->ekey = taosTimeAdd(slidingEnd, pInterval->offset, pInterval->offsetUnit, pInterval->precision); } bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) { @@ -2025,6 +2028,7 @@ STableListInfo* tableListCreate() { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + pListInfo->remainGroups = NULL; pListInfo->pTableList = taosArrayInit(4, sizeof(STableKeyInfo)); if (pListInfo->pTableList == NULL) { @@ -2054,7 +2058,7 @@ void* tableListDestroy(STableListInfo* pTableListInfo) { taosMemoryFreeClear(pTableListInfo->groupOffset); taosHashCleanup(pTableListInfo->map); - + taosHashCleanup(pTableListInfo->remainGroups); pTableListInfo->pTableList = NULL; pTableListInfo->map = NULL; taosMemoryFree(pTableListInfo); @@ -2068,6 +2072,7 @@ void tableListClear(STableListInfo* pTableListInfo) { taosArrayClear(pTableListInfo->pTableList); taosHashClear(pTableListInfo->map); + taosHashClear(pTableListInfo->remainGroups); taosMemoryFree(pTableListInfo->groupOffset); pTableListInfo->numOfOuputGroups = 1; pTableListInfo->oneTableForEachGroup = false; @@ -2122,6 +2127,9 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* bool groupByTbname = groupbyTbname(group); size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + if (!numOfTables) { + return code; + } if (group == NULL || groupByTbname) { for (int32_t i = 0; i < numOfTables; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -2139,11 +2147,18 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pTableListInfo->numOfOuputGroups = 1; } } else { - code = getColInfoResultForGroupby(pHandle->vnode, group, pTableListInfo, digest, pAPI); + bool initRemainGroups = false; + if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { + STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; + if (tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable && !(groupSort || pScanNode->groupOrderScan)) { + initRemainGroups = true; + } + } + + code = getColInfoResultForGroupby(pHandle->vnode, group, pTableListInfo, digest, pAPI, initRemainGroups); if (code != TSDB_CODE_SUCCESS) { return code; } - if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList); if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); @@ -2246,8 +2261,11 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr } void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr, const char* taskIdStr) { - if (!pBlock || pBlock->info.rows == 0) { - qDebug("%s===stream===%s: Block is Null or Empty", taskIdStr, flag); + if (!pBlock) { + qDebug("%s===stream===%s %s: Block is Null", taskIdStr, flag, opStr); + return; + } else if (pBlock->info.rows == 0) { + qDebug("%s===stream===%s %s: Block is Empty. 
block type %d", taskIdStr, flag, opStr, pBlock->info.type); return; } if (qDebugFlag & DEBUG_DEBUG) { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 8ebde1aea0..6f1593a121 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -922,8 +922,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan pStreamInfo->fillHistoryWindow = *pWindow; pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE2; - qDebug("%s step 2. set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64 - ", window:%" PRId64 " - %" PRId64, + qDebug("%s step 2. set param for stream scanner scan wal, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64 + " - %" PRId64, GET_TASKID(pTaskInfo), pStreamInfo->fillHistoryVer.minVer, pStreamInfo->fillHistoryVer.maxVer, pWindow->skey, pWindow->ekey); return 0; @@ -1023,6 +1023,57 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) { return 0; } +int32_t qResetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + SOperatorInfo* pOperator = pTaskInfo->pRoot; + + while (1) { + int32_t type = pOperator->operatorType; + if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL || + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) { + SStreamIntervalOperatorInfo* pInfo = pOperator->info; + STimeWindowAggSupp* pSup = &pInfo->twAggSup; + + pSup->calTriggerSaved = 0; + pSup->deleteMarkSaved = 0; + qInfo("reset stream param for interval: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + + } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION || + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION || + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) { + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; + STimeWindowAggSupp* pSup = &pInfo->twAggSup; + + pSup->calTriggerSaved = 0; + pSup->deleteMarkSaved = 0; + qInfo("reset stream param for session: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + + } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) { + SStreamStateAggOperatorInfo* pInfo = pOperator->info; + STimeWindowAggSupp* pSup = &pInfo->twAggSup; + + pSup->calTriggerSaved = 0; + pSup->deleteMarkSaved = 0; + qInfo("reset stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + + } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT) { + SStreamEventAggOperatorInfo* pInfo = pOperator->info; + STimeWindowAggSupp* pSup = &pInfo->twAggSup; + + pSup->calTriggerSaved = 0; + pSup->deleteMarkSaved = 0; + qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + } + + // iterate operator tree + if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) { + return 0; + } else { + pOperator = pOperator->pDownstream[0]; + } + } +} + int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; const char* id = GET_TASKID(pTaskInfo); @@ -1078,7 +1129,7 @@ int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow; - qDebug("%s remove scan-history filter window:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64, + qDebug("%s remove timeWindow filter:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWindow->skey, 
pWindow->ekey, INT64_MIN, INT64_MAX); pWindow->skey = INT64_MIN; @@ -1209,7 +1260,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0); uid = pTableInfo->uid; ts = INT64_MIN; - pScanInfo->currentTable = 0; + pScanInfo->tableEndIndex = 0; } else { taosRUnLockLatch(&pTaskInfo->lock); qError("no table in table list, %s", id); @@ -1223,16 +1274,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT pInfo->pTableScanOp->resultInfo.totalRows = 0; // start from current accessed position - // we cannot start from the pScanInfo->currentTable, since the commit offset may cause the rollback of the start + // we cannot start from the pScanInfo->tableEndIndex, since the commit offset may cause the rollback of the start // position, let's find it from the beginning. index = tableListFind(pTableListInfo, uid, 0); taosRUnLockLatch(&pTaskInfo->lock); if (index >= 0) { - pScanInfo->currentTable = index; + pScanInfo->tableEndIndex = index; } else { qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid, - numOfTables, pScanInfo->currentTable, id); + numOfTables, pScanInfo->tableEndIndex, id); terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } @@ -1255,12 +1306,12 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT } qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s", - uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id); + uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); } else { pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1); pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond); qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s", - uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id); + uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); } // restore the key value diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 95d26fdd0e..ff4d3d0d27 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -449,7 +449,7 @@ STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key) { * if the realSkey > INT64_MAX - pInterval->interval, the query duration between * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. 
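 *
 * Note on the change below: the window end is now obtained from taosTimeGetIntervalEnd()
 * rather than the inline taosTimeAdd(skey, interval) - 1; the two should agree, but the
 * helper keeps month/year ('n'/'y') boundary arithmetic in one place, matching the same
 * substitution made in executil.c and timewindowoperator.c.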
*/ - win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; + win.ekey = taosTimeGetIntervalEnd(win.skey, pInterval); if (win.ekey < win.skey) { win.ekey = INT64_MAX; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 0ef8c4b81a..b60731a286 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -655,6 +655,76 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, colDataDestroy(&infoData); } + +static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) { + pInfo->tableStartIndex = pInfo->tableEndIndex + 1; + + STableListInfo* pTableListInfo = pInfo->base.pTableListInfo; + int32_t numOfTables = tableListGetSize(pTableListInfo); + STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pTableListInfo, pInfo->tableStartIndex); + + if (pTableListInfo->oneTableForEachGroup) { + pInfo->tableEndIndex = pInfo->tableStartIndex; + } else if (pTableListInfo->groupOffset) { + pInfo->currentGroupIndex++; + if (pInfo->currentGroupIndex + 1 < pTableListInfo->numOfOuputGroups) { + pInfo->tableEndIndex = pTableListInfo->groupOffset[pInfo->currentGroupIndex + 1] - 1; + } else { + pInfo->tableEndIndex = numOfTables - 1; + } + } else { + pInfo->tableEndIndex = numOfTables - 1; + } + + if (!pInfo->needCountEmptyTable) { + pInfo->countState = TABLE_COUNT_STATE_END; + } else { + pInfo->countState = TABLE_COUNT_STATE_SCAN; + } + + *pKeyInfo = pStart; + *size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1; +} + +void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) { + if (pInfo->countState == TABLE_COUNT_STATE_END) { + return; + } + if (pInfo->base.pTableListInfo->oneTableForEachGroup || pInfo->base.pTableListInfo->groupOffset) { + pInfo->countState = TABLE_COUNT_STATE_PROCESSED; + } else { + taosHashRemove(pInfo->base.pTableListInfo->remainGroups, &groupId, sizeof(groupId)); + } +} + +static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBase* pBase, SSDataBlock* pBlock, + const STableKeyInfo* tbInfo) { + blockDataEmpty(pBlock); + pBlock->info.rows = 1; + pBlock->info.id.uid = tbInfo->uid; + pBlock->info.id.groupId = tbInfo->groupId; + + // only one row: set all col data to null & hasNull + int32_t col_num = blockDataGetNumOfCols(pBlock); + for (int32_t i = 0; i < col_num; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + colDataSetNULL(pColInfoData, 0); + } + + // set tag/tbname + doSetTagColumnData(pBase, pBlock, pTaskInfo, pBlock->info.rows); + return pBlock; +} + +static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STableKeyInfo* tbInfo) { + STableScanInfo* pTableScanInfo = pOperator->info; + SSDataBlock* pBlock = + getOneRowResultBlock(pOperator->pTaskInfo, &pTableScanInfo->base, pTableScanInfo->pResBlock, tbInfo); + + pOperator->resultInfo.totalRows++; + return pBlock; +} + static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -736,6 +806,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { + markGroupProcessed(pTableScanInfo, p->info.id.groupId); return p; } @@ -764,6 +835,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { 
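  // Empty-table accounting for this scan (enabled via needCountEmptyTable): each group
  // starts in TABLE_COUNT_STATE_SCAN, any block returned below marks the group processed
  // through markGroupProcessed(), and a group that produces no block at all is later
  // emitted as a single all-NULL row so that count queries still return a result for it.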
while (pTableScanInfo->scanTimes < total) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { + markGroupProcessed(pTableScanInfo, p->info.id.groupId); return p; } @@ -780,6 +852,33 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { } } + if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { + STableListInfo* pTableListInfo = pTableScanInfo->base.pTableListInfo; + if (pTableListInfo->oneTableForEachGroup || pTableListInfo->groupOffset) { // group by tbname, group by tag + sort + if (pTableScanInfo->countState < TABLE_COUNT_STATE_PROCESSED) { + pTableScanInfo->countState = TABLE_COUNT_STATE_PROCESSED; + STableKeyInfo* pStart = + (STableKeyInfo*)tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->tableStartIndex); + return getBlockForEmptyTable(pOperator, pStart); + } + } else { // group by tag + no sort + int32_t numOfTables = tableListGetSize(pTableListInfo); + if (pTableScanInfo->tableEndIndex + 1 >= numOfTables) { + // get empty group, mark processed & rm from hash + void* pIte = taosHashIterate(pTableListInfo->remainGroups, NULL); + if (pIte != NULL) { + size_t keySize = 0; + uint64_t* pGroupId = taosHashGetKey(pIte, &keySize); + STableKeyInfo info = {.uid = *(uint64_t*)pIte, .groupId = *pGroupId}; + taosHashCancelIterate(pTableListInfo->remainGroups, pIte); + markGroupProcessed(pTableScanInfo, *pGroupId); + return getBlockForEmptyTable(pOperator, &info); + } + } + } + pTableScanInfo->countState = TABLE_COUNT_STATE_END; + } + return NULL; } @@ -839,8 +938,8 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); + if (pInfo->tableEndIndex + 1 >= numOfTables) { setOperatorCompleted(pOperator); if (pOperator->dynamicTask) { taosArrayClear(pInfo->base.pTableListInfo->pTableList); @@ -855,8 +954,8 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { int32_t num = 0; STableKeyInfo* pList = NULL; - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - + initNextGroupScan(pInfo, &pList, &num); + pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; @@ -876,16 +975,17 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; + int32_t num = 0; + STableKeyInfo* pList = NULL; - if (pInfo->currentGroupId == -1) { - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + if (pInfo->tableEndIndex == -1) { + int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); + if (pInfo->tableEndIndex + 1 == numOfTables) { setOperatorCompleted(pOperator); return NULL; } - int32_t num = 0; - STableKeyInfo* pList = NULL; - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); + initNextGroupScan(pInfo, &pList, &num); ASSERT(pInfo->base.dataReader == NULL); int32_t code = pAPI->tsdReader.tsdReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num, pInfo->pResBlock, @@ -934,7 +1034,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { 
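  // Group iteration is now tracked with [tableStartIndex, tableEndIndex] and
  // currentGroupIndex instead of the old currentGroupId: tableEndIndex == -1 marks the
  // not-yet-started state, and initNextGroupScan() advances the index range one group at
  // a time (using groupOffset when the groups are sorted).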
T_LONG_JMP(pTaskInfo->env, code); } if (pOperator->status == OP_EXEC_DONE) { - pInfo->currentGroupId = -1; + pInfo->tableEndIndex = -1; pOperator->status = OP_OPENED; SSDataBlock* result = NULL; while (true) { @@ -950,6 +1050,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) { int32_t numOfTables = 0; // tableListGetSize(pTaskInfo->pTableListInfo); STableKeyInfo tInfo = {0}; + pInfo->countState = TABLE_COUNT_STATE_END; while (1) { SSDataBlock* result = doGroupedTableScan(pOperator); @@ -958,23 +1059,23 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { } // if no data, switch to next table and continue scan - pInfo->currentTable++; + pInfo->tableEndIndex++; taosRLockLatch(&pTaskInfo->lock); numOfTables = tableListGetSize(pInfo->base.pTableListInfo); - if (pInfo->currentTable >= numOfTables) { + if (pInfo->tableEndIndex >= numOfTables) { qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo)); taosRUnLockLatch(&pTaskInfo->lock); return NULL; } - tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable); + tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableEndIndex); taosRUnLockLatch(&pTaskInfo->lock); pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1); qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables, - pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo)); + pInfo->tableEndIndex, numOfTables, GET_TASKID(pTaskInfo)); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; @@ -1067,7 +1168,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, goto _error; } - pInfo->currentGroupId = -1; + pInfo->tableEndIndex = -1; + pInfo->currentGroupIndex = -1; pInfo->assignBlockUid = pTableScanNode->assignBlockUid; pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? 
true : false; @@ -1075,6 +1177,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pTaskInfo); pOperator->exprSupp.numOfExprs = numOfCols; + pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable; + pInfo->base.pTableListInfo = pTableListInfo; pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5); if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) { @@ -1161,7 +1265,7 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6 pTableScanInfo->base.cond.startVersion = 0; pTableScanInfo->base.cond.endVersion = ver; pTableScanInfo->scanTimes = 0; - pTableScanInfo->currentGroupId = -1; + pTableScanInfo->tableEndIndex = -1; pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader); pTableScanInfo->base.dataReader = NULL; } @@ -1962,8 +2066,10 @@ static void setBlockGroupIdByUid(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { } static void doCheckUpdate(SStreamScanInfo* pInfo, TSKEY endKey, SSDataBlock* pBlock) { - if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { + if (pInfo->pUpdateInfo) { pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pBlock->info.version); + } + if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { checkUpdateData(pInfo, true, pBlock, true); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, endKey); if (pInfo->pUpdateDataRes->info.rows > 0) { @@ -1998,7 +2104,6 @@ void streamScanOperatorSaveCheckpoint(SStreamScanInfo* pInfo) { int32_t len = streamScanOperatorEncode(pInfo, &pBuf); pInfo->stateStore.streamStateSaveInfo(pInfo->pState, STREAM_SCAN_OP_CHECKPOINT_NAME, strlen(STREAM_SCAN_OP_CHECKPOINT_NAME), pBuf, len); taosMemoryFree(pBuf); - pInfo->stateStore.streamStateCommit(pInfo->pState); } // other properties are recovered from the execution plan @@ -2063,7 +2168,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { pInfo->pTableScanOp->status = OP_OPENED; pTSInfo->scanTimes = 0; - pTSInfo->currentGroupId = -1; + pTSInfo->tableEndIndex = -1; } if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) { @@ -2317,6 +2422,7 @@ FETCH_NEXT_BLOCK: doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes); doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); + blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); int64_t numOfUpdateRes = pInfo->pUpdateDataRes->info.rows; qDebug("%s %" PRId64 " rows in datablock, update res:%" PRId64, id, pBlockInfo->rows, numOfUpdateRes); @@ -3478,7 +3584,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { } pInfo->tableEndIndex = i - 1; } - + pInfo->bGroupProcessed = false; int32_t tableStartIdx = pInfo->tableStartIndex; int32_t tableEndIdx = pInfo->tableEndIndex; @@ -3600,9 +3706,14 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator); + if (pBlock == NULL && !pInfo->bGroupProcessed && pInfo->needCountEmptyTable) { + STableKeyInfo* tbInfo = tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex); + pBlock = getOneRowResultBlock(pTaskInfo, &pInfo->base, pInfo->pResBlock, tbInfo); + } if (pBlock != NULL) { pBlock->info.id.groupId = pInfo->groupId; pOperator->resultInfo.totalRows += pBlock->info.rows; + pInfo->bGroupProcessed = true; return pBlock; } else { if (pInfo->bNewFilesetEvent) { @@ -3757,6 +3868,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* 
pTableScanN } else { pInfo->filesetDelimited = pTableScanNode->filesetDelimited; } + pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable; + setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->exprSupp.numOfExprs = numOfCols; diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 6adab74344..8aca76597b 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -480,18 +480,18 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) { return resBlock; } + if (pInfo->recvGetAll) { + pInfo->recvGetAll = false; + resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); + } + if (pInfo->reCkBlock) { pInfo->reCkBlock = false; printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); return pInfo->pCheckpointRes; } - if (pInfo->recvGetAll) { - pInfo->recvGetAll = false; - resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); - } - - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -561,7 +561,7 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) { if (resBlock != NULL) { return resBlock; } - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -622,6 +622,7 @@ void streamEventReloadState(SOperatorInfo* pOperator) { } setEventWindowFlag(pAggSup, &curInfo); if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) { + saveSessionOutputBuf(pAggSup, &curInfo.winInfo); continue; } @@ -653,6 +654,7 @@ void streamEventReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 919b6545dc..e4d04c3bdb 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -430,6 +430,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { taosMemoryFreeClear(param); } +#ifdef BUILD_NO_CALL static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { for (int32_t i = 0; i < numOfCols; i++) { if (fmIsUserDefinedFunc(pFCtx[i].functionId) || !fmIsInvertible(pFCtx[i].functionId)) { @@ -438,6 +439,7 @@ static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { } return true; } +#endif void reloadFromDownStream(SOperatorInfo* downstream, SStreamIntervalOperatorInfo* pInfo) { SStateStore* pAPI = &downstream->pTaskInfo->storageAPI.stateStore; @@ -804,10 +806,24 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat SRowBuffPos* pResPos = NULL; SResultRow* pResult = NULL; int32_t forwardRows = 0; + int32_t endRowId = pSDataBlock->info.rows - 1; SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); tsCols = (int64_t*)pColDataInfo->pData; + if (pSDataBlock->info.window.skey != tsCols[0] || pSDataBlock->info.window.ekey != tsCols[endRowId]) { + qError("table uid %" PRIu64 " data block timestamp range may not be calculated! 
minKey %" PRId64 + ",maxKey %" PRId64, + pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); + blockDataUpdateTsWindow(pSDataBlock, pInfo->primaryTsIndex); + + // timestamp of the data is incorrect + if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) { + qError("table uid %" PRIu64 " data block timestamp is out of range! minKey %" PRId64 ",maxKey %" PRId64, + pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); + } + } + int32_t startPos = 0; TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols); STimeWindow nextWin = {0}; @@ -905,19 +921,6 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat pInfo->delKey = key; } int32_t prevEndPos = (forwardRows - 1) * step + startPos; - if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) { - qError("table uid %" PRIu64 " data block timestamp range may not be calculated! minKey %" PRId64 - ",maxKey %" PRId64, - pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); - blockDataUpdateTsWindow(pSDataBlock, 0); - - // timestamp of the data is incorrect - if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) { - qError("table uid %" PRIu64 " data block timestamp is out of range! minKey %" PRId64 ",maxKey %" PRId64, - pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); - } - } - if (IS_FINAL_INTERVAL_OP(pOperator)) { startPos = getNextQualifiedFinalWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos); } else { @@ -1226,7 +1229,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { return pInfo->pCheckpointRes; } - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); if (!IS_FINAL_INTERVAL_OP(pOperator)) { clearFunctionContext(&pOperator->exprSupp); // semi interval operator clear disk buffer @@ -1635,6 +1638,7 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num pDummy[i].functionId = pCtx[i].functionId; pDummy[i].isNotNullFunc = pCtx[i].isNotNullFunc; pDummy[i].isPseudoFunc = pCtx[i].isPseudoFunc; + pDummy[i].fpSet.init = pCtx[i].fpSet.init; } } @@ -2354,10 +2358,6 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDa } int32_t code = pAPI->stateStore.streamStateGetByPos(pState, pPos, (void**)&pRow); - if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { - ASSERT(pBlock->info.rows > 0); - break; - } if (code == -1) { // for history @@ -2374,6 +2374,11 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDa continue; } + if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { + ASSERT(pBlock->info.rows > 0); + break; + } + pGroupResInfo->index += 1; for (int32_t j = 0; j < numOfExprs; ++j) { @@ -2620,18 +2625,18 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { return opRes; } + if (pInfo->recvGetAll) { + pInfo->recvGetAll = false; + resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); + } + if (pInfo->reCkBlock) { pInfo->reCkBlock = false; printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); return pInfo->pCheckpointRes; } - if (pInfo->recvGetAll) { - pInfo->recvGetAll = false; - resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); - } - - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -2729,7 
+2734,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { return opRes; } - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -2770,6 +2775,18 @@ void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, } } +void reloadAggSupFromDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup) { + SStateStore* pAPI = &downstream->pTaskInfo->storageAPI.stateStore; + + if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + reloadAggSupFromDownStream(downstream->pDownstream[0], pAggSup); + return; + } + + SStreamScanInfo* pScanInfo = downstream->info; + pAggSup->pUpdateInfo = pScanInfo->pUpdateInfo; +} + void streamSessionSemiReloadState(SOperatorInfo* pOperator) { SStreamSessionAggOperatorInfo* pInfo = pOperator->info; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; @@ -2786,6 +2803,9 @@ void streamSessionSemiReloadState(SOperatorInfo* pOperator) { for (int32_t i = 0; i < num; i++) { SResultWindowInfo winInfo = {0}; getSessionWindowInfoByKey(pAggSup, pSeKeyBuf + i, &winInfo); + if (!IS_VALID_SESSION_WIN(winInfo)) { + continue; + } compactSessionSemiWindow(pOperator, &winInfo); saveSessionOutputBuf(pAggSup, &winInfo); } @@ -2798,6 +2818,7 @@ void streamSessionSemiReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } void streamSessionReloadState(SOperatorInfo* pOperator) { @@ -2850,6 +2871,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, @@ -3009,7 +3031,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { clearFunctionContext(&pOperator->exprSupp); // semi session operator clear disk buffer clearStreamSessionOperator(pInfo); - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); pInfo->clearState = false; return NULL; } @@ -3082,7 +3104,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { clearFunctionContext(&pOperator->exprSupp); // semi session operator clear disk buffer clearStreamSessionOperator(pInfo); - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -3559,18 +3581,18 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { return resBlock; } + if (pInfo->recvGetAll) { + pInfo->recvGetAll = false; + resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); + } + if (pInfo->reCkBlock) { pInfo->reCkBlock = false; printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); return pInfo->pCheckpointRes; } - if (pInfo->recvGetAll) { - pInfo->recvGetAll = false; - resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows); - } - - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -3636,7 +3658,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { if (resBlock != NULL) { return resBlock; } - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -3732,6 +3754,7 @@ void streamStateReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { 
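    // After the downstream operator reloads its state, reloadAggSupFromDownStream()
    // (called just below) walks down to the stream scan node and points this operator's
    // agg supporter at the scan node's pUpdateInfo, so the supporter does not keep a
    // stale pointer after recovery.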
downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, @@ -3837,6 +3860,7 @@ _error: return NULL; } +#ifdef BUILD_NO_CALL static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) { for (int i = 0; i < num; i++) { if (type == STREAM_INVERT) { @@ -3846,6 +3870,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type } } } +#endif static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SStreamIntervalOperatorInfo* pInfo = pOperator->info; @@ -3872,11 +3897,11 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { if (pInfo->reCkBlock) { pInfo->reCkBlock = false; - // printDataBlock(pInfo->pCheckpointRes, "single interval ck"); + printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); return pInfo->pCheckpointRes; } - setOperatorCompleted(pOperator); + setStreamOperatorCompleted(pOperator); return NULL; } @@ -3908,7 +3933,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { doDeleteWindows(pOperator, &pInfo->interval, pBlock, pInfo->pDelWins, pInfo->pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_GET_ALL) { - qDebug("===stream===%s recv|block type STREAM_GET_ALL", getStreamOpName(pOperator->operatorType)); pInfo->recvGetAll = true; getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap); continue; @@ -3939,9 +3963,11 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { // caller. Note that all the time window are not close till now. // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); +#ifdef BUILD_NO_CALL if (pInfo->invertible) { setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type); } +#endif doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pInfo->pUpdatedMap); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); @@ -4295,3 +4321,8 @@ static SSDataBlock* doStreamMidIntervalAgg(SOperatorInfo* pOperator) { return buildIntervalResult(pOperator); } + +void setStreamOperatorCompleted(SOperatorInfo* pOperator) { + setOperatorCompleted(pOperator); + qDebug("stask:%s %s status: %d. 
set completed", GET_TASKID(pOperator->pTaskInfo), getStreamOpName(pOperator->operatorType), pOperator->status); +} diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 76de204cab..0c421bf354 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -15,9 +15,7 @@ #include "executorInt.h" #include "filter.h" -#include "function.h" #include "functionMgt.h" -#include "os.h" #include "querynodes.h" #include "systable.h" #include "tname.h" @@ -32,6 +30,7 @@ #include "storageapi.h" #include "tcompare.h" #include "thash.h" +#include "trpc.h" #include "ttypes.h" typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype); @@ -1789,8 +1788,8 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan setOperatorInfo(pOperator, "SysTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, false, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); - pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); return pOperator; _error: @@ -1867,7 +1866,13 @@ int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code) { pRsp->handle = htobe64(pRsp->handle); pRsp->compLen = htonl(pRsp->compLen); } else { - operator->pTaskInfo->code = code; + operator->pTaskInfo->code = rpcCvtErrCode(code); + if (operator->pTaskInfo->code != code) { + qError("load systable rsp received, error:%s, cvted error:%s", tstrerror(code), + tstrerror(operator->pTaskInfo->code)); + } else { + qError("load systable rsp received, error:%s", tstrerror(code)); + } } tsem_post(&pScanResInfo->ready); diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 6c537d7b98..7b63bc8720 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -169,13 +169,14 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* setNotFillColumn(pFillInfo, pDstCol, index, i); } } else { - SGroupKeys* pKey = taosArrayGet(pFillInfo->prev.pRowVal, i); + SRowVal* pRVal = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; + SGroupKeys* pKey = taosArrayGet(pRVal->pRowVal, i); if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pKey->isNull) { colDataSetNULL(pDstCol, index); continue; } - SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev.pRowVal, pFillInfo->tsSlotId); + SGroupKeys* pKey1 = taosArrayGet(pRVal->pRowVal, pFillInfo->tsSlotId); int64_t prevTs = *(int64_t*)pKey1->pData; int32_t srcSlotId = GET_DEST_SLOT_ID(pCol); @@ -346,9 +347,10 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t char* src = colDataGetData(pSrc, pFillInfo->index); if (!colDataIsNull_s(pSrc, pFillInfo->index)) { colDataSetVal(pDst, index, src, false); - saveColData(pFillInfo->prev.pRowVal, i, src, false); + SRowVal* pRVal = FILL_IS_ASC_FILL(pFillInfo) ? 
&pFillInfo->prev : &pFillInfo->next; + saveColData(pRVal->pRowVal, i, src, false); if (pFillInfo->srcTsSlotId == dstSlotId) { - pFillInfo->prev.key = *(int64_t*)src; + pRVal->key = *(int64_t*)src; } } else { // the value is null if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { @@ -361,7 +363,8 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t } else if (pFillInfo->type == TSDB_FILL_LINEAR) { bool isNull = colDataIsNull_s(pSrc, pFillInfo->index); colDataSetVal(pDst, index, src, isNull); - saveColData(pFillInfo->prev.pRowVal, i, src, isNull); // todo: + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; + saveColData(p, i, src, isNull); // todo: } else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { colDataSetNULL(pDst, index); } else if (pFillInfo->type == TSDB_FILL_NEXT) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0ca91f74ad..b86253a8d1 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -450,7 +450,7 @@ int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBl TSKEY next = primaryKeys[startPos]; if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') { pNext->skey = taosTimeTruncate(next, pInterval); - pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1; + pNext->ekey = taosTimeGetIntervalEnd(pNext->skey, pInterval); } else { pNext->ekey += ((next - pNext->ekey + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding; pNext->skey = pNext->ekey - pInterval->interval + 1; @@ -459,7 +459,7 @@ int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBl TSKEY next = primaryKeys[startPos]; if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') { pNext->skey = taosTimeTruncate(next, pInterval); - pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1; + pNext->ekey = taosTimeGetIntervalEnd(pNext->skey, pInterval); } else { pNext->skey -= ((pNext->skey - next + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding; pNext->ekey = pNext->skey + pInterval->interval - 1; @@ -1079,16 +1079,6 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) { return (rows == 0) ? NULL : pBlock; } -static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) { - for (int i = 0; i < num; i++) { - if (type == STREAM_INVERT) { - fmSetInvertFunc(pCtx[i].functionId, &(pCtx[i].fpSet)); - } else if (type == STREAM_NORMAL) { - fmSetNormalFunc(pCtx[i].functionId, &(pCtx[i].fpSet)); - } - } -} - static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) { SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false); if (NULL == pResult) { @@ -1328,6 +1318,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator SWindowRowsSup* pRowSup = &pInfo->winSup; pRowSup->numOfRows = 0; + pRowSup->startRowIndex = 0; // In case of ascending or descending order scan data, only one time window needs to be kept for each table.
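  // startRowIndex is now reset once per incoming block (see the assignment above),
  // replacing the per-row (j == 0) special case removed below: the row indices kept in
  // SWindowRowsSup are block-relative, so they must restart with every new block.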
TSKEY* tsList = (TSKEY*)pColInfoData->pData; @@ -1339,9 +1330,6 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) { // The gap is less than the threshold, so it belongs to current session window that has been opened already. doKeepTuple(pRowSup, tsList[j], gid); - if (j == 0 && pRowSup->startRowIndex != 0) { - pRowSup->startRowIndex = 0; - } } else { // start a new session window SResultRow* pResult = NULL; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1ebc7ad3c6..ee1d831a24 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -475,6 +475,7 @@ static void appendOneRowToDataBlock(SSDataBlock* pBlock, const SSDataBlock* pSou if (isNull) { colDataSetVal(pColInfo, pBlock->info.rows, NULL, true); } else { + if (!pSrcColInfo->pData) continue; char* pData = colDataGetData(pSrcColInfo, *rowIndex); colDataSetVal(pColInfo, pBlock->info.rows, pData, false); } @@ -877,6 +878,9 @@ static int32_t blockCompareTsFn(const void* pLeft, const void* pRight, void* par static int32_t appendDataBlockToPageBuf(SSortHandle* pHandle, SSDataBlock* blk, SArray* aPgId) { int32_t pageId = -1; void* pPage = getNewBufPage(pHandle->pBuf, &pageId); + if (pPage == NULL) { + return terrno; + } taosArrayPush(aPgId, &pageId); int32_t size = blockDataGetSize(blk) + sizeof(int32_t) + taosArrayGetSize(blk->pDataBlock) * sizeof(int32_t); @@ -900,7 +904,7 @@ static int32_t getPageBufIncForRow(SSDataBlock* blk, int32_t row, int32_t rowIdx for (int32_t i = 0; i < numCols; ++i) { SColumnInfoData* pColInfoData = TARRAY_GET_ELEM(blk->pDataBlock, i); if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - if (pColInfoData->varmeta.offset[row] != -1) { + if ((pColInfoData->varmeta.offset[row] != -1) && (pColInfoData->pData)) { char* p = colDataGetData(pColInfoData, row); sz += varDataTLen(p); } @@ -943,16 +947,15 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO totalRows += blk->info.rows; } - SArray* aPgId = taosArrayInit(8, sizeof(int32_t)); - SMultiwayMergeTreeInfo* pTree = NULL; code = tMergeTreeCreate(&pTree, taosArrayGetSize(aBlk), &sup, blockCompareTsFn); if (TSDB_CODE_SUCCESS != code) { taosMemoryFree(sup.aRowIdx); taosMemoryFree(sup.aTs); - return code; } + + SArray* aPgId = taosArrayInit(8, sizeof(int32_t)); int32_t nRows = 0; int32_t nMergedRows = 0; bool mergeLimitReached = false; @@ -968,9 +971,15 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO if (blkPgSz <= pHandle->pageSize && blkPgSz + bufInc > pHandle->pageSize) { SColumnInfoData* tsCol = taosArrayGet(pHandle->pDataBlock->pDataBlock, order->slotId); lastPageBufTs = ((int64_t*)tsCol->pData)[pHandle->pDataBlock->info.rows - 1]; - appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId); + code = appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId); + if (code != TSDB_CODE_SUCCESS) { + taosMemoryFree(pTree); + taosArrayDestroy(aPgId); + taosMemoryFree(sup.aRowIdx); + taosMemoryFree(sup.aTs); + return code; + } nMergedRows += pHandle->pDataBlock->info.rows; - blockDataCleanup(pHandle->pDataBlock); blkPgSz = pgHeaderSz; bufInc = getPageBufIncForRow(minBlk, minRow, 0); @@ -1001,7 +1010,14 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO if (!mergeLimitReached) { SColumnInfoData* tsCol = taosArrayGet(pHandle->pDataBlock->pDataBlock, order->slotId); lastPageBufTs = 
((int64_t*)tsCol->pData)[pHandle->pDataBlock->info.rows - 1]; - appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId); + code = appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId); + if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(aPgId); + taosMemoryFree(pTree); + taosMemoryFree(sup.aRowIdx); + taosMemoryFree(sup.aTs); + return code; + } nMergedRows += pHandle->pDataBlock->info.rows; if ((pHandle->mergeLimit != -1) && (nMergedRows >= pHandle->mergeLimit)) { mergeLimitReached = true; diff --git a/source/libs/executor/test/timewindowTest.cpp b/source/libs/executor/test/timewindowTest.cpp index 2894c66587..3639bf15e1 100644 --- a/source/libs/executor/test/timewindowTest.cpp +++ b/source/libs/executor/test/timewindowTest.cpp @@ -158,4 +158,32 @@ TEST(testCase, timewindow_gen) { } +TEST(testCase, timewindow_natural) { + osSetTimezone("CST"); + + int32_t precision = TSDB_TIME_PRECISION_MILLI; + + SInterval interval2 = createInterval(17, 17, 13392000000, 'n', 'n', 0, precision); + int64_t key = 1648970865984; + STimeWindow w0 = getAlignQueryTimeWindow(&interval2, key); + printTimeWindow(&w0, precision, key); + ASSERT_GE(w0.ekey, key); + + int64_t key1 = 1633446027072; + STimeWindow w1 = {0}; + getInitialStartTimeWindow(&interval2, key1, &w1, true); + printTimeWindow(&w1, precision, key1); + STimeWindow w3 = getAlignQueryTimeWindow(&interval2, key1); + printf("%ld win %ld, %ld\n", key1, w3.skey, w3.ekey); + + int64_t key2 = 1648758398208; + STimeWindow w2 = {0}; + getInitialStartTimeWindow(&interval2, key2, &w2, true); + printTimeWindow(&w2, precision, key2); + STimeWindow w4 = getAlignQueryTimeWindow(&interval2, key2); + printf("%ld win %ld, %ld\n", key2, w4.skey, w4.ekey); + + ASSERT_EQ(w3.skey, w4.skey); + ASSERT_EQ(w3.ekey, w4.ekey); +} #pragma GCC diagnostic pop \ No newline at end of file diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h index b13d481254..4c1e46dbba 100644 --- a/source/libs/function/inc/builtins.h +++ b/source/libs/function/inc/builtins.h @@ -40,7 +40,9 @@ typedef struct SBuiltinFuncDefinition { FExecProcess processFunc; FScalarExecProcess sprocessFunc; FExecFinalize finalizeFunc; +#ifdef BUILD_NO_CALL FExecProcess invertFunc; +#endif FExecCombine combineFunc; const char* pPartialFunc; const char* pMiddleFunc; diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index d2f19ed2eb..ba7bf72aea 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -59,12 +59,19 @@ int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); EFuncDataRequired countDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow); bool getCountFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t countFunction(SqlFunctionCtx* pCtx); + +#ifdef BUILD_NO_CALL int32_t countInvertFunction(SqlFunctionCtx* pCtx); +#endif EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow); bool getSumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t sumFunction(SqlFunctionCtx* pCtx); + +#ifdef BUILD_NO_CALL int32_t sumInvertFunction(SqlFunctionCtx* pCtx); +#endif + int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool minmaxFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo); @@ -81,7 +88,11 @@ int32_t avgFunction(SqlFunctionCtx* pCtx); int32_t avgFunctionMerge(SqlFunctionCtx* pCtx); int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t
diff --git a/source/libs/function/inc/tfunctionInt.h b/source/libs/function/inc/tfunctionInt.h
index 821544106f..b4c48abf37 100644
--- a/source/libs/function/inc/tfunctionInt.h
+++ b/source/libs/function/inc/tfunctionInt.h
@@ -28,8 +28,6 @@ extern "C" {
 #include "tudf.h"
 #include "tvariant.h"
 
-bool topbot_datablock_filter(SqlFunctionCtx *pCtx, const char *minval, const char *maxval);
-
 /**
  * the numOfRes should be kept, since it may be used later
  * and allow the ResultInfo to be re initialized
diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h
index 65b7b38a05..5351594b2f 100644
--- a/source/libs/function/inc/tpercentile.h
+++ b/source/libs/function/inc/tpercentile.h
@@ -26,12 +26,12 @@ extern "C" {
 typedef struct MinMaxEntry {
   union {
     double   dMinVal;
-    int64_t  i64MinVal;
+    //double   i64MinVal;
     uint64_t u64MinVal;
   };
   union {
     double   dMaxVal;
-    int64_t  i64MaxVal;
+    //double   i64MaxVal;
     int64_t  u64MaxVal;
   };
 } MinMaxEntry;
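Reviewer note: tpercentile.h retires the int64 members of the MinMaxEntry unions in favor of the double members (the matching tpercentile.c changes appear further down). The toy below illustrates the hazard being removed: in a union, only the member last written is meaningful, so writers and readers must agree on a single live member.

```c
#include <stdint.h>
#include <stdio.h>

/* Same shape as the MinMaxEntry unions: several views over one storage slot. */
typedef union {
  double   d;
  int64_t  i64;
  uint64_t u64;
} Slot;

int main(void) {
  Slot s;
  s.i64 = 42;           /* write through the integer view                  */
  printf("%f\n", s.d);  /* reading the double view reinterprets raw bits   */
  s.d = 42.0;           /* the double view is now the authoritative member */
  printf("%f\n", s.d);  /* consistent: read what was last written          */
  return 0;
}
```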
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 535b4bf640..7be9f07ae4 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -2382,7 +2382,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = countFunction,
     .sprocessFunc = countScalarFunction,
     .finalizeFunc = functionFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = countInvertFunction,
+#endif
     .combineFunc  = combineFunction,
     .pPartialFunc = "count",
     .pMergeFunc   = "sum"
@@ -2398,7 +2400,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = sumFunction,
     .sprocessFunc = sumScalarFunction,
     .finalizeFunc = functionFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = sumInvertFunction,
+#endif
     .combineFunc  = sumCombine,
     .pPartialFunc = "sum",
     .pMergeFunc   = "sum"
@@ -2443,7 +2447,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = stddevFunction,
     .sprocessFunc = stddevScalarFunction,
     .finalizeFunc = stddevFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = stddevInvertFunction,
+#endif
     .combineFunc  = stddevCombine,
     .pPartialFunc = "_stddev_partial",
     .pMergeFunc   = "_stddev_merge"
@@ -2457,7 +2463,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = stddevFunctionSetup,
     .processFunc  = stddevFunction,
     .finalizeFunc = stddevPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = stddevInvertFunction,
+#endif
     .combineFunc  = stddevCombine,
   },
   {
@@ -2469,7 +2477,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = stddevFunctionSetup,
     .processFunc  = stddevFunctionMerge,
     .finalizeFunc = stddevFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = stddevInvertFunction,
+#endif
     .combineFunc  = stddevCombine,
   },
   {
@@ -2482,7 +2492,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = leastSQRFunction,
     .sprocessFunc = leastSQRScalarFunction,
     .finalizeFunc = leastSQRFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = leastSQRCombine,
   },
   {
@@ -2496,7 +2508,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = avgFunction,
     .sprocessFunc = avgScalarFunction,
     .finalizeFunc = avgFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = avgInvertFunction,
+#endif
     .combineFunc  = avgCombine,
     .pPartialFunc = "_avg_partial",
     .pMiddleFunc  = "_avg_middle",
@@ -2512,7 +2526,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = avgFunctionSetup,
     .processFunc  = avgFunction,
     .finalizeFunc = avgPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = avgInvertFunction,
+#endif
     .combineFunc  = avgCombine,
   },
   {
@@ -2524,7 +2540,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = avgFunctionSetup,
     .processFunc  = avgFunctionMerge,
     .finalizeFunc = avgFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = avgInvertFunction,
+#endif
     .combineFunc  = avgCombine,
   },
   {
@@ -2538,7 +2556,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = percentileFunction,
     .sprocessFunc = percentileScalarFunction,
     .finalizeFunc = percentileFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = NULL,
   },
   {
@@ -2551,7 +2571,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = apercentileFunction,
     .sprocessFunc = apercentileScalarFunction,
     .finalizeFunc = apercentileFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = apercentileCombine,
     .pPartialFunc = "_apercentile_partial",
     .pMergeFunc   = "_apercentile_merge",
@@ -2566,7 +2588,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = apercentileFunctionSetup,
     .processFunc  = apercentileFunction,
     .finalizeFunc = apercentilePartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = apercentileCombine,
   },
   {
@@ -2578,7 +2602,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = apercentileFunctionSetup,
     .processFunc  = apercentileFunctionMerge,
     .finalizeFunc = apercentileFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = apercentileCombine,
   },
   {
@@ -2624,7 +2650,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = spreadFunction,
     .sprocessFunc = spreadScalarFunction,
     .finalizeFunc = spreadFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = spreadCombine,
     .pPartialFunc = "_spread_partial",
     .pMergeFunc   = "_spread_merge"
@@ -2639,7 +2667,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = spreadFunctionSetup,
     .processFunc  = spreadFunction,
     .finalizeFunc = spreadPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = spreadCombine,
   },
   {
@@ -2652,7 +2682,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = spreadFunctionSetup,
     .processFunc  = spreadFunctionMerge,
     .finalizeFunc = spreadFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = spreadCombine,
   },
   {
@@ -2666,7 +2698,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = elapsedFunctionSetup,
     .processFunc  = elapsedFunction,
     .finalizeFunc = elapsedFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = elapsedCombine,
   },
   {
@@ -2679,7 +2713,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = elapsedFunctionSetup,
     .processFunc  = elapsedFunction,
     .finalizeFunc = elapsedPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = elapsedCombine,
   },
   {
@@ -2692,7 +2728,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = elapsedFunctionSetup,
     .processFunc  = elapsedFunctionMerge,
     .finalizeFunc = elapsedFinalize,
+ #ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+ #endif
     .combineFunc  = elapsedCombine,
   },
   {
@@ -2922,7 +2960,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = histogramFunction,
     .sprocessFunc = histogramScalarFunction,
     .finalizeFunc = histogramFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = histogramCombine,
     .pPartialFunc = "_histogram_partial",
     .pMergeFunc   = "_histogram_merge",
@@ -2936,7 +2976,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = histogramFunctionSetup,
     .processFunc  = histogramFunctionPartial,
     .finalizeFunc = histogramPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = histogramCombine,
   },
   {
@@ -2948,7 +2990,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = functionSetup,
     .processFunc  = histogramFunctionMerge,
     .finalizeFunc = histogramFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = histogramCombine,
   },
   {
@@ -2961,7 +3005,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .processFunc  = hllFunction,
     .sprocessFunc = hllScalarFunction,
     .finalizeFunc = hllFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = hllCombine,
     .pPartialFunc = "_hyperloglog_partial",
     .pMergeFunc   = "_hyperloglog_merge"
@@ -2975,7 +3021,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = functionSetup,
     .processFunc  = hllFunction,
     .finalizeFunc = hllPartialFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = hllCombine,
   },
   {
@@ -2987,7 +3035,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .initFunc     = functionSetup,
     .processFunc  = hllFunctionMerge,
     .finalizeFunc = hllFinalize,
+#ifdef BUILD_NO_CALL
     .invertFunc   = NULL,
+#endif
     .combineFunc  = hllCombine,
   },
   {
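Reviewer note: because SBuiltinFuncDefinition.invertFunc now exists only under BUILD_NO_CALL, every designated initializer naming it must be guarded by the same flag — which is what the long run of hunks above does, entry by entry. A compilable miniature of the pairing, with invented names:

```c
#include <stdint.h>
#include <stdio.h>

typedef int32_t (*ExecFn)(int32_t);

static int32_t process(int32_t v) { return v + 1; }
#ifdef BUILD_NO_CALL
static int32_t invert(int32_t v) { return v - 1; }
#endif

/* When a struct member exists only under a build flag, every designated
 * initializer that names it must be guarded by the same flag; otherwise the
 * default build fails to compile. */
typedef struct {
  const char* name;
  ExecFn      processFunc;
#ifdef BUILD_NO_CALL
  ExecFn      invertFunc;
#endif
} FuncDef;

static const FuncDef kFuncs[] = {
    {
        .name = "demo",
        .processFunc = process,
#ifdef BUILD_NO_CALL
        .invertFunc = invert,
#endif
    },
};

int main(void) {
  printf("%s -> %d\n", kFuncs[0].name, kFuncs[0].processFunc(41));
  return 0;
}
```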
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 8a2e118fe7..390190e8db 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -549,6 +549,7 @@ int32_t countFunction(SqlFunctionCtx* pCtx) {
   return TSDB_CODE_SUCCESS;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
   int64_t numOfElem = getNumOfElems(pCtx);
 
@@ -559,6 +560,7 @@ int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
   SET_VAL(pResInfo, *((int64_t*)buf), 1);
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
   SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
@@ -642,6 +644,7 @@ _sum_over:
   return TSDB_CODE_SUCCESS;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
   int32_t numOfElem = 0;
 
@@ -699,6 +702,7 @@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
   SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
   SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
@@ -1230,6 +1234,7 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
   return TSDB_CODE_SUCCESS;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
   int32_t numOfElem = 0;
 
@@ -1294,6 +1299,7 @@ int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
   SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   SInputColumnInfoData* pInput = &pCtx->input;
@@ -1578,11 +1584,6 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   return pResInfo->numOfRes;
 }
 
-int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx) {
-  // TODO
-  return TSDB_CODE_SUCCESS;
-}
-
 int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
   SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
   SLeastSQRInfo*       pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
diff --git a/source/libs/function/src/detail/tavgfunction.c b/source/libs/function/src/detail/tavgfunction.c
index e626c937da..6bcbd1c3a7 100644
--- a/source/libs/function/src/detail/tavgfunction.c
+++ b/source/libs/function/src/detail/tavgfunction.c
@@ -724,6 +724,7 @@ int32_t avgFunctionMerge(SqlFunctionCtx* pCtx) {
   return TSDB_CODE_SUCCESS;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
   int32_t numOfElem = 0;
 
@@ -786,6 +787,7 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
   SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
   SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 1f3fb2e943..068d1532c0 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -281,6 +281,7 @@ void fmFuncMgtDestroy() {
   }
 }
 
+#ifdef BUILD_NO_CALL
 int32_t fmSetInvertFunc(int32_t funcId, SFuncExecFuncs* pFpSet) {
   if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
     return TSDB_CODE_FAILED;
@@ -314,6 +315,7 @@ bool fmIsInvertible(int32_t funcId) {
   }
   return res;
 }
+#endif
 
 // function has same input/output type
 bool fmIsSameInOutType(int32_t funcId) {
diff --git a/source/libs/function/src/tfunctionInt.c b/source/libs/function/src/tfunctionInt.c
index e8041d1704..80b869b2d2 100644
--- a/source/libs/function/src/tfunctionInt.c
+++ b/source/libs/function/src/tfunctionInt.c
@@ -27,41 +27,3 @@
 #include "tpercentile.h"
 #include "ttszip.h"
 #include "tudf.h"
-
-void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell) { pCell->initialized = false; }
-
-int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock) {
-  int32_t maxRows = 0;
-
-  for (int32_t j = 0; j < num; ++j) {
-    SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
-    if (pResInfo != NULL && maxRows < pResInfo->numOfRes) {
-      maxRows = pResInfo->numOfRes;
-    }
-  }
-
-  blockDataEnsureCapacity(pResBlock, maxRows);
-  for (int32_t i = 0; i < num; ++i) {
-    SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);
-
-    SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[i]);
-    if (pResInfo->numOfRes == 0) {
-      for (int32_t j = 0; j < pResInfo->numOfRes; ++j) {
-        colDataSetVal(pCol, j, NULL, true);  // TODO add set null data api
-      }
-    } else {
-      for (int32_t j = 0; j < pResInfo->numOfRes; ++j) {
-        colDataSetVal(pCol, j, GET_ROWCELL_INTERBUF(pResInfo), false);
-      }
-    }
-  }
-
-  pResBlock->info.rows = maxRows;
-  return maxRows;
-}
-
-bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) {
-  return pEntry->complete;
-}
-
-bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry) { return pEntry->initialized; }
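Reviewer note: the invert functions being fenced off (count, sum, avg, stddev) implement the inverse of an aggregate step, which is what makes sliding-window maintenance incremental. A hedged, self-contained illustration of why such an inverse exists — a simplified stand-in, not the TDengine state layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Invertible running aggregate: the state can both absorb and release a
 * value, so a sliding window can advance without rescanning its contents. */
typedef struct { int64_t sum; int64_t count; } AvgState;

static void apply(AvgState* s, int64_t v)  { s->sum += v; s->count++; }
static void invert(AvgState* s, int64_t v) { s->sum -= v; s->count--; }

int main(void) {
  int64_t  data[] = {3, 7, 2, 9, 4};
  AvgState s = {0};
  int      win = 3;
  for (int i = 0; i < 5; ++i) {
    apply(&s, data[i]);
    if (i >= win) invert(&s, data[i - win]);  /* drop the expired value */
    if (i >= win - 1) printf("window ending %d: avg=%.2f\n", i, (double)s.sum / s.count);
  }
  return 0;
}
```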
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index 8101b342a4..c671e7717c 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -63,8 +63,8 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx)
 
 static void resetBoundingBox(MinMaxEntry *range, int32_t type) {
   if (IS_SIGNED_NUMERIC_TYPE(type)) {
-    range->i64MaxVal = INT64_MIN;
-    range->i64MinVal = INT64_MAX;
+    range->dMaxVal = INT64_MIN;
+    range->dMinVal = INT64_MAX;
   } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
     range->u64MaxVal = 0;
     range->u64MinVal = UINT64_MAX;
@@ -80,8 +80,8 @@ static int32_t setBoundingBox(MinMaxEntry *range, int16_t type, double minval, d
   }
 
   if (IS_SIGNED_NUMERIC_TYPE(type)) {
-    range->i64MinVal = (int64_t)minval;
-    range->i64MaxVal = (int64_t)maxval;
+    range->dMinVal = (int64_t)minval;
+    range->dMaxVal = (int64_t)maxval;
   } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
     range->u64MinVal = (uint64_t)minval;
     range->u64MaxVal = (uint64_t)maxval;
@@ -137,21 +137,21 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) {
 
   int32_t index = -1;
 
-  if (v > pBucket->range.i64MaxVal || v < pBucket->range.i64MinVal) {
+  if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) {
     return index;
   }
 
   // divide the value range into 1024 buckets
-  uint64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal;
+  uint64_t span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
   if (span < pBucket->numOfSlots) {
-    int64_t delta = v - pBucket->range.i64MinVal;
+    int64_t delta = v - pBucket->range.dMinVal;
     index = (delta % pBucket->numOfSlots);
   } else {
     double slotSpan = ((double)span) / pBucket->numOfSlots;
-    uint64_t delta = v - pBucket->range.i64MinVal;
+    uint64_t delta = (uint64_t)(v - pBucket->range.dMinVal);
 
-    index = (int32_t)(delta / slotSpan);
-    if (v == pBucket->range.i64MaxVal || index == pBucket->numOfSlots) {
+    index = delta / slotSpan;
+    if (v == pBucket->range.dMaxVal || index == pBucket->numOfSlots) {
       index -= 1;
     }
   }
@@ -318,23 +318,23 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT
     int64_t v = 0;
     GET_TYPED_DATA(v, int64_t, dataType, data);
 
-    if (r->i64MinVal > v) {
-      r->i64MinVal = v;
+    if (r->dMinVal > v) {
+      r->dMinVal = v;
     }
 
-    if (r->i64MaxVal < v) {
-      r->i64MaxVal = v;
+    if (r->dMaxVal < v) {
+      r->dMaxVal = v;
     }
   } else if (IS_UNSIGNED_NUMERIC_TYPE(dataType)) {
     uint64_t v = 0;
     GET_TYPED_DATA(v, uint64_t, dataType, data);
 
-    if (r->i64MinVal > v) {
-      r->i64MinVal = v;
+    if (r->u64MinVal > v) {
+      r->u64MinVal = v;
     }
 
-    if (r->i64MaxVal < v) {
-      r->i64MaxVal = v;
+    if (r->u64MaxVal < v) {
+      r->u64MaxVal = v;
     }
   } else if (IS_FLOAT_TYPE(dataType)) {
     double v = 0;
@@ -438,7 +438,7 @@ static double getIdenticalDataVal(tMemBucket *pMemBucket, int32_t slotIndex) {
 
   double finalResult = 0.0;
   if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) {
-    finalResult = (double)pSlot->range.i64MinVal;
+    finalResult = (double)pSlot->range.dMinVal;
   } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) {
     finalResult = (double)pSlot->range.u64MinVal;
   } else {
@@ -469,8 +469,8 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
         double maxOfThisSlot = 0;
         double minOfNextSlot = 0;
         if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) {
-          maxOfThisSlot = (double)pSlot->range.i64MaxVal;
-          minOfNextSlot = (double)next.i64MinVal;
+          maxOfThisSlot = (double)pSlot->range.dMaxVal;
+          minOfNextSlot = (double)next.dMinVal;
         } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) {
           maxOfThisSlot = (double)pSlot->range.u64MaxVal;
           minOfNextSlot = (double)next.u64MinVal;
@@ -577,7 +577,7 @@ int32_t getPercentile(tMemBucket *pMemBucket, double percent, double *result) {
     MinMaxEntry *pRange = &pMemBucket->range;
 
     if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) {
-      *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal);
+      *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->dMaxVal : pRange->dMinVal);
     } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) {
       *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->u64MaxVal : pRange->u64MinVal);
     } else {
@@ -603,6 +603,6 @@ bool isIdenticalData(tMemBucket *pMemBucket, int32_t index) {
   if (IS_FLOAT_TYPE(pMemBucket->type)) {
     return fabs(pSeg->range.dMaxVal - pSeg->range.dMinVal) < DBL_EPSILON;
   } else {
-    return pSeg->range.i64MinVal == pSeg->range.i64MaxVal;
+    return pSeg->range.dMinVal == pSeg->range.dMaxVal;
   }
 }
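Reviewer note: tpercentile.c keeps the bucket arithmetic but now reads the ranges through the double members. The sketch below reproduces the slot-mapping shape of tBucketIntHash with invented bounds, including the clamp that keeps the maximum value inside the last slot.

```c
#include <stdint.h>
#include <stdio.h>

/* Map a value into one of nSlots slots covering [min, max] - the same
 * arithmetic shape as tBucketIntHash: narrow ranges get a modulo mapping,
 * wide ranges a proportional one. Names and bounds are illustrative only. */
static int32_t slot_index(double v, double min, double max, int32_t nSlots) {
  if (v < min || v > max) return -1;  /* outside the bounding box */
  double span = max - min;
  if (span < nSlots) {                /* fewer distinct values than slots */
    return (int32_t)((int64_t)(v - min) % nSlots);
  }
  double  slotSpan = span / nSlots;
  int32_t idx = (int32_t)((v - min) / slotSpan);
  if (v == max || idx == nSlots) idx -= 1;  /* clamp the top edge into the last slot */
  return idx;
}

int main(void) {
  printf("%d\n", slot_index(500.0, 0.0, 1024.0 * 1024.0, 1024));           /* 0    */
  printf("%d\n", slot_index(1024.0 * 1024.0, 0.0, 1024.0 * 1024.0, 1024)); /* 1023 */
  return 0;
}
```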
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index e40c470dde..ccb75e9341 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -424,6 +424,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
   COPY_SCALAR_FIELD(groupOrderScan);
   COPY_SCALAR_FIELD(onlyMetaCtbIdx);
   COPY_SCALAR_FIELD(filesetDelimited);
+  COPY_SCALAR_FIELD(isCountByTag);
   return TSDB_CODE_SUCCESS;
 }
 
@@ -652,6 +653,7 @@ static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhy
   COPY_SCALAR_FIELD(watermark);
   COPY_SCALAR_FIELD(igExpired);
   COPY_SCALAR_FIELD(filesetDelimited);
+  COPY_SCALAR_FIELD(needCountEmptyTable);
   return TSDB_CODE_SUCCESS;
 }
 
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index e066797100..3f75c6724e 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1843,6 +1843,7 @@ static const char* jkTableScanPhysiPlanSubtable = "Subtable";
 static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";
 static const char* jkTableScanPhysiPlanIgnoreUpdate = "IgnoreUpdate";
 static const char* jkTableScanPhysiPlanFilesetDelimited = "FilesetDelimited";
+static const char* jkTableScanPhysiPlanNeedCountEmptyTable = "NeedCountEmptyTable";
 
 static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
   const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
@@ -1914,7 +1915,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanFilesetDelimited, pNode->filesetDelimited);
   }
-
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, pNode->needCountEmptyTable);
+  }
   return code;
 }
 
@@ -1988,7 +1991,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanFilesetDelimited, &pNode->filesetDelimited);
   }
-
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, &pNode->needCountEmptyTable);
+  }
   return code;
 }
 
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
index a2ea8a7e59..a4837899fd 100644
--- a/source/libs/nodes/src/nodesMsgFuncs.c
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -2170,6 +2170,9 @@ static int32_t physiTableScanNodeInlineToMsg(const void* pObj, STlvEncoder* pEnc
   if (TSDB_CODE_SUCCESS == code) {
     code = tlvEncodeValueBool(pEncoder, pNode->filesetDelimited);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeValueBool(pEncoder, pNode->needCountEmptyTable);
+  }
   return code;
 }
 
@@ -2251,7 +2254,9 @@ static int32_t msgToPhysiTableScanNodeInline(STlvDecoder* pDecoder, void* pObj)
   if (TSDB_CODE_SUCCESS == code) {
     code = tlvDecodeValueBool(pDecoder, &pNode->filesetDelimited);
   }
-
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvDecodeValueBool(pDecoder, &pNode->needCountEmptyTable);
+  }
   return code;
 }
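Reviewer note: the three nodes/*.c files add the same isCountByTag/needCountEmptyTable field to the clone, JSON, and inline TLV paths, in matching order. The toy codec below shows why the order matters: encoder and decoder must append the new field at the same position, or every later field shifts. Types and names here are stand-ins, not the TDengine API.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A new plan-node field has to be threaded through every transport. */
typedef struct { bool filesetDelimited; bool needCountEmptyTable; } ScanFlags;

static size_t encode(const ScanFlags* in, uint8_t* buf) {
  size_t n = 0;
  buf[n++] = (uint8_t)in->filesetDelimited;
  buf[n++] = (uint8_t)in->needCountEmptyTable; /* appended last, like the diff */
  return n;
}

static size_t decode(ScanFlags* out, const uint8_t* buf) {
  size_t n = 0;
  out->filesetDelimited = buf[n++];
  out->needCountEmptyTable = buf[n++];         /* must mirror the encoder */
  return n;
}

int main(void) {
  ScanFlags in = {true, true}, out = {false, false};
  uint8_t   buf[2];
  encode(&in, buf);
  decode(&out, buf);
  int ok = (in.filesetDelimited == out.filesetDelimited) &&
           (in.needCountEmptyTable == out.needCountEmptyTable);
  printf("round trip ok: %d\n", ok);
  return 0;
}
```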
diff --git a/source/libs/parser/CMakeLists.txt b/source/libs/parser/CMakeLists.txt
index 4e4a4def1d..c5ee1a00c4 100644
--- a/source/libs/parser/CMakeLists.txt
+++ b/source/libs/parser/CMakeLists.txt
@@ -4,9 +4,6 @@ IF (TD_ENTERPRISE)
   LIST(APPEND PARSER_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/parserView.c)
 ENDIF ()
 
-IF (TD_BI_SUPPORT)
-  LIST(APPEND PARSER_SRC ${TD_ENTERPRISE_DIR}/src/plugins/bi/src/biRewriteQuery.c)
-ENDIF ()
 add_library(parser STATIC ${PARSER_SRC})
 target_include_directories(
   parser
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index eb866c99aa..8e89ae1f53 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -371,6 +371,18 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
   return (SNode*)val;
 }
 
+static bool hasHint(SNodeList* pHintList, EHintOption hint) {
+  if (!pHintList) return false;
+  SNode* pNode;
+  FOREACH(pNode, pHintList) {
+    SHintNode* pHint = (SHintNode*)pNode;
+    if (pHint->option == hint) {
+      return true;
+    }
+  }
+  return false;
+}
+
 bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOption opt, SToken* paramList,
                        int32_t paramNum) {
   void* value = NULL;
@@ -384,6 +396,10 @@ bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOpt
     }
     case HINT_SORT_FOR_GROUP:
       if (paramNum > 0) return true;
+      if (hasHint(*ppHintList, HINT_PARTITION_FIRST)) return true;
+      break;
+    case HINT_PARTITION_FIRST:
+      if (paramNum > 0 || hasHint(*ppHintList, HINT_SORT_FOR_GROUP)) return true;
       break;
     default:
       return true;
@@ -455,6 +471,14 @@ SNodeList* createHintNodeList(SAstCreateContext* pCxt, const SToken* pLiteral) {
         }
         opt = HINT_SORT_FOR_GROUP;
         break;
+      case TK_PARTITION_FIRST:
+        lastComma = false;
+        if (0 != opt || inParamList) {
+          quit = true;
+          break;
+        }
+        opt = HINT_PARTITION_FIRST;
+        break;
       case TK_NK_LP:
         lastComma = false;
         if (0 == opt || inParamList) {
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index 441f4da3b1..af24624067 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -334,6 +334,13 @@ static int32_t calcConstSelectWithoutFrom(SCalcConstContext* pCxt, SSelectStmt*
 
 static int32_t calcConstSelectFrom(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) {
   int32_t code = calcConstFromTable(pCxt, pSelect->pFromTable);
+  if (TSDB_CODE_SUCCESS == code && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
+      ((STempTableNode*)pSelect->pFromTable)->pSubquery != NULL &&
+      QUERY_NODE_SELECT_STMT == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery) &&
+      ((SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery)->isEmptyResult) {
+    pSelect->isEmptyResult = true;
+    return code;
+  }
   if (TSDB_CODE_SUCCESS == code) {
    code = calcConstProjections(pCxt, pSelect, subquery);
  }
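Reviewer note: addHintNodeToList now rejects SORT_FOR_GROUP and PARTITION_FIRST when the other is already present, so a query can carry at most one of the two grouping strategies. A compilable miniature of that mutual-exclusion guard, with simplified types standing in for EHintOption/SNodeList:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { HINT_SORT_FOR_GROUP, HINT_PARTITION_FIRST } HintOption;

static bool has_hint(const HintOption* hints, int n, HintOption h) {
  for (int i = 0; i < n; ++i)
    if (hints[i] == h) return true;
  return false;
}

/* Reject the new hint when it conflicts with one already accepted, the same
 * shape as the hasHint checks added in the parser. */
static bool accept_hint(HintOption* hints, int* n, HintOption h) {
  HintOption rival = (h == HINT_SORT_FOR_GROUP) ? HINT_PARTITION_FIRST : HINT_SORT_FOR_GROUP;
  if (has_hint(hints, *n, rival) || has_hint(hints, *n, h)) return false;
  hints[(*n)++] = h;
  return true;
}

int main(void) {
  HintOption hints[4];
  int        n = 0;
  printf("%d\n", accept_hint(hints, &n, HINT_SORT_FOR_GROUP));  /* 1: accepted */
  printf("%d\n", accept_hint(hints, &n, HINT_PARTITION_FIRST)); /* 0: conflict */
  return 0;
}
```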
diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c
index 2dbba38212..13c4431b62 100644
--- a/source/libs/parser/src/parInsertSml.c
+++ b/source/libs/parser/src/parInsertSml.c
@@ -210,7 +210,15 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32
   SSmlKv* kv = (SSmlKv*)data;
   if (kv->keyLen != strlen(pColSchema->name) || memcmp(kv->key, pColSchema->name, kv->keyLen) != 0 ||
       kv->type != pColSchema->type) {
     ret = TSDB_CODE_SML_INVALID_DATA;
-    uInfo("SML smlBuildCol error col not same %s", pColSchema->name);
+    char* tmp = taosMemoryCalloc(kv->keyLen + 1, 1);
+    if (tmp) {
+      memcpy(tmp, kv->key, kv->keyLen);
+      uInfo("SML data(name:%s type:%s) does not match the db data(name:%s type:%s)",
+            tmp, tDataTypes[kv->type].name, pColSchema->name, tDataTypes[pColSchema->type].name);
+      taosMemoryFree(tmp);
+    } else {
+      uError("SML smlBuildCol out of memory");
+    }
     goto end;
   }
   if (kv->type == TSDB_DATA_TYPE_NCHAR) {
diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c
index 03a5317cd3..072892fe7f 100644
--- a/source/libs/parser/src/parTokenizer.c
+++ b/source/libs/parser/src/parTokenizer.c
@@ -170,6 +170,7 @@ static SKeyword keywordTable[] = {
     {"PAGES", TK_PAGES},
     {"PAGESIZE", TK_PAGESIZE},
     {"PARTITION", TK_PARTITION},
+    {"PARTITION_FIRST", TK_PARTITION_FIRST},
     {"PASS", TK_PASS},
     {"PORT", TK_PORT},
     {"PPS", TK_PPS},
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 3bb24566c2..e407cc1e81 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1104,11 +1104,192 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p
   return DEAL_RES_CONTINUE;
 }
 
-#ifndef TD_ENTERPRISE
+static void biMakeAliasNameInMD5(char* pExprStr, int32_t len, char* pAlias) {
+  T_MD5_CTX ctx;
+  tMD5Init(&ctx);
+  tMD5Update(&ctx, pExprStr, len);
+  tMD5Final(&ctx);
+  char* p = pAlias;
+  for (uint8_t i = 0; i < tListLen(ctx.digest); ++i) {
+    sprintf(p, "%02x", ctx.digest[i]);
+    p += 2;
+  }
+}
+
+static SNode* biMakeTbnameProjectAstNode(char* funcName, char* tableAlias) {
+  SValueNode* valNode = NULL;
+  if (tableAlias != NULL) {
+    SValueNode* n = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+    n->literal = tstrdup(tableAlias);
+    n->node.resType.type = TSDB_DATA_TYPE_BINARY;
+    n->node.resType.bytes = strlen(n->literal);
+    n->isDuration = false;
+    n->translate = false;
+    valNode = n;
+  }
+
+  SFunctionNode* tbNameFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
+  tstrncpy(tbNameFunc->functionName, "tbname", TSDB_FUNC_NAME_LEN);
+  if (valNode != NULL) {
+    nodesListMakeAppend(&tbNameFunc->pParameterList, (SNode*)valNode);
+  }
+  snprintf(tbNameFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias),
+           (tableAlias) ? "%s.tbname" : "%stbname",
+           (tableAlias) ? tableAlias : "");
+  strncpy(tbNameFunc->node.aliasName, tbNameFunc->functionName, TSDB_COL_NAME_LEN);
+
+  if (funcName == NULL) {
+    return (SNode*)tbNameFunc;
+  } else {
+    SFunctionNode* multiResFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
+    tstrncpy(multiResFunc->functionName, funcName, TSDB_FUNC_NAME_LEN);
+    nodesListMakeAppend(&multiResFunc->pParameterList, (SNode*)tbNameFunc);
+
+    if (tsKeepColumnName) {
+      snprintf(multiResFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias),
+               (tableAlias) ? "%s.tbname" : "%stbname",
+               (tableAlias) ? tableAlias : "");
+      strcpy(multiResFunc->node.aliasName, tbNameFunc->functionName);
+    } else {
+      snprintf(multiResFunc->node.userAlias, sizeof(multiResFunc->node.userAlias),
+               tableAlias ? "%s(%s.tbname)" : "%s(%stbname)", funcName,
+               tableAlias ? tableAlias : "");
+      biMakeAliasNameInMD5(multiResFunc->node.userAlias, strlen(multiResFunc->node.userAlias),
+                           multiResFunc->node.aliasName);
+    }
+
+    return (SNode*)multiResFunc;
+  }
+}
+
+static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt* pSelect, SNode* pNode,
+                                            SListCell* pSelectListCell) {
+  SNodeList* pTbnameNodeList = nodesMakeList();
+
+  SFunctionNode* pFunc = (SFunctionNode*)pNode;
+  if (strcasecmp(pFunc->functionName, "last") == 0 ||
+      strcasecmp(pFunc->functionName, "last_row") == 0 ||
+      strcasecmp(pFunc->functionName, "first") == 0) {
+    SNodeList* pParams = pFunc->pParameterList;
+    SNode*     pPara = NULL;
+    FOREACH(pPara, pParams) {
+      if (nodesIsStar(pPara)) {
+        SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
+        size_t  n = taosArrayGetSize(pTables);
+        for (int32_t i = 0; i < n; ++i) {
+          STableNode* pTable = taosArrayGetP(pTables, i);
+          if (nodeType(pTable) == QUERY_NODE_REAL_TABLE && ((SRealTableNode*)pTable)->pMeta != NULL &&
+              ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) {
+            SNode* pTbnameNode = biMakeTbnameProjectAstNode(pFunc->functionName, NULL);
+            nodesListAppend(pTbnameNodeList, pTbnameNode);
+          }
+        }
+        if (LIST_LENGTH(pTbnameNodeList) > 0) {
+          nodesListInsertListAfterPos(pSelect->pProjectionList, pSelectListCell, pTbnameNodeList);
+        }
+      } else if (nodesIsTableStar(pPara)) {
+        char*       pTableAlias = ((SColumnNode*)pPara)->tableAlias;
+        STableNode* pTable = NULL;
+        int32_t     code = findTable(pCxt, pTableAlias, &pTable);
+        if (TSDB_CODE_SUCCESS == code && nodeType(pTable) == QUERY_NODE_REAL_TABLE &&
+            ((SRealTableNode*)pTable)->pMeta != NULL &&
+            ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) {
+          SNode* pTbnameNode = biMakeTbnameProjectAstNode(pFunc->functionName, pTableAlias);
+          nodesListAppend(pTbnameNodeList, pTbnameNode);
+        }
+        if (LIST_LENGTH(pTbnameNodeList) > 0) {
+          nodesListInsertListAfterPos(pSelect->pProjectionList, pSelectListCell, pTbnameNodeList);
+        }
+      }
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+// after translate from
+// before translate select list
+int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) {
+  SNode*     pNode = NULL;
+  SNodeList* pTbnameNodeList = nodesMakeList();
+  WHERE_EACH(pNode, pSelect->pProjectionList) {
+    if (nodesIsStar(pNode)) {
+      SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
+      size_t  n = taosArrayGetSize(pTables);
+      for (int32_t i = 0; i < n; ++i) {
+        STableNode* pTable = taosArrayGetP(pTables, i);
+        if (nodeType(pTable) == QUERY_NODE_REAL_TABLE &&
+            ((SRealTableNode*)pTable)->pMeta != NULL &&
+            ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) {
+          SNode* pTbnameNode = biMakeTbnameProjectAstNode(NULL, NULL);
+          nodesListAppend(pTbnameNodeList, pTbnameNode);
+        }
+      }
+      if (LIST_LENGTH(pTbnameNodeList) > 0) {
+        nodesListInsertListAfterPos(pSelect->pProjectionList, cell, pTbnameNodeList);
+      }
+    } else if (nodesIsTableStar(pNode)) {
+      char*       pTableAlias = ((SColumnNode*)pNode)->tableAlias;
+      STableNode* pTable = NULL;
+      int32_t     code = findTable(pCxt, pTableAlias, &pTable);
+      if (TSDB_CODE_SUCCESS == code &&
+          nodeType(pTable) == QUERY_NODE_REAL_TABLE &&
+          ((SRealTableNode*)pTable)->pMeta != NULL &&
+          ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) {
+        SNode* pTbnameNode = biMakeTbnameProjectAstNode(NULL, pTableAlias);
+        nodesListAppend(pTbnameNodeList, pTbnameNode);
+      }
+      if (LIST_LENGTH(pTbnameNodeList) > 0) {
+        nodesListInsertListAfterPos(pSelect->pProjectionList, cell, pTbnameNodeList);
+      }
+    } else if (nodeType(pNode) == QUERY_NODE_FUNCTION) {
+      biRewriteSelectFuncParamStar(pCxt, pSelect, pNode, cell);
+    }
+    WHERE_NEXT;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 bool biRewriteToTbnameFunc(STranslateContext* pCxt, SNode** ppNode) {
+  SColumnNode* pCol = (SColumnNode*)(*ppNode);
+  if ((strcasecmp(pCol->colName, "tbname") == 0) &&
+      ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable &&
+      QUERY_NODE_REAL_TABLE == nodeType(((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) {
+    SFunctionNode* tbnameFuncNode = NULL;
+    tbnameFuncNode =
+        (SFunctionNode*)biMakeTbnameProjectAstNode(NULL, (pCol->tableAlias[0] != '\0') ? pCol->tableAlias : NULL);
+    tbnameFuncNode->node.resType = pCol->node.resType;
+    strcpy(tbnameFuncNode->node.aliasName, pCol->node.aliasName);
+    strcpy(tbnameFuncNode->node.userAlias, pCol->node.userAlias);
+
+    nodesDestroyNode(*ppNode);
+    *ppNode = (SNode*)tbnameFuncNode;
+    return true;
+  }
   return false;
 }
-#endif
+
+int32_t biCheckCreateTableTbnameCol(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
+  if (pStmt->pTags) {
+    SNode* pNode = NULL;
+    FOREACH(pNode, pStmt->pTags) {
+      SColumnDefNode* pTag = (SColumnDefNode*)pNode;
+      if (strcasecmp(pTag->colName, "tbname") == 0) {
+        int32_t code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME,
+                                               "tbname can not be used for tags in BI mode");
+        return code;
+      }
+    }
+  }
+  if (pStmt->pCols) {
+    SNode* pNode = NULL;
+    FOREACH(pNode, pStmt->pCols) {
+      SColumnDefNode* pCol = (SColumnDefNode*)pNode;
+      if (strcasecmp(pCol->colName, "tbname") == 0) {
+        int32_t code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN,
+                                               "tbname can not be used for columns in BI mode");
+        return code;
+      }
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
 
 static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) {
   if (NULL == pCxt->pCurrStmt ||
@@ -1902,6 +2083,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
   if (NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt)) {
     SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt;
     pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId);
+    pSelect->hasCountFunc = pSelect->hasCountFunc ? true : (FUNCTION_TYPE_COUNT == pFunc->funcType);
     pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId);
 
     if (fmIsIndefiniteRowsFunc(pFunc->funcId)) {
@@ -2976,7 +3158,11 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable) {
         ((SSelectStmt*)pTempTable->pSubquery)->isEmptyResult && isSelectStmt(pCxt->pCurrStmt)) {
       ((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
     }
-
+    if (QUERY_NODE_SELECT_STMT == nodeType(pTempTable->pSubquery) && isSelectStmt(pCxt->pCurrStmt)) {
+      SSelectStmt* pSubStmt = (SSelectStmt*)pTempTable->pSubquery;
+      SSelectStmt* pCurrSmt = (SSelectStmt*)(pCxt->pCurrStmt);
+      pCurrSmt->timeLineResMode = pSubStmt->timeLineResMode;
+    }
     pTempTable->table.precision = getStmtPrecision(pTempTable->pSubquery);
     pTempTable->table.singleTable = stmtIsSingleTable(pTempTable->pSubquery);
     code = addNamespace(pCxt, pTempTable);
@@ -3178,10 +3364,6 @@ static int32_t createTags(STranslateContext* pCxt, SNodeList** pOutput) {
   return TSDB_CODE_SUCCESS;
 }
 
-#ifndef TD_ENTERPRISE
-int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) { return TSDB_CODE_SUCCESS; }
-#endif
-
 static int32_t translateStar(STranslateContext* pCxt, SSelectStmt* pSelect) {
   SNode* pNode = NULL;
   WHERE_EACH(pNode, pSelect->pProjectionList) {
@@ -5743,11 +5925,6 @@ static int32_t checkTableDeleteMarkOption(STranslateContext* pCxt, STableOptions
   return code;
 }
 
-#ifndef TD_ENTERPRISE
-int32_t biCheckCreateTableTbnameCol(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
-  return TSDB_CODE_SUCCESS;
-}
-#endif
 
 static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt, bool createStable) {
   if (NULL != strchr(pStmt->tableName, '.')) {
@@ -6757,11 +6934,37 @@ static int32_t translateCreateFullTextIndex(STranslateContext* pCxt, SCreateInde
 }
 
 static int32_t translateCreateNormalIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) {
+  int32_t     code = 0;
+  SName       name;
+  STableMeta* pMeta = NULL;
+
+  code = getTargetMeta(pCxt, toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name), &pMeta, false);
+  if (code) {
+    taosMemoryFree(pMeta);
+    return code;
+  }
+
+  if (LIST_LENGTH(pStmt->pCols) != 1) {
+    taosMemoryFree(pMeta);
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_NUM, "Only one tag is allowed");
+  }
+
+  SNode* pNode = NULL;
+  FOREACH(pNode, pStmt->pCols) {
+    const SSchema* pSchema = getTagSchema(pMeta, ((SColumnNode*)pNode)->colName);
+    if (!pSchema) {
+      taosMemoryFree(pMeta);
+      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, ((SColumnNode*)pNode)->colName);
+    }
+  }
+
   SCreateTagIndexReq createTagIdxReq = {0};
-  int32_t            code = buildCreateTagIndexReq(pCxt, pStmt, &createTagIdxReq);
+  code = buildCreateTagIndexReq(pCxt, pStmt, &createTagIdxReq);
   if (TSDB_CODE_SUCCESS == code) {
     code = buildCmdMsg(pCxt, TDMT_MND_CREATE_INDEX, (FSerializeFunc)tSerializeSCreateTagIdxReq, &createTagIdxReq);
   }
+_exit:
+  taosMemoryFree(pMeta);
   return code;
 }
 
@@ -7274,7 +7477,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream
       }
     }
     if (!found) {
-      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnDefNode*)pTag)->colName);
+      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, getTagNameForCreateStreamTag(pTag));
     }
   }
   return TSDB_CODE_SUCCESS;
 }
@@ -7989,9 +8192,9 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void
   }
   if (TSDB_CODE_SUCCESS == code) {
     if (interval.interval > 0) {
-      pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval);
+      pStmt->pReq->lastTs =
+          taosTimeAdd(taosTimeTruncate(lastTs, &interval), interval.interval, interval.intervalUnit, interval.precision);
     } else {
-      pStmt->pReq->lastTs = lastTs;
+      pStmt->pReq->lastTs = lastTs + 1;  // start key of the next time window
     }
     code = buildCmdMsg(&cxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, pStmt->pReq);
   }
@@ -8791,7 +8994,7 @@ static int32_t extractCompactDbResultSchema(int32_t* numOfCols, SSchema** pSchem
 
   (*pSchema)[0].type = TSDB_DATA_TYPE_BINARY;
   (*pSchema)[0].bytes = COMPACT_DB_RESULT_FIELD1_LEN;
-  strcpy((*pSchema)[0].name, "name");
+  strcpy((*pSchema)[0].name, "result");
 
   (*pSchema)[1].type = TSDB_DATA_TYPE_INT;
   (*pSchema)[1].bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes;
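Reviewer note: translatePostCreateStream now records the resume point as the start key of the next window rather than the last processed key, which avoids reprocessing the final window on restart. A fixed-width sketch of the arithmetic — taosTimeTruncate/taosTimeAdd also handle calendar units such as 'n' and 'y', which this stand-in skips:

```c
#include <stdint.h>
#include <stdio.h>

/* Compute where a restarted stream should resume, following the logic in
 * translatePostCreateStream. Fixed-width millisecond intervals only. */
static int64_t next_start_ts(int64_t lastTs, int64_t intervalMs) {
  if (intervalMs > 0) {
    int64_t winStart = (lastTs / intervalMs) * intervalMs; /* taosTimeTruncate stand-in */
    return winStart + intervalMs;                          /* first key of the next window */
  }
  return lastTs + 1; /* no window: resume strictly after the last processed row */
}

int main(void) {
  /* last processed key 1700000009500 in a 10s window -> resume at ...10000 */
  printf("%lld\n", (long long)next_start_ts(1700000009500LL, 10000));
  printf("%lld\n", (long long)next_start_ts(1700000009500LL, 0));
  return 0;
}
```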
diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h
index e2a4ded5a9..d4074e1373 100644
--- a/source/libs/planner/inc/planInt.h
+++ b/source/libs/planner/inc/planInt.h
@@ -47,6 +47,7 @@ int32_t validateQueryPlan(SPlanContext* pCxt, SQueryPlan* pPlan);
 
 bool getBatchScanOptionFromHint(SNodeList* pList);
 bool getSortForGroupOptHint(SNodeList* pList);
+bool getOptHint(SNodeList* pList, EHintOption hint);
 SLogicNode* getLogicNodeRootNode(SLogicNode* pCurr);
 int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes);
 bool isPartTableAgg(SAggLogicNode* pAgg);
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 2adc5b3072..12b7360165 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -486,6 +486,16 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
     code = tagScanSetExecutionMode(pScan);
   }
 
+  bool isCountByTag = false;
+  if (pSelect->hasCountFunc && NULL == pSelect->pWindow) {
+    if (pSelect->pGroupByList) {
+      isCountByTag = !keysHasCol(pSelect->pGroupByList);
+    } else if (pSelect->pPartitionByList) {
+      isCountByTag = !keysHasCol(pSelect->pPartitionByList);
+    }
+  }
+  pScan->isCountByTag = isCountByTag;
+
   if (TSDB_CODE_SUCCESS == code) {
     *pLogicNode = (SLogicNode*)pScan;
   } else {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index aa3181e166..730dcd9352 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -2529,7 +2529,7 @@ static bool lastRowScanOptCheckFuncList(SLogicNode* pNode, bool* hasOtherFunc) {
       SNode* pParam = nodesListGetNode(pAggFunc->pParameterList, 0);
       if (QUERY_NODE_COLUMN == nodeType(pParam)) {
         SColumnNode* pCol = (SColumnNode*)pParam;
-        if (PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId) {
+        if (COLUMN_TYPE_COLUMN == pCol->colType && PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId) {
           if (selectNonPKColId != pCol->colId) {
             selectNonPKColId = pCol->colId;
             selectNonPKColNum++;
@@ -3997,7 +3997,8 @@ static int32_t partitionColsOpt(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
       }
     }
     return code;
-  } else if (pNode->node.pParent && nodeType(pNode->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) {
+  } else if (pNode->node.pParent && nodeType(pNode->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG &&
+             !getOptHint(pRootNode->pHint, HINT_PARTITION_FIRST)) {
     // Check if we can delete partition node
     SAggLogicNode* pAgg = (SAggLogicNode*)pNode->node.pParent;
     FOREACH(node, pNode->pPartitionKeys) {
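Reviewer note: createScanLogicNode derives isCountByTag from the query shape — a count() with no window whose group/partition keys reference no normal column. The predicate below is a compilable restatement of my reading of that hunk, with the node lists reduced to booleans; it is not TDengine API.

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool hasCountFunc;   /* SELECT list contains count()              */
  bool hasWindow;      /* interval/session/state window present     */
  bool hasKeys;        /* a GROUP BY or PARTITION BY list exists    */
  bool keysHaveColumn; /* those keys touch a normal data column     */
} QueryShape;

/* Mirrors the isCountByTag test: counting grouped purely by tags can be
 * answered per table without touching row data. */
static bool is_count_by_tag(const QueryShape* q) {
  return q->hasCountFunc && !q->hasWindow && q->hasKeys && !q->keysHaveColumn;
}

int main(void) {
  QueryShape byTag = {true, false, true, false}; /* count(*) ... group by tag */
  QueryShape byCol = {true, false, true, true};  /* count(*) ... group by col */
  printf("%d %d\n", is_count_by_tag(&byTag), is_count_by_tag(&byCol)); /* 1 0 */
  return 0;
}
```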
@@ -623,6 +623,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
   pTableScan->igCheckUpdate = pScanLogicNode->igCheckUpdate;
   pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false;
   pTableScan->filesetDelimited = pScanLogicNode->filesetDelimited;
+  pTableScan->needCountEmptyTable = pScanLogicNode->isCountByTag;
 
   int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
   if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c
index 2da270e42d..aeef3f2487 100644
--- a/source/libs/planner/src/planUtil.c
+++ b/source/libs/planner/src/planUtil.c
@@ -432,6 +432,7 @@ bool getBatchScanOptionFromHint(SNodeList* pList) {
 }
 
 bool getSortForGroupOptHint(SNodeList* pList) {
+  if (!pList) return false;
   SNode* pNode;
   FOREACH(pNode, pList) {
     SHintNode* pHint = (SHintNode*)pNode;
@@ -442,6 +443,18 @@ bool getSortForGroupOptHint(SNodeList* pList) {
   return false;
 }
 
+bool getOptHint(SNodeList* pList, EHintOption hint) {
+  if (!pList) return false;
+  SNode* pNode;
+  FOREACH(pNode, pList) {
+    SHintNode* pHint = (SHintNode*)pNode;
+    if (pHint->option == hint) {
+      return true;
+    }
+  }
+  return false;
+}
+
 int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes) {
   int32_t     code = TSDB_CODE_SUCCESS;
   SLogicNode* pCurr = (SLogicNode*)pNode;
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index 734d1e7d17..e713043b80 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -1254,6 +1254,7 @@ int32_t toCharFunction(SScalarParam* pInput, int32_t inputNum, SScalarParam* pOu
   char *  out = taosMemoryCalloc(1, TS_FORMAT_MAX_LEN + VARSTR_HEADER_SIZE);
   int32_t len;
   SArray *formats = NULL;
+  int32_t code = 0;
   for (int32_t i = 0; i < pInput[0].numOfRows; ++i) {
     if (colDataIsNull_s(pInput[1].columnData, i) || colDataIsNull_s(pInput[0].columnData, i)) {
       colDataSetNULL(pOutput->columnData, i);
@@ -1272,14 +1273,15 @@ int32_t toCharFunction(SScalarParam* pInput, int32_t inputNum, SScalarParam* pOu
       }
     }
     int32_t precision = pInput[0].columnData->info.precision;
-    taosTs2Char(format, &formats, *(int64_t *)ts, precision, varDataVal(out), TS_FORMAT_MAX_LEN);
+    code = taosTs2Char(format, &formats, *(int64_t *)ts, precision, varDataVal(out), TS_FORMAT_MAX_LEN);
+    if (code) break;
     varDataSetLen(out, strlen(varDataVal(out)));
     colDataSetVal(pOutput->columnData, i, out, false);
   }
   if (formats) taosArrayDestroy(formats);
   taosMemoryFree(format);
   taosMemoryFree(out);
-  return TSDB_CODE_SUCCESS;
+  return code;
 }
 
 /** Time functions **/
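Reviewer note: toCharFunction now captures the per-row code, breaks on the first failure, and still runs the shared cleanup before returning it. The same shape in a standalone sketch, with invented helpers and codes:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a per-row conversion that can fail. */
static int process_row(int row) { return (row == 3) ? -1 : 0; /* fail on row 3 */ }

int process_all(int nRows) {
  int   code = 0;
  char* scratch = (char*)malloc(64); /* shared per-batch buffer */
  for (int i = 0; i < nRows; ++i) {
    code = process_row(i);
    if (code) break;                 /* remember the error, skip remaining rows */
  }
  free(scratch);                     /* cleanup runs on every path */
  return code;                       /* first failure, or 0 */
}

int main(void) {
  printf("%d\n", process_all(5)); /* -1: row 3 failed */
  return 0;
}
```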
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index 9bfec5577c..b89664a6c1 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -19,10 +19,7 @@
 #include "rocksdb/c.h"
 //#include "streamInt.h"
 #include "streamState.h"
-#include "tcoding.h"
 #include "tcommon.h"
-#include "tcompare.h"
-#include "ttimer.h"
 
 typedef struct SCfComparator {
   rocksdb_comparator_t** comp;
diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h
index 0d4dbf1658..8a8904e916 100644
--- a/source/libs/stream/inc/streamInt.h
+++ b/source/libs/stream/inc/streamInt.h
@@ -92,6 +92,9 @@ extern int32_t streamBackendId;
 extern int32_t streamBackendCfWrapperId;
 extern int32_t taskDbWrapperId;
 
+int32_t streamTimerInit();
+void    streamTimerCleanUp();
+
 void    streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
 int32_t streamDispatchStreamBlock(SStreamTask* pTask);
 void    destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
@@ -109,13 +112,12 @@ int32_t streamBroadcastToUpTasks(SStreamTask* pTask, const SSDataBlock* pBlock);
 int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
 
 int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId);
-int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
-int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId);
 int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
 
 int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t srcTaskId, int32_t index, int64_t checkpointId);
 int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask);
 int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
+void    streamTaskSetCheckpointFailedId(SStreamTask* pTask);
 int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask);
 int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const char*);
 STaskId streamTaskExtractKey(const SStreamTask* pTask);
@@ -137,17 +139,6 @@ int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo,
 int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
 int32_t streamTransferStateToStreamTask(SStreamTask* pTask);
 
-// <<<<<<< HEAD
-// void streamClearChkptReadyMsg(SStreamTask* pTask);
-
-// int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const
-// char*); STaskId streamTaskExtractKey(const SStreamTask* pTask); void streamTaskInitForLaunchHTask(SHistoryTaskInfo*
-// pInfo); void streamTaskSetRetryInfoForLaunch(SHistoryTaskInfo* pInfo);
-
-// void streamMetaResetStartInfo(STaskStartInfo* pMeta);
-
-// =======
-// >>>>>>> 3.0
 SStreamQueue* streamQueueOpen(int64_t cap);
 void          streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
 void          streamQueueProcessSuccess(SStreamQueue* queue);
diff --git a/source/libs/stream/inc/streamsm.h b/source/libs/stream/inc/streamsm.h
index 7be655fbed..abdafc0240 100644
--- a/source/libs/stream/inc/streamsm.h
+++ b/source/libs/stream/inc/streamsm.h
@@ -23,17 +23,14 @@ extern "C" {
 #endif
 
 // moore finite state machine for stream task
-typedef struct SStreamTaskState {
-  ETaskStatus state;
-  char*       name;
-} SStreamTaskState;
-
 typedef int32_t (*__state_trans_fn)(SStreamTask*);
 typedef int32_t (*__state_trans_succ_fn)(SStreamTask*);
 
 typedef struct SAttachedEventInfo {
   ETaskStatus      status;  // required status that this event can be handled
   EStreamTaskEvent event;   // the delayed handled event
+  void*            pParam;
+  void*            pFn;
 } SAttachedEventInfo;
 
 typedef struct STaskStateTrans {
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index e50e373be0..c17567d5fc 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -21,13 +21,18 @@ void* streamTimer = NULL;
 
 int32_t streamTimerInit() {
   streamTimer = taosTmrInit(1000, 100, 10000, "STREAM");
   if (streamTimer == NULL) {
+    stError("init stream timer failed, code:%s", tstrerror(terrno));
     return -1;
   }
+
+  stInfo("init stream timer, %p", streamTimer);
   return 0;
 }
 
 void streamTimerCleanUp() {
+  stInfo("cleanup stream timer, %p", streamTimer);
   taosTmrCleanUp(streamTimer);
+  streamTimer = NULL;
 }
 
 char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
@@ -49,7 +54,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
     return;
   }
 
-  if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__CK) {
+  if (streamTaskGetStatus(pTask)->state == TASK_STATUS__CK) {
     stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, nextTrigger);
   } else {
     if (status == TASK_TRIGGER_STATUS__ACTIVE) {
@@ -57,7 +62,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
       if (pTrigger == NULL) {
         stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
                 nextTrigger);
-        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
+        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
         return;
       }
 
@@ -68,7 +73,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
         stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
                 nextTrigger);
-        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
+        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
         return;
       }
 
@@ -77,7 +82,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
 
       int32_t code = streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger);
       if (code != TSDB_CODE_SUCCESS) {
-        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
+        taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
         return;
       }
 
@@ -85,17 +90,17 @@ static void streamSchedByTimer(void* param, void* tmrId) {
     }
   }
 
-  taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
+  taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
 }
 
 int32_t streamSetupScheduleTrigger(SStreamTask* pTask) {
   if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
     int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
-    ASSERT(ref == 2 && pTask->schedInfo.pTimer == NULL);
+    ASSERT(ref == 2 && pTask->schedInfo.pDelayTimer == NULL);
 
     stDebug("s-task:%s setup scheduler trigger, delay:%" PRId64 " ms", pTask->id.idStr, pTask->info.triggerParam);
 
-    pTask->schedInfo.pTimer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamTimer);
+    pTask->schedInfo.pDelayTimer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamTimer);
     pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE;
   }
 
@@ -198,8 +203,9 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq)
 }
 
 int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
-  int32_t     status = 0;
-  const char* id = pTask->id.idStr;
+  int32_t      status = 0;
+  SStreamMeta* pMeta = pTask->pMeta;
+  const char*  id = pTask->id.idStr;
 
   stDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64 ", msgId:%d", id,
           pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen, pReq->msgId);
@@ -207,7 +213,7 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
   SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId);
   ASSERT(pInfo != NULL);
 
-  if (pTask->pMeta->role == NODE_ROLE_FOLLOWER) {
+  if (pMeta->role == NODE_ROLE_FOLLOWER) {
     stError("s-task:%s task on follower received dispatch msgs, dispatch msg rejected", id);
     status = TASK_INPUT_STATUS__REFUSED;
   } else {
@@ -228,6 +234,22 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
       atomic_add_fetch_32(&pTask->upstreamInfo.numOfClosed, 1);
       streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
       stDebug("s-task:%s close inputQ for upstream:0x%x, msgId:%d", id, pReq->upstreamTaskId, pReq->msgId);
+    } else if (pReq->type == STREAM_INPUT__TRANS_STATE) {
+      atomic_add_fetch_32(&pTask->upstreamInfo.numOfClosed, 1);
+      streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
+
+      // disable the related stream task here to avoid it to receive the newly arrived data after the transfer-state
+      STaskId*     pRelTaskId = &pTask->streamTaskId;
+      SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pRelTaskId->streamId, pRelTaskId->taskId);
+      if (pStreamTask != NULL) {
+        atomic_add_fetch_32(&pStreamTask->upstreamInfo.numOfClosed, 1);
+        streamTaskCloseUpstreamInput(pStreamTask, pReq->upstreamRelTaskId);
+        streamMetaReleaseTask(pMeta, pStreamTask);
+      }
+
+      stDebug("s-task:%s close inputQ for upstream:0x%x since trans-state msgId:%d recv, rel stream-task:0x%" PRIx64
+              " close inputQ for upstream:0x%x",
+              id, pReq->upstreamTaskId, pReq->msgId, pTask->streamTaskId.taskId, pReq->upstreamRelTaskId);
     }
 
     status = streamTaskAppendInputBlocks(pTask, pReq);
@@ -236,9 +258,9 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
   }
 
   // disable the data from upstream tasks
-  if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__HALT) {
-    status = TASK_INPUT_STATUS__BLOCKED;
-  }
+//  if (streamTaskGetStatus(pTask)->state == TASK_STATUS__HALT) {
+//    status = TASK_INPUT_STATUS__BLOCKED;
+//  }
 
   {
     // do send response with the input status
@@ -280,6 +302,7 @@ void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) {
   }
 
   pTask->upstreamInfo.numOfClosed = 0;
+  stDebug("s-task:%s opening up inputQ from upstream tasks", pTask->id.idStr);
 }
 
 void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) {
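Reviewer note: stream.c renames the trigger timer to pDelayTimer and keeps the taosTmrReset discipline — every exit path of the callback re-arms the timer first, so one failed tick cannot silently stop the schedule. A toy model of that self-rearming loop; the fake one-shot timer stands in for TDengine's timer wheel.

```c
#include <stdio.h>

typedef struct { int armed; } Timer;

static void reset(Timer* t) { t->armed = 1; } /* taosTmrReset stand-in */

static void on_tick(Timer* t, int tick) {
  if (tick == 2) {  /* simulated transient failure */
    printf("tick %d: failed, will retry\n", tick);
    reset(t);       /* re-arm before the early return */
    return;
  }
  printf("tick %d: trigger dispatched\n", tick);
  reset(t);         /* the normal path re-arms as well */
}

int main(void) {
  Timer t = {1};
  for (int tick = 0; tick < 4 && t.armed; ++tick) {
    t.armed = 0;    /* a one-shot timer disarms when it fires */
    on_tick(&t, tick);
  }
  return 0;
}
```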
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 5d2c7ec504..4b2a9e4a65 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -14,8 +14,6 @@
  */
 
 #include "streamBackendRocksdb.h"
-#include "executor.h"
-#include "query.h"
 #include "streamInt.h"
 #include "tcommon.h"
 #include "tref.h"
@@ -665,6 +663,7 @@ void streamBackendHandleCleanup(void* arg) {
   return;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t getLatestCheckpoint(void* arg, int64_t* checkpoint) {
   SStreamMeta* pMeta = arg;
   taosWLockLatch(&pMeta->chkpDirLock);
@@ -738,6 +737,7 @@ int32_t delObsoleteCheckpoint(void* arg, const char* path) {
   taosArrayDestroy(chkpDel);
   return 0;
 }
+#endif
 /*
  *  checkpointSave |--cp1--|--cp2--|--cp3--|--cp4--|--cp5--|
  *  chkpInUse: |--cp2--|--cp4--|
@@ -855,6 +855,7 @@ int32_t streamBackendLoadCheckpointInfo(void* arg) {
   return 0;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t chkpGetAllDbCfHandle(SStreamMeta* pMeta, rocksdb_column_family_handle_t*** ppHandle, SArray* refs) {
   return 0;
   // SArray* pHandle = taosArrayInit(16, POINTER_BYTES);
@@ -891,6 +892,7 @@ int32_t chkpGetAllDbCfHandle(SStreamMeta* pMeta, rocksdb_column_family_handle_t*
   // *ppHandle = ppCf;
   // return nCf;
 }
+#endif
 
 int32_t chkpGetAllDbCfHandle2(STaskDbWrapper* pBackend, rocksdb_column_family_handle_t*** ppHandle) {
   SArray* pHandle = taosArrayInit(8, POINTER_BYTES);
@@ -1006,6 +1008,7 @@ int32_t taskDbBuildSnap(void* arg, SArray* pSnap) {
   return code;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t streamBackendAddInUseChkp(void* arg, int64_t chkpId) {
   // if (arg == NULL) return 0;
@@ -1029,6 +1032,7 @@ int32_t streamBackendDelInUseChkp(void* arg, int64_t chkpId) {
   //   }
   //   taosWUnLockLatch(&pMeta->chkpDirLock);
 }
+#endif
 
 /*
  0
@@ -1053,14 +1057,14 @@ int32_t taskDbDoCheckpoint(void* arg, int64_t chkpId) {
   rocksdb_column_family_handle_t** ppCf = NULL;
 
   int32_t nCf = chkpGetAllDbCfHandle2(pTaskDb, &ppCf);
-  qDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pTaskDb, pChkpIdDir, nCf);
+  stDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pTaskDb, pChkpIdDir, nCf);
 
   if ((code = chkpPreFlushDb(pTaskDb->db, ppCf, nCf)) == 0) {
     if ((code = chkpDoDbCheckpoint(pTaskDb->db, pChkpIdDir)) != 0) {
       stError("stream backend:%p failed to do checkpoint at:%s", pTaskDb, pChkpIdDir);
     } else {
-      qDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pTaskDb, pChkpIdDir,
-             taosGetTimestampMs() - st);
+      stDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pTaskDb, pChkpIdDir,
+              taosGetTimestampMs() - st);
     }
   } else {
     stError("stream backend:%p failed to flush db at:%s", pTaskDb, pChkpIdDir);
@@ -1099,7 +1103,9 @@ void streamBackendDelCompare(void* backend, void* arg) {
     taosMemoryFree(node);
   }
 }
+#ifdef BUILD_NO_CALL
 void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
+#endif
 
 void destroyRocksdbCfInst(RocksdbCfInst* inst) {
   int cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]);
   if (inst->pHandle) {
@@ -1789,7 +1795,7 @@ STaskDbWrapper* taskDbOpenImpl(char* key, char* statePath, char* dbPath) {
     cfNames = NULL;
   }
 
-  qDebug("succ to init stream backend at %s, backend:%p", dbPath, pTaskDb);
+  stDebug("succ to init stream backend at %s, backend:%p", dbPath, pTaskDb);
   return pTaskDb;
 
 _EXIT:
@@ -1818,7 +1824,7 @@ void taskDbDestroy(void* pDb, bool flush) {
 
   streamMetaRemoveDB(wrapper->pMeta, wrapper->idstr);
 
-  qDebug("succ to destroy stream backend:%p", wrapper);
+  stDebug("succ to destroy stream backend:%p", wrapper);
 
   int8_t nCf = sizeof(ginitDict) / sizeof(ginitDict[0]);
 
@@ -2170,6 +2176,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
   taosMemoryFree(cfOpts);
   return 0;
 }
+#ifdef BUILD_NO_CALL
 int streamStateOpenBackend(void* backend, SStreamState* pState) {
   taosAcquireRef(streamBackendId, pState->streamBackendRid);
   SBackendWrapper* handle = backend;
@@ -2279,6 +2286,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
   wrapper->remove |= remove;  // update by other pState
   taosReleaseRef(streamBackendCfWrapperId, pState->pTdbState->backendCfWrapperId);
 }
+#endif
 
 void streamStateDestroyCompar(void* arg) {
   SCfComparator* comp = (SCfComparator*)arg;
   for (int i = 0; i < comp->numOfComp; i++) {
@@ -2297,12 +2305,14 @@ int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
     }
   }
   if (pState != NULL && idx != -1) {
-    STaskDbWrapper*                 wrapper = pState->pTdbState->pOwner->pBackend;
-    rocksdb_column_family_handle_t* cf = NULL;
+    STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
+    if (wrapper == NULL) {
+      return -1;
+    }
 
     taosThreadMutexLock(&wrapper->mutex);
-    cf = wrapper->pCf[idx];
+    rocksdb_column_family_handle_t* cf = wrapper->pCf[idx];
     if (cf == NULL) {
       char* err = NULL;
       cf = rocksdb_create_column_family(wrapper->db, wrapper->pCfOpts[idx], ginitDict[idx].key, &err);
@@ -2311,7 +2321,7 @@ int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
         stError("failed to open cf, %p %s_%s, reason:%s", pState, wrapper->idstr, funcName, err);
         taosMemoryFree(err);
       } else {
qDebug("succ to open cf, %p %s_%s", pState, wrapper->idstr, funcName); + stDebug("succ to open cf, %p %s_%s", pState, wrapper->idstr, funcName); wrapper->pCf[idx] = cf; } } @@ -2353,14 +2363,14 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe char* err = NULL; \ int i = streamStateGetCfIdx(pState, funcname); \ if (i < 0) { \ - qWarn("streamState failed to get cf name: %s", funcname); \ + stWarn("streamState failed to get cf name: %s", funcname); \ code = -1; \ break; \ } \ STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; \ wrapper->dataWritten += 1; \ char toString[128] = {0}; \ - if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \ + if (stDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \ int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \ rocksdb_writeoptions_t* opts = wrapper->writeOpt; \ @@ -2373,54 +2383,54 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe taosMemoryFree(err); \ code = -1; \ } else { \ - qTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString, funcname, vLen, \ - ttlVLen, wrapper); \ + stTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString, funcname, vLen, \ + ttlVLen, wrapper); \ } \ taosMemoryFree(ttlV); \ } while (0); -#define STREAM_STATE_GET_ROCKSDB(pState, funcname, key, pVal, vLen) \ - do { \ - code = 0; \ - char buf[128] = {0}; \ - char* err = NULL; \ - int i = streamStateGetCfIdx(pState, funcname); \ - if (i < 0) { \ - qWarn("streamState failed to get cf name: %s", funcname); \ - code = -1; \ - break; \ - } \ - STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; \ - char toString[128] = {0}; \ - if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \ - int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ - rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \ - rocksdb_t* db = wrapper->db; \ - rocksdb_readoptions_t* opts = wrapper->readOpt; \ - size_t len = 0; \ - char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err); \ - if (val == NULL || len == 0) { \ - if (err == NULL) { \ - qTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \ - } else { \ - stError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \ - taosMemoryFreeClear(err); \ - } \ - code = -1; \ - } else { \ - char* p = NULL; \ - int32_t tlen = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal); \ - if (tlen <= 0) { \ - stError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr, \ - funcname); \ - code = -1; \ - } else { \ - qTrace("streamState str: %s succ to read from %s_%s, valLen:%d, %p", toString, wrapper->idstr, funcname, tlen, \ - wrapper); \ - } \ - taosMemoryFree(val); \ - if (vLen != NULL) *vLen = tlen; \ - } \ +#define STREAM_STATE_GET_ROCKSDB(pState, funcname, key, pVal, vLen) \ + do { \ + code = 0; \ + char buf[128] = {0}; \ + char* err = NULL; \ + int i = streamStateGetCfIdx(pState, funcname); \ + if (i < 0) { \ + stWarn("streamState failed to get cf name: %s", funcname); \ + code = -1; \ + break; \ + } \ + STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; \ + char toString[128] = {0}; 
+    if (stDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString);  \
+    int32_t klen = ginitDict[i].enFunc((void*)key, buf);                          \
+    rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \
+    rocksdb_t* db = wrapper->db;                                                  \
+    rocksdb_readoptions_t* opts = wrapper->readOpt;                               \
+    size_t len = 0;                                                               \
+    char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err);                   \
+    if (val == NULL || len == 0) {                                                \
+      if (err == NULL) {                                                          \
+        stTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \
+      } else {                                                                    \
+        stError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \
+        taosMemoryFreeClear(err);                                                 \
+      }                                                                           \
+      code = -1;                                                                  \
+    } else {                                                                      \
+      char* p = NULL;                                                             \
+      int32_t tlen = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal);      \
+      if (tlen <= 0) {                                                            \
+        stError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr,     \
+                funcname);                                                        \
+        code = -1;                                                                \
+      } else {                                                                    \
+        stTrace("streamState str: %s succ to read from %s_%s, valLen:%d, %p", toString, wrapper->idstr, funcname, \
+                tlen, wrapper);                                                   \
+      }                                                                           \
+      taosMemoryFree(val);                                                        \
+      if (vLen != NULL) *vLen = tlen;                                             \
+    }                                                                             \
   } while (0);
 
 #define STREAM_STATE_DEL_ROCKSDB(pState, funcname, key)                           \
@@ -2430,14 +2440,14 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
     char* err = NULL;                                                             \
     int   i = streamStateGetCfIdx(pState, funcname);                              \
     if (i < 0) {                                                                  \
-      qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
+      stWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
       code = -1;                                                                  \
       break;                                                                      \
     }                                                                             \
     STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;                \
     wrapper->dataWritten += 1;                                                    \
     char toString[128] = {0};                                                     \
-    if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString);   \
+    if (stDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString);  \
     int32_t klen = ginitDict[i].enFunc((void*)key, buf);                          \
     rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \
     rocksdb_t* db = wrapper->db;                                                  \
@@ -2448,7 +2458,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
       taosMemoryFree(err);                                                        \
       code = -1;                                                                  \
     } else {                                                                      \
-      qTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \
+      stTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \
     }                                                                             \
   } while (0);
 
@@ -2667,9 +2677,9 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState) {
   STREAM_STATE_DEL_ROCKSDB(pState, "state", &maxStateKey);
   return pCur;
 }
-
+#ifdef BUILD_NO_CALL
 SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key) {
-  qDebug("streamStateGetCur_rocksdb");
+  stDebug("streamStateGetCur_rocksdb");
   STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
 
   SStreamStateCur* pCur = createStreamStateCursor();
@@ -2717,6 +2727,7 @@ int32_t streamStateFuncDel_rocksdb(SStreamState* pState, const STupleKey* key) {
   STREAM_STATE_DEL_ROCKSDB(pState, "func", key);
   return 0;
 }
+#endif
 
 // session cf
 int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
@@ -2762,7 +2773,7 @@ int32_t streamStateSessionDel_rocksdb(SStreamState* k
 }
 
 SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState) {
-  qDebug("streamStateSessionSeekToLast_rocksdb");
qDebug("streamStateSessionSeekToLast_rocksdb"); + stDebug("streamStateSessionSeekToLast_rocksdb"); int32_t code = 0; @@ -2799,7 +2810,7 @@ SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState) { } int32_t streamStateSessionCurPrev_rocksdb(SStreamStateCur* pCur) { - qDebug("streamStateCurPrev_rocksdb"); + stDebug("streamStateCurPrev_rocksdb"); if (!pCur) return -1; rocksdb_iter_prev(pCur->iter); @@ -2849,7 +2860,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentPrev_rocksdb(SStreamState* pSta return pCur; } SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pState, SSessionKey* key) { - qDebug("streamStateSessionSeekKeyCurrentNext_rocksdb"); + stDebug("streamStateSessionSeekKeyCurrentNext_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; SStreamStateCur* pCur = createStreamStateCursor(); if (pCur == NULL) { @@ -2887,7 +2898,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pSta } SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, const SSessionKey* key) { - qDebug("streamStateSessionSeekKeyNext_rocksdb"); + stDebug("streamStateSessionSeekKeyNext_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; SStreamStateCur* pCur = createStreamStateCursor(); if (pCur == NULL) { @@ -2991,7 +3002,7 @@ int32_t streamStateFillDel_rocksdb(SStreamState* pState, const SWinKey* key) { } SStreamStateCur* streamStateFillGetCur_rocksdb(SStreamState* pState, const SWinKey* key) { - qDebug("streamStateFillGetCur_rocksdb"); + stDebug("streamStateFillGetCur_rocksdb"); SStreamStateCur* pCur = createStreamStateCursor(); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; @@ -3051,7 +3062,7 @@ int32_t streamStateFillGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, } SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key) { - qDebug("streamStateFillSeekKeyNext_rocksdb"); + stDebug("streamStateFillSeekKeyNext_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; SStreamStateCur* pCur = createStreamStateCursor(); if (!pCur) { @@ -3089,7 +3100,7 @@ SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const return NULL; } SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key) { - qDebug("streamStateFillSeekKeyPrev_rocksdb"); + stDebug("streamStateFillSeekKeyPrev_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; SStreamStateCur* pCur = createStreamStateCursor(); if (pCur == NULL) { @@ -3126,6 +3137,7 @@ SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const streamStateFreeCur(pCur); return NULL; } +#ifdef BUILD_NO_CALL int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey) { stDebug("streamStateSessionGetKeyByRange_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; @@ -3183,6 +3195,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes streamStateFreeCur(pCur); return -1; } +#endif int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen) { @@ -3315,6 +3328,7 @@ _end: return res; } +#ifdef BUILD_NO_CALL // partag cf int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen) { int code = 0; @@ -3327,6 +3341,7 @@ int32_t streamStateGetParTag_rocksdb(SStreamState* 
   STREAM_STATE_GET_ROCKSDB(pState, "partag", &groupId, tagVal, tagLen);
   return code;
 }
+#endif
 
 // parname cfg
 int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
   int code = 0;
@@ -3340,11 +3355,13 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi
   return code;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) {
   int code = 0;
   STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen);
   return code;
 }
+#endif
 int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) {
   int code = 0;
   STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen);
@@ -3398,6 +3415,7 @@ int32_t streamDefaultIterGet_rocksdb(SStreamState* pState, const void* start, co
   rocksdb_iter_destroy(pIter);
   return code;
 }
+#ifdef BUILD_NO_CALL
 void* streamDefaultIterCreate_rocksdb(SStreamState* pState) {
   SStreamStateCur* pCur = createStreamStateCursor();
   STaskDbWrapper*  wrapper = pState->pTdbState->pOwner->pBackend;
@@ -3441,6 +3459,7 @@ char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len) {
 
   return ret;
 }
+#endif
 // batch func
 void* streamStateCreateBatch() {
   rocksdb_writebatch_t* pBatch = rocksdb_writebatch_create();
@@ -3794,11 +3813,12 @@ void dbChkpDestroy(SDbChkp* pChkp) {
   taosMemoryFree(pChkp->pManifest);
   taosMemoryFree(pChkp);
 }
-
+#ifdef BUILD_NO_CALL
 int32_t dbChkpInit(SDbChkp* p) {
   if (p == NULL) return 0;
   return 0;
 }
+#endif
 int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) {
   taosThreadRwlockRdlock(&p->rwLock);
 
   int32_t code = -1;
@@ -3943,6 +3963,7 @@ int32_t bkdMgtGetDelta(SBkdMgt* bm, char* taskId, int64_t chkpId, SArray* list,
   return code;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t bkdMgtAddChkp(SBkdMgt* bm, char* task, char* path) {
   int32_t code = -1;
 
@@ -3972,4 +3993,5 @@ int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname) {
   taosThreadRwlockUnlock(&bm->rwLock);
 
   return code;
-}
\ No newline at end of file
+}
+#endif
\ No newline at end of file
diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c
index cf0682f037..cb3f7a3504 100644
--- a/source/libs/stream/src/streamCheckpoint.c
+++ b/source/libs/stream/src/streamCheckpoint.c
@@ -25,6 +25,7 @@ typedef struct {
   SStreamTask* pTask;
 } SAsyncUploadArg;
 
+
 int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq) {
   if (tStartEncode(pEncoder) < 0) return -1;
   if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -34,6 +35,7 @@ int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckp
   if (tEncodeSEpSet(pEncoder, &pReq->mgmtEps) < 0) return -1;
   if (tEncodeI32(pEncoder, pReq->mnodeId) < 0) return -1;
   if (tEncodeI64(pEncoder, pReq->expireTime) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->transId) < 0) return -1;
   tEndEncode(pEncoder);
   return pEncoder->pos;
 }
@@ -47,6 +49,7 @@ int32_t tDecodeStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSo
   if (tDecodeSEpSet(pDecoder, &pReq->mgmtEps) < 0) return -1;
   if (tDecodeI32(pDecoder, &pReq->mnodeId) < 0) return -1;
   if (tDecodeI64(pDecoder, &pReq->expireTime) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->transId) < 0) return -1;
   tEndDecode(pDecoder);
   return 0;
 }
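The encoder/decoder pair above appends the new `transId` field at the same position on both sides; a field added to a versioned message must be encoded and decoded symmetrically, or a peer on the old layout will mis-parse everything that follows it. A minimal sketch of the convention, reusing the tEncode/tDecode helpers that appear in this hunk (the `SReq` struct itself is hypothetical):

    typedef struct { int64_t streamId; int32_t transId; } SReq;

    static int32_t encodeReq(SEncoder* pEncoder, const SReq* pReq) {
      if (tStartEncode(pEncoder) < 0) return -1;
      if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
      if (tEncodeI32(pEncoder, pReq->transId) < 0) return -1;  // new field goes last
      tEndEncode(pEncoder);
      return pEncoder->pos;
    }

    static int32_t decodeReq(SDecoder* pDecoder, SReq* pReq) {
      if (tStartDecode(pDecoder) < 0) return -1;
      if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
      if (tDecodeI32(pDecoder, &pReq->transId) < 0) return -1;  // mirrors the encoder exactly
      tEndDecode(pDecoder);
      return 0;
    }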
@@ -111,6 +114,7 @@ static int32_t streamAlignCheckpoint(SStreamTask* pTask) {
   return atomic_sub_fetch_32(&pTask->chkInfo.downstreamAlignNum, 1);
 }
 
+// todo: hand down the transId of the checkpoint to sink/agg tasks.
 static int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpointType) {
   SStreamDataBlock* pChkpoint = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SSDataBlock));
   if (pChkpoint == NULL) {
@@ -149,6 +153,7 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo
   // 1. set task status to be prepared for check point, no data are allowed to put into inputQ.
   streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
 
+  pTask->chkInfo.transId = pReq->transId;
   pTask->chkInfo.checkpointingId = pReq->checkpointId;
   pTask->chkInfo.checkpointNotReadyTasks = streamTaskGetNumOfDownstream(pTask);
   pTask->chkInfo.startTs = taosGetTimestampMs();
@@ -182,7 +187,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
   int32_t code = TSDB_CODE_SUCCESS;
 
   // set task status
-  if (streamTaskGetStatus(pTask, NULL) != TASK_STATUS__CK) {
+  if (streamTaskGetStatus(pTask)->state != TASK_STATUS__CK) {
     pTask->chkInfo.checkpointingId = checkpointId;
     code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
     if (code != TSDB_CODE_SUCCESS) {
@@ -273,8 +278,9 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
   pTask->chkInfo.failedId = 0;
   pTask->chkInfo.startTs = 0;  // clear the recorded start time
   pTask->chkInfo.checkpointNotReadyTasks = 0;
-  // pTask->chkInfo.checkpointAlignCnt = 0;
+  pTask->chkInfo.transId = 0;
   pTask->chkInfo.dispatchCheckpointTrigger = false;
+  streamTaskOpenAllUpstreamInput(pTask);  // open inputQ for all upstream tasks
 
   if (clearChkpReadyMsg) {
     streamClearChkptReadyMsg(pTask);
@@ -282,44 +288,49 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
 }
 
 int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
-  SStreamMeta* pMeta = p->pMeta;
-  int32_t      vgId = pMeta->vgId;
-  const char*  id = p->id.idStr;
-  int32_t      code = 0;
+  SStreamMeta*     pMeta = p->pMeta;
+  int32_t          vgId = pMeta->vgId;
+  const char*      id = p->id.idStr;
+  int32_t          code = 0;
+  SCheckpointInfo* pCKInfo = &p->chkInfo;
 
-  if (p->info.fillHistory == 1) {
-    return code;
-  }
-
-  if (p->info.taskLevel > TASK_LEVEL__SINK) {
+  // fill-history task, rsma task, and sink task will not generate the checkpoint
+  if ((p->info.fillHistory == 1) || (p->info.taskLevel > TASK_LEVEL__SINK)) {
     return code;
   }
 
   taosThreadMutexLock(&p->lock);
 
-  ASSERT(p->chkInfo.checkpointId <= p->chkInfo.checkpointingId && p->chkInfo.checkpointingId == checkpointId &&
-         p->chkInfo.checkpointVer <= p->chkInfo.processedVer);
-  p->chkInfo.checkpointId = p->chkInfo.checkpointingId;
-  p->chkInfo.checkpointVer = p->chkInfo.processedVer;
+  SStreamTaskState* pStatus = streamTaskGetStatus(p);
+  if (pStatus->state == TASK_STATUS__CK) {
+    ASSERT(pCKInfo->checkpointId <= pCKInfo->checkpointingId && pCKInfo->checkpointingId == checkpointId &&
+           pCKInfo->checkpointVer <= pCKInfo->processedVer);
 
-  streamTaskClearCheckInfo(p, false);
-  char* str = NULL;
-  streamTaskGetStatus(p, &str);
+    pCKInfo->checkpointId = pCKInfo->checkpointingId;
+    pCKInfo->checkpointVer = pCKInfo->processedVer;
 
-  code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
-  taosThreadMutexUnlock(&p->lock);
+    streamTaskClearCheckInfo(p, false);
+    code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
+    taosThreadMutexUnlock(&p->lock);
+  } else {
+    stDebug("s-task:%s vgId:%d status:%s not keep the checkpoint metaInfo, checkpoint:%" PRId64 " failed", id, vgId,
+            pStatus->name, pCKInfo->checkpointingId);
+    taosThreadMutexUnlock(&p->lock);
+
+    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
+  }
 
   if (code != TSDB_CODE_SUCCESS) {
     stDebug("s-task:%s vgId:%d handle event:checkpoint-done failed", id, vgId);
-    return -1;
+    return code;
   }
 
   stDebug("vgId:%d s-task:%s level:%d open upstream inputQ, save status after checkpoint, checkpointId:%" PRId64
          ", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status: normal, prev:%s",
-         vgId, id, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.nextProcessVer, str);
+         vgId, id, p->info.taskLevel, checkpointId, pCKInfo->checkpointVer, pCKInfo->nextProcessVer, pStatus->name);
 
   // save the task if not sink task
-  if (p->info.taskLevel != TASK_LEVEL__SINK) {
+  if (p->info.taskLevel < TASK_LEVEL__SINK) {
     streamMetaWLock(pMeta);
 
     code = streamMetaSaveTask(pMeta, p);
@@ -338,12 +349,12 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
     streamMetaWUnLock(pMeta);
   }
 
+
   return code;
 }
 
-void streamTaskSetFailedId(SStreamTask* pTask) {
+void streamTaskSetCheckpointFailedId(SStreamTask* pTask) {
   pTask->chkInfo.failedId = pTask->chkInfo.checkpointingId;
-  pTask->chkInfo.checkpointId = pTask->chkInfo.checkpointingId;
 }
 
 int32_t getChkpMeta(char* id, char* path, SArray* list) {
@@ -437,16 +448,17 @@ int32_t streamTaskUploadChkp(SStreamTask* pTask, int64_t chkpId, char* taskId) {
   return streamMetaAsyncExec(pTask->pMeta, doUploadChkp, arg, NULL);
 }
 
 int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
-  int32_t code = TSDB_CODE_SUCCESS;
-  int64_t startTs = pTask->chkInfo.startTs;
-  int64_t ckId = pTask->chkInfo.checkpointingId;
+  int32_t     code = TSDB_CODE_SUCCESS;
+  int64_t     startTs = pTask->chkInfo.startTs;
+  int64_t     ckId = pTask->chkInfo.checkpointingId;
+  const char* id = pTask->id.idStr;
 
   // sink task do not need to save the status, and generated the checkpoint
   if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
-    stDebug("s-task:%s level:%d start gen checkpoint", pTask->id.idStr, pTask->info.taskLevel);
+    stDebug("s-task:%s level:%d start gen checkpoint", id, pTask->info.taskLevel);
 
     code = streamBackendDoCheckpoint(pTask->pBackend, ckId);
     if (code != TSDB_CODE_SUCCESS) {
-      stError("s-task:%s gen checkpoint:%" PRId64 " failed, code:%s", pTask->id.idStr, ckId, tstrerror(terrno));
+      stError("s-task:%s gen checkpoint:%" PRId64 " failed, code:%s", id, ckId, tstrerror(terrno));
     }
   }
 
@@ -460,39 +472,38 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
 
     if (code != TSDB_CODE_SUCCESS) {
       // todo: let's retry send rsp to upstream/mnode
-      stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr,
-              ckId, tstrerror(code));
+      stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", id, ckId,
+              tstrerror(code));
     }
   }
 
   // clear the checkpoint info, and commit the newest checkpoint info if all works are done successfully
   if (code == TSDB_CODE_SUCCESS) {
     code = streamSaveTaskCheckpointInfo(pTask, ckId);
-    if (code != TSDB_CODE_SUCCESS) {
-      stError("s-task:%s commit taskInfo failed, checkpoint:%" PRId64 " failed, code:%s", pTask->id.idStr, ckId,
-              tstrerror(terrno));
-    } else {
-      code = streamTaskUploadChkp(pTask, ckId, (char*)pTask->id.idStr);
-      if (code != 0) {
-        stError("s-task:%s failed to upload checkpoint:%" PRId64 " failed", pTask->id.idStr, ckId);
+    if (code == TSDB_CODE_SUCCESS) {
+      code = streamTaskUploadChkp(pTask, ckId, (char*)id);
+      if (code != TSDB_CODE_SUCCESS) {
+        stError("s-task:%s failed to upload checkpoint:%" PRId64, id, ckId);
PRId64 " failed", id, ckId); } + } else { + stError("s-task:%s commit taskInfo failed, checkpoint:%" PRId64 " failed, code:%s", id, ckId, tstrerror(code)); } } - if (code != TSDB_CODE_SUCCESS) { // clear the checkpoint info if failed + // clear the checkpoint info if failed + if (code != TSDB_CODE_SUCCESS) { taosThreadMutexLock(&pTask->lock); streamTaskClearCheckInfo(pTask, false); code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE); taosThreadMutexUnlock(&pTask->lock); - streamTaskSetFailedId(pTask); - stDebug("s-task:%s clear checkpoint flag since gen checkpoint failed, checkpointId:%" PRId64, pTask->id.idStr, - ckId); + streamTaskSetCheckpointFailedId(pTask); + stDebug("s-task:%s clear checkpoint flag since gen checkpoint failed, checkpointId:%" PRId64, id, ckId); } double el = (taosGetTimestampMs() - startTs) / 1000.0; - stInfo("s-task:%s vgId:%d level:%d, checkpointId:%" PRId64 " ver:%" PRId64 " elapsed time:%.2f Sec, %s ", - pTask->id.idStr, pTask->pMeta->vgId, pTask->info.taskLevel, ckId, pTask->chkInfo.checkpointVer, el, + stInfo("s-task:%s vgId:%d level:%d, checkpointId:%" PRId64 " ver:%" PRId64 " elapsed time:%.2f Sec, %s ", id, + pTask->pMeta->vgId, pTask->info.taskLevel, ckId, pTask->chkInfo.checkpointVer, el, (code == TSDB_CODE_SUCCESS) ? "succ" : "failed"); return code; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 1e037ee879..09b00e3712 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -58,6 +58,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->upstreamRelTaskId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->blockNum) < 0) return -1; if (tEncodeI64(pEncoder, pReq->totalLen) < 0) return -1; ASSERT(taosArrayGetSize(pReq->data) == pReq->blockNum); @@ -84,6 +85,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->upstreamRelTaskId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->blockNum) < 0) return -1; if (tDecodeI64(pDecoder, &pReq->totalLen) < 0) return -1; @@ -114,6 +116,7 @@ static int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTas pReq->upstreamTaskId = pTask->id.taskId; pReq->upstreamChildId = pTask->info.selfChildId; pReq->upstreamNodeId = pTask->info.nodeId; + pReq->upstreamRelTaskId = pTask->streamTaskId.taskId; pReq->blockNum = numOfBlocks; pReq->taskId = dstTaskId; pReq->type = type; @@ -374,6 +377,7 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD return code; } + // it's a new vnode to receive dispatch msg, so add one if (pReqs[j].blockNum == 0) { atomic_add_fetch_32(&pTask->outputInfo.shuffleDispatcher.waitingRspCnt, 1); } @@ -394,8 +398,15 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD pTask->msgInfo.pData = pReqs; } - stDebug("s-task:%s build dispatch msg success, msgId:%d, stage:%" PRId64, pTask->id.idStr, pTask->execInfo.dispatch, - pTask->pMeta->stage); + if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { + 
stDebug("s-task:%s build dispatch msg success, msgId:%d, stage:%" PRId64 " %p", pTask->id.idStr, + pTask->execInfo.dispatch, pTask->pMeta->stage, pTask->msgInfo.pData); + } else { + stDebug("s-task:%s build dispatch msg success, msgId:%d, stage:%" PRId64 " dstVgNum:%d %p", pTask->id.idStr, + pTask->execInfo.dispatch, pTask->pMeta->stage, pTask->outputInfo.shuffleDispatcher.waitingRspCnt, + pTask->msgInfo.pData); + } + return code; } @@ -417,9 +428,11 @@ static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatch SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos; int32_t numOfVgroups = taosArrayGetSize(vgInfo); - stDebug("s-task:%s (child taskId:%d) start to shuffle-dispatch blocks to %d vgroup(s), msgId:%d", id, - pTask->info.selfChildId, numOfVgroups, msgId); + int32_t actualVgroups = pTask->outputInfo.shuffleDispatcher.waitingRspCnt; + stDebug("s-task:%s (child taskId:%d) start to shuffle-dispatch blocks to %d/%d vgroup(s), msgId:%d", id, + pTask->info.selfChildId, actualVgroups, numOfVgroups, msgId); + int32_t numOfSend = 0; for (int32_t i = 0; i < numOfVgroups; i++) { if (pDispatchMsg[i].blockNum > 0) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); @@ -430,6 +443,11 @@ static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatch if (code < 0) { break; } + + // no need to try remain, all already send. + if (++numOfSend == actualVgroups) { + break; + } } } @@ -703,10 +721,9 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) { int32_t numOfVgs = taosArrayGetSize(vgInfo); pTask->notReadyTasks = numOfVgs; - char* p = NULL; - streamTaskGetStatus(pTask, &p); + SStreamTaskState* pState = streamTaskGetStatus(pTask); stDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s", - pTask->id.idStr, numOfVgs, p); + pTask->id.idStr, numOfVgs, pState->name); for (int32_t i = 0; i < numOfVgs; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); req.downstreamTaskId = pVgInfo->taskId; @@ -753,7 +770,7 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { taosArrayClear(pTask->pReadyMsgList); stDebug("s-task:%s level:%d source checkpoint completed msg sent to mnode", pTask->id.idStr, pTask->info.taskLevel); } else { - stDebug("s-task:%s level:%d already send rsp to mnode", pTask->id.idStr, pTask->info.taskLevel); + stDebug("s-task:%s level:%d already send rsp checkpoint success to mnode", pTask->id.idStr, pTask->info.taskLevel); } taosThreadMutexUnlock(&pTask->lock); @@ -826,9 +843,9 @@ int32_t doDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHist initRpcMsg(&msg, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, buf, tlen + sizeof(SMsgHead)); tmsgSendReq(pEpSet, &msg); - char* p = NULL; - streamTaskGetStatus(pTask, &p); - stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, p, + + SStreamTaskState* pState = streamTaskGetStatus(pTask); + stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, pState->name, pReq->downstreamTaskId, vgId); return 0; } @@ -1103,6 +1120,7 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) { // this message has been sent successfully, let's try next one. 
@@ -1103,6 +1120,7 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) {
 
 // this message has been sent successfully, let's try next one.
 static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
+  stDebug("s-task:%s destroy dispatch msg:%p", pTask->id.idStr, pTask->msgInfo.pData);
   destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
 
   bool delayDispatch = (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index f7e4b7856d..a35493a5c4 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -24,12 +24,12 @@
 static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask);
 
 bool streamTaskShouldStop(const SStreamTask* pTask) {
-  ETaskStatus s = streamTaskGetStatus(pTask, NULL);
-  return (s == TASK_STATUS__STOP) || (s == TASK_STATUS__DROPPING);
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+  return (pState->state == TASK_STATUS__STOP) || (pState->state == TASK_STATUS__DROPPING);
 }
 
 bool streamTaskShouldPause(const SStreamTask* pTask) {
-  return (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__PAUSE);
+  return (streamTaskGetStatus(pTask)->state == TASK_STATUS__PAUSE);
 }
 
 static int32_t doOutputResultBlockImpl(SStreamTask* pTask, SStreamDataBlock* pBlock) {
@@ -107,12 +107,6 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
       return 0;
     }
 
-    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
-      stWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
-      taosMsleep(1000);
-      continue;
-    }
-
     SSDataBlock* output = NULL;
     uint64_t     ts = 0;
     if ((code = qExecTask(pExecutor, &output, &ts)) < 0) {
@@ -344,11 +338,14 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
     streamMetaWUnLock(pMeta);
     return TSDB_CODE_STREAM_TASK_NOT_EXIST;
   } else {
-    stDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", id,
-            pStreamTask->id.idStr);
+    double el = (taosGetTimestampMs() - pTask->execInfo.step2Start) / 1000.;
+    stDebug(
+        "s-task:%s fill-history task end, scan wal elapsed time:%.2fSec, update related stream task:%s info, transfer "
+        "exec state",
+        id, el, pStreamTask->id.idStr);
   }
 
-  ETaskStatus status = streamTaskGetStatus(pStreamTask, NULL);
+  ETaskStatus  status = streamTaskGetStatus(pStreamTask)->state;
   STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
 
   // It must be halted for a source stream task, since when the related scan-history-data task start scan the history
@@ -356,9 +353,16 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     ASSERT(status == TASK_STATUS__HALT || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP);
   } else {
-    ASSERT(status == TASK_STATUS__READY|| status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP);
-    streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
-    stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, id);
+    ASSERT(status == TASK_STATUS__READY || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP);
+    int32_t code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
+    if (code != TSDB_CODE_SUCCESS) {
+      stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", id,
+              pStreamTask->id.idStr, tstrerror(code));
+      streamMetaReleaseTask(pMeta, pStreamTask);
+      return code;
+    } else {
+      stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, id);
+    }
   }
 
   // wait for the stream task to handle all in the inputQ, and to be idle
@@ -367,8 +371,9 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   // In case of sink tasks, no need to halt them.
   // In case of source tasks and agg tasks, we should HALT them, and wait for them to be idle. And then, it's safe to
   // start the task state transfer procedure.
-  char* p = NULL;
-  status = streamTaskGetStatus(pStreamTask, &p);
+  SStreamTaskState* pState = streamTaskGetStatus(pStreamTask);
+  status = pState->state;
+  char* p = pState->name;
   if (status == TASK_STATUS__STOP || status == TASK_STATUS__DROPPING) {
     stError("s-task:%s failed to transfer state from fill-history task:%s, status:%s", id, pStreamTask->id.idStr, p);
     streamMetaReleaseTask(pMeta, pStreamTask);
@@ -386,8 +391,12 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   }
 
   // 1. expand the query time window for stream task of WAL scanner
-  pTimeWindow->skey = INT64_MIN;
-  qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor);
+  if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
+    pTimeWindow->skey = INT64_MIN;
+    qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor);
+  } else {
+    stDebug("s-task:%s non-source task no need to reset filter window", pStreamTask->id.idStr);
+  }
 
   // 2. transfer the ownership of executor state
   streamTaskReleaseState(pTask);
@@ -401,10 +410,13 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   // 4. free it and remove fill-history task from disk meta-store
   streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
 
-  // 5. save to disk
-  pStreamTask->status.taskStatus = streamTaskGetStatus(pStreamTask, NULL);
+  // 5. assign the status to the value that will be kept on disk
+  pStreamTask->status.taskStatus = streamTaskGetStatus(pStreamTask)->state;
 
-  // 6. add empty delete block
+  // 6. open the inputQ for all upstream tasks
+  streamTaskOpenAllUpstreamInput(pStreamTask);
+
+  // 7. add empty delete block
   if ((pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) && taosQueueEmpty(pStreamTask->inputq.queue->pQueue)) {
     SStreamRefDataBlock* pItem = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
@@ -424,6 +436,8 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
 
 int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
   int32_t code = TSDB_CODE_SUCCESS;
+  SStreamMeta* pMeta = pTask->pMeta;
+
   ASSERT(pTask->status.appendTranstateBlock == 1);
 
   int32_t level = pTask->info.taskLevel;
@@ -433,8 +447,14 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
   if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) {  // do transfer task operator states.
     code = streamDoTransferStateToStreamTask(pTask);
-  } else {  // drop fill-history task
-    streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pTask->pMeta->vgId, &pTask->id);
+  } else {  // drop fill-history task and open inputQ of sink task
+    SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
+    if (pStreamTask != NULL) {
+      streamTaskOpenAllUpstreamInput(pStreamTask);
+      streamMetaReleaseTask(pMeta, pStreamTask);
+    }
+
+    streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
   }
 
   return code;
@@ -490,16 +510,17 @@ static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_
   }
 }
 
-int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
+int32_t streamProcessTransstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
   const char* id = pTask->id.idStr;
   int32_t     code = TSDB_CODE_SUCCESS;
+  int32_t     level = pTask->info.taskLevel;
 
-  int32_t level = pTask->info.taskLevel;
   if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SINK) {
     int32_t remain = streamAlignTransferState(pTask);
+
     if (remain > 0) {
       streamFreeQitem((SStreamQueueItem*)pBlock);
-      stDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain);
+      stDebug("s-task:%s receive upstream trans-state msg, not sent yet, remain:%d", id, remain);
       return 0;
     }
   }
@@ -530,7 +551,7 @@ int32_t streamProcessTransstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
     }
   } else {  // non-dispatch task, do task state transfer directly
     streamFreeQitem((SStreamQueueItem*)pBlock);
-    stDebug("s-task:%s non-dispatch task, level:%d start to transfer state directly", id, pTask->info.taskLevel);
+    stDebug("s-task:%s non-dispatch task, level:%d start to transfer state directly", id, level);
 
     ASSERT(pTask->info.fillHistory == 1);
     code = streamTransferStateToStreamTask(pTask);
@@ -542,6 +563,10 @@ int32_t streamProcessTransstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
   return code;
 }
 
+static void setTaskSchedInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; }
+static void clearTaskSchedInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; }
+static void setLastExecTs(SStreamTask* pTask, int64_t ts) { pTask->status.lastExecTs = ts; }
+
 /**
  * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the
  * appropriate batch of blocks should be handled in 5 to 10 sec.
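The `todo` in the comment above sketches an adaptive batching policy: each batch of input blocks should take roughly 5 to 10 seconds to execute, so the batch size should grow while batches finish fast and shrink when they run long. Nothing in this patch implements it yet; a minimal illustrative sketch, all names hypothetical:

    // hypothetical adaptive batch sizing targeting a 5-10 second execution budget
    static int32_t tuneBatchSize(int32_t curBatch, int64_t elapsedMs) {
      if (elapsedMs < 5000 && curBatch < 1024) {
        return curBatch * 2;  // finished quickly: take bigger batches next round
      }
      if (elapsedMs > 10000 && curBatch > 1) {
        return curBatch / 2;  // ran long: back off
      }
      return curBatch;        // within budget: keep the current size
    }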
@@ -556,9 +581,28 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
     int32_t           blockSize = 0;
     int32_t           numOfBlocks = 0;
     SStreamQueueItem* pInput = NULL;
-    if (streamTaskShouldStop(pTask) || (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__UNINIT)) {
+
+    if (streamTaskShouldStop(pTask) || (streamTaskGetStatus(pTask)->state == TASK_STATUS__UNINIT)) {
       stDebug("s-task:%s stream task is stopped", id);
-      break;
+      return 0;
+    }
+
+    if (streamQueueIsFull(pTask->outputq.queue)) {
+      stWarn("s-task:%s outputQ is full, idle for 500ms and retry", id);
+      setTaskSchedInfo(pTask, 500);
+      return 0;
+    }
+
+    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
+      stWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry", id);
+      setTaskSchedInfo(pTask, 1000);
+      return 0;
+    }
+
+    if (taosGetTimestampMs() - pTask->status.lastExecTs < 50) {
+      stDebug("s-task:%s invoke with high frequency, idle and retry exec in 50ms", id);
+      setTaskSchedInfo(pTask, 50);
+      return 0;
     }
 
     /*int32_t code = */ streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
@@ -567,16 +611,15 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
       return 0;
     }
 
-    int32_t type = pInput->type;
-
     // dispatch checkpoint msg to all downstream tasks
+    int32_t type = pInput->type;
     if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
       streamProcessCheckpointBlock(pTask, (SStreamDataBlock*)pInput);
       continue;
     }
 
-    if (pInput->type == STREAM_INPUT__TRANS_STATE) {
-      streamProcessTranstateBlock(pTask, (SStreamDataBlock*)pInput);
+    if (type == STREAM_INPUT__TRANS_STATE) {
+      streamProcessTransstateBlock(pTask, (SStreamDataBlock*)pInput);
       continue;
     }
 
@@ -616,7 +659,7 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
       if (ver != pInfo->processedVer) {
         stDebug("s-task:%s update processedVer(unsaved) from %" PRId64 " to %" PRId64 " nextProcessVer:%" PRId64
                 " ckpt:%" PRId64,
-                pTask->id.idStr, pInfo->processedVer, ver, pInfo->nextProcessVer, pInfo->checkpointVer);
+                id, pInfo->processedVer, ver, pInfo->nextProcessVer, pInfo->checkpointVer);
         pInfo->processedVer = ver;
       }
 
@@ -625,10 +668,28 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
     // todo other thread may change the status
     // do nothing after sync executor state to storage backend, untill the vnode-level checkpoint is completed.
     if (type == STREAM_INPUT__CHECKPOINT) {
-      char* p = NULL;
-      streamTaskGetStatus(pTask, &p);
-      stDebug("s-task:%s checkpoint block received, set status:%s", pTask->id.idStr, p);
-      streamTaskBuildCheckpoint(pTask);
+
+      // todo add lock
+      SStreamTaskState* pState = streamTaskGetStatus(pTask);
+      if (pState->state == TASK_STATUS__CK) {
+        stDebug("s-task:%s checkpoint block received, set status:%s", id, pState->name);
+        streamTaskBuildCheckpoint(pTask);
+      } else {
+        // todo refactor
+        int32_t code = 0;
+        if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
+          code = streamTaskSendCheckpointSourceRsp(pTask);
+        } else {
+          code = streamTaskSendCheckpointReadyMsg(pTask);
+        }
+
+        if (code != TSDB_CODE_SUCCESS) {
+          // todo: let's retry send rsp to upstream/mnode
+          stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", id, 0,
+                  tstrerror(code));
+        }
+      }
+
       return 0;
     }
   }
@@ -639,15 +700,117 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
 
 // the task may be set dropping/stopping, while it is still in the task queue, therefore, the sched-status can not
 // be updated by tryExec function, therefore, the schedStatus will always be the TASK_SCHED_STATUS__WAITING.
 bool streamTaskIsIdle(const SStreamTask* pTask) {
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;
   return (pTask->status.schedStatus == TASK_SCHED_STATUS__INACTIVE || status == TASK_STATUS__STOP ||
           status == TASK_STATUS__DROPPING);
 }
 
 bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus) {
-  ETaskStatus st = streamTaskGetStatus(pTask, NULL);
-  return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__STREAM_SCAN_HISTORY ||
-          st == TASK_STATUS__CK);
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+
+  ETaskStatus st = pState->state;
+  if (pStatus != NULL) {
+    *pStatus = pState->name;
+  }
+
+  return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK);
+}
+
+static void doStreamExecTaskHelper(void* param, void* tmrId) {
+  SStreamTask* pTask = (SStreamTask*)param;
+
+  SStreamTaskState* p = streamTaskGetStatus(pTask);
+  if (p->state == TASK_STATUS__DROPPING || p->state == TASK_STATUS__STOP) {
+    streamTaskSetSchedStatusInactive(pTask);
+
+    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:%s status:%s not resume task, ref:%d", pTask->id.idStr, p->name, ref);
+
+    streamMetaReleaseTask(pTask->pMeta, pTask);
+    return;
+  }
+
+  // task resume running
+  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
+  if (pRunReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    /*int8_t status = */ streamTaskSetSchedStatusInactive(pTask);
+
+    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stError("failed to create msg to resume s-task:%s, reason out of memory, ref:%d", pTask->id.idStr, ref);
+
+    streamMetaReleaseTask(pTask->pMeta, pTask);
+    return;
+  }
+
+  pRunReq->head.vgId = pTask->info.nodeId;
+  pRunReq->streamId = pTask->id.streamId;
+  pRunReq->taskId = pTask->id.taskId;
+  pRunReq->reqType = STREAM_EXEC_T_RESUME_TASK;
+
+  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+  stDebug("trigger to resume s-task:%s after being idled for %dms, ref:%d", pTask->id.idStr,
+          pTask->status.schedIdleTime, ref);
+
+  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
+  tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg);
+
+  // release the task ref count
+  clearTaskSchedInfo(pTask);
+  streamMetaReleaseTask(pTask->pMeta, pTask);
+}
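`doStreamExecTaskHelper` above is the timer half of the new idle/resume protocol: while a timer is armed, `timerActive` holds an extra reference, and when the timer fires the callback posts a `STREAM_EXEC_T_RESUME_TASK` run request back onto the stream queue instead of executing inline. `schedTaskInFuture` below arms that timer; a condensed restatement of its core, omitting the meta ref acquisition (`parkTask` is a hypothetical name):

    // park the task and let streamTimer push it back onto the exec queue later
    static void parkTask(SStreamTask* pTask, int32_t idleMs) {
      atomic_add_fetch_32(&pTask->status.timerActive, 1);  // the armed timer holds one ref
      pTask->status.schedIdleTime = idleMs;
      if (pTask->schedInfo.pIdleTimer == NULL) {
        pTask->schedInfo.pIdleTimer = taosTmrStart(doStreamExecTaskHelper, idleMs, pTask, streamTimer);
      } else {
        taosTmrReset(doStreamExecTaskHelper, idleMs, pTask, streamTimer, &pTask->schedInfo.pIdleTimer);
      }
    }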
+static int32_t schedTaskInFuture(SStreamTask* pTask) {
+  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+  stDebug("s-task:%s task should idle, add into timer to retry in %dms, ref:%d", pTask->id.idStr,
+          pTask->status.schedIdleTime, ref);
+
+  // add one ref count for task
+  // todo: this may fail, and the add-ref may fail as well
+  SStreamTask* pAddRefTask = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId);
+
+  if (pTask->schedInfo.pIdleTimer == NULL) {
+    pTask->schedInfo.pIdleTimer = taosTmrStart(doStreamExecTaskHelper, pTask->status.schedIdleTime, pTask, streamTimer);
+  } else {
+    taosTmrReset(doStreamExecTaskHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t streamResumeTask(SStreamTask* pTask) {
+  ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__ACTIVE);
+  const char* id = pTask->id.idStr;
+
+  while (1) {
+    /*int32_t code = */ doStreamExecTask(pTask);
+    taosThreadMutexLock(&pTask->lock);
+
+    // check if this task needs to be idle for a while
+    if (pTask->status.schedIdleTime > 0) {
+      schedTaskInFuture(pTask);
+
+      taosThreadMutexUnlock(&pTask->lock);
+      setLastExecTs(pTask, taosGetTimestampMs());
+      return 0;
+    } else {
+      int32_t numOfItems = streamQueueGetNumOfItems(pTask->inputq.queue);
+
+      if ((numOfItems == 0) || streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) {
+        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
+        taosThreadMutexUnlock(&pTask->lock);
+
+        setLastExecTs(pTask, taosGetTimestampMs());
+
+        char* p = streamTaskGetStatus(pTask)->name;
+        stDebug("s-task:%s exec completed, status:%s, sched-status:%d, lastExecTs:%" PRId64, id, p,
+                pTask->status.schedStatus, pTask->status.lastExecTs);
+
+        return 0;
+      }
+    }
+
+    taosThreadMutexUnlock(&pTask->lock);
+  }
 }
 
 int32_t streamExecTask(SStreamTask* pTask) {
@@ -656,29 +819,9 @@ int32_t streamExecTask(SStreamTask* pTask) {
 
   int8_t schedStatus = streamTaskSetSchedStatusActive(pTask);
   if (schedStatus == TASK_SCHED_STATUS__WAITING) {
-    while (1) {
-      int32_t code = doStreamExecTask(pTask);
-      if (code < 0) {  // todo this status should be removed
-        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
-        return -1;
-      }
-
-      taosThreadMutexLock(&pTask->lock);
-      if ((streamQueueGetNumOfItems(pTask->inputq.queue) == 0) || streamTaskShouldStop(pTask) ||
-          streamTaskShouldPause(pTask)) {
-        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
-        taosThreadMutexUnlock(&pTask->lock);
-
-        char* p = NULL;
-        streamTaskGetStatus(pTask, &p);
-        stDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, p, pTask->status.schedStatus);
-        return 0;
-      }
-      taosThreadMutexUnlock(&pTask->lock);
-    }
+    streamResumeTask(pTask);
   } else {
-    char* p = NULL;
-    streamTaskGetStatus(pTask, &p);
+    char* p = streamTaskGetStatus(pTask)->name;
     stDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id, p,
             pTask->status.schedStatus);
   }
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 254b6de5b8..ee2b70eebe 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -61,9 +61,10 @@ static void streamMetaEnvInit() {
   streamMetaId = taosOpenRef(64, streamMetaCloseImpl);
 
   metaRefMgtInit();
+  streamTimerInit();
 }
 
-void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); }
+void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit);}
 
 void streamMetaCleanup() {
   taosCloseRef(streamBackendId);
@@ -71,6 +72,7 @@ void streamMetaCleanup() {
   taosCloseRef(streamMetaId);
 
   metaRefMgtCleanup();
+  streamTimerCleanUp();
 }
 
 void metaRefMgtInit() {
@@ -201,7 +203,7 @@ int32_t streamMetaCvtDbFormat(SStreamMeta* pMeta) {
     void* key = taosHashGetKey(pIter, NULL);
     code = streamStateCvtDataFormat(pMeta->path, key, *(void**)pIter);
     if (code != 0) {
-      qError("failed to cvt data");
+      stError("failed to cvt data");
       goto _EXIT;
     }
 
@@ -225,11 +227,11 @@ int32_t streamMetaMayCvtDbFormat(SStreamMeta* pMeta) {
   if (compatible == STREAM_STATA_COMPATIBLE) {
     return 0;
   } else if (compatible == STREAM_STATA_NEED_CONVERT) {
-    qInfo("stream state need covert backend format");
+    stInfo("stream state needs to convert the backend format");
     return streamMetaCvtDbFormat(pMeta);
   } else if (compatible == STREAM_STATA_NO_COMPATIBLE) {
-    qError(
+    stError(
         "stream read incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream "
         "manually",
         tsDataDir);
@@ -262,9 +264,18 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, void* arg, char* key) {
   }
 
   STaskDbWrapper* pBackend = taskDbOpen(pMeta->path, key, chkpId);
-  if (pBackend == NULL) {
-    taosThreadMutexUnlock(&pMeta->backendMutex);
-    return -1;
+  while (1) {
+    if (pBackend == NULL) {
+      taosThreadMutexUnlock(&pMeta->backendMutex);
+      taosMsleep(1000);
+      stDebug("backend held by other task, restart later, path:%s, key:%s", pMeta->path, key);
+    } else {
+      taosThreadMutexUnlock(&pMeta->backendMutex);
+      break;
+    }
+
+    taosThreadMutexLock(&pMeta->backendMutex);
+    pBackend = taskDbOpen(pMeta->path, key, chkpId);
   }
 
   int64_t tref = taosAddRef(taskDbWrapperId, pBackend);
@@ -289,7 +300,9 @@ void streamMetaRemoveDB(void* arg, char* key) {
 
   taosThreadMutexUnlock(&pMeta->backendMutex);
 }
-SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage) {
+
+SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage,
+                            startComplete_fn_t fn) {
   int32_t      code = -1;
   SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta));
   if (pMeta == NULL) {
@@ -348,27 +361,16 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
     goto _err;
   }
 
-  pMeta->walScanCounter = 0;
+  pMeta->scanInfo.scanCounter = 0;
   pMeta->vgId = vgId;
   pMeta->ahandle = ahandle;
   pMeta->expandFunc = expandFunc;
   pMeta->stage = stage;
   pMeta->role = (vgId == SNODE_HANDLE) ? NODE_ROLE_LEADER : NODE_ROLE_UNINIT;
+  pMeta->startInfo.completeFn = fn;
   pMeta->pTaskDbUnique = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
 
-  // pMeta->chkpId = streamGetLatestCheckpointId(pMeta);
-  // pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
-  // while (pMeta->streamBackend == NULL) {
-  //   qError("vgId:%d failed to init stream backend", pMeta->vgId);
-  //   taosMsleep(2 * 1000);
-  //   qInfo("vgId:%d retry to init stream backend", pMeta->vgId);
-  //   pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
-  //   if (pMeta->streamBackend == NULL) {
-  //   }
-  // }
-  // pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend);
-
   pMeta->numOfPausedTasks = 0;
   pMeta->numOfStreamTasks = 0;
   stInfo("vgId:%d open stream meta successfully, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId,
@@ -376,6 +378,17 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
 
   pMeta->rid = taosAddRef(streamMetaId, pMeta);
 
+  // set the attribute when running on Linux OS
+  TdThreadRwlockAttr attr;
+  taosThreadRwlockAttrInit(&attr);
+
+#ifdef LINUX
+  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+#endif
+
+  taosThreadRwlockInit(&pMeta->lock, &attr);
+  taosThreadRwlockAttrDestroy(&attr);
+
   int64_t* pRid = taosMemoryMalloc(sizeof(int64_t));
   memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid));
   metaRefMgtAdd(pMeta->vgId, pRid);
@@ -386,6 +399,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
 
   pMeta->qHandle = taosInitScheduler(32, 1, "stream-chkp", NULL);
 
   pMeta->bkdChkptMgt = bkdMgtCreate(tpath);
+  taosThreadMutexInit(&pMeta->backendMutex, NULL);
 
   return pMeta;
@@ -407,6 +421,7 @@ _err:
 }
 
 // todo refactor: the lock shoud be restricted in one function
+#ifdef BUILD_NO_CALL
 void streamMetaInitBackend(SStreamMeta* pMeta) {
   pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId, pMeta->vgId);
   if (pMeta->streamBackend == NULL) {
@@ -428,6 +443,7 @@ void streamMetaInitBackend(SStreamMeta* pMeta) {
   pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend);
   streamBackendLoadCheckpointInfo(pMeta);
 }
+#endif
 
 void streamMetaClear(SStreamMeta* pMeta) {
   // remove all existed tasks in this vnode
@@ -438,7 +454,7 @@ void streamMetaClear(SStreamMeta* pMeta) {
     // release the ref by timer
     if (p->info.triggerParam != 0 && p->info.fillHistory == 0) {  // one more ref in timer
       stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt);
-      taosTmrStop(p->schedInfo.pTimer);
+      taosTmrStop(p->schedInfo.pDelayTimer);
       p->info.triggerParam = 0;
       streamMetaReleaseTask(pMeta, p);
     }
@@ -449,11 +465,11 @@ void streamMetaClear(SStreamMeta* pMeta) {
 
   taosRemoveRef(streamBackendId, pMeta->streamBackendRid);
   taosHashClear(pMeta->pTasksMap);
-  taosHashClear(pMeta->pTaskDbUnique);
 
   taosArrayClear(pMeta->pTaskList);
   taosArrayClear(pMeta->chkpSaved);
   taosArrayClear(pMeta->chkpInUse);
+
   pMeta->numOfStreamTasks = 0;
   pMeta->numOfPausedTasks = 0;
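`streamMetaOpen` above replaces the old spin latch with a pthread read-write lock and, on Linux, configures writer preference so a steady stream of readers cannot starve a pending writer (the later `streamMetaRLock`/`streamMetaWLock` hunks switch to the matching rwlock calls). A standalone sketch of the same initialization using the raw POSIX API:

    #include <pthread.h>

    static pthread_rwlock_t metaLock;

    static void initMetaLock(void) {
      pthread_rwlockattr_t attr;
      pthread_rwlockattr_init(&attr);
    #ifdef __linux__
      // glibc extension: prefer writers so readers cannot starve a pending write lock
      pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    #endif
      pthread_rwlock_init(&metaLock, &attr);
      pthread_rwlockattr_destroy(&attr);
    }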
@@ -608,22 +624,23 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta) {
   return (int32_t)size;
 }
 
-SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
-  streamMetaRLock(pMeta);
-
+SStreamTask* streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
   STaskId       id = {.streamId = streamId, .taskId = taskId};
   SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask != NULL) {
-    if (!streamTaskShouldStop(*ppTask)) {
-      int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1);
-      streamMetaRUnLock(pMeta);
-      stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref);
-      return *ppTask;
-    }
+  if (ppTask == NULL || streamTaskShouldStop(*ppTask)) {
+    return NULL;
   }
 
+  int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1);
+  stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref);
+  return *ppTask;
+}
+
+SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
+  streamMetaRLock(pMeta);
+  SStreamTask* p = streamMetaAcquireTaskNoLock(pMeta, streamId, taskId);
   streamMetaRUnLock(pMeta);
-  return NULL;
+  return p;
 }
 
 void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
@@ -716,7 +733,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
 
     if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
       stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
-      taosTmrStop(pTask->schedInfo.pTimer);
+      taosTmrStop(pTask->schedInfo.pDelayTimer);
      pTask->info.triggerParam = 0;
       streamMetaReleaseTask(pMeta, pTask);
     }
@@ -941,6 +958,7 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq) {
     if (tEncodeI64(pEncoder, ps->verEnd) < 0) return -1;
     if (tEncodeI64(pEncoder, ps->activeCheckpointId) < 0) return -1;
     if (tEncodeI8(pEncoder, ps->checkpointFailed) < 0) return -1;
+    if (tEncodeI32(pEncoder, ps->chkpointTransId) < 0) return -1;
   }
 
   int32_t numOfVgs = taosArrayGetSize(pReq->pUpdateNodes);
@@ -979,6 +997,7 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) {
     if (tDecodeI64(pDecoder, &entry.verEnd) < 0) return -1;
     if (tDecodeI64(pDecoder, &entry.activeCheckpointId) < 0) return -1;
     if (tDecodeI8(pDecoder, (int8_t*)&entry.checkpointFailed) < 0) return -1;
+    if (tDecodeI32(pDecoder, &entry.chkpointTransId) < 0) return -1;
 
     entry.id.taskId = taskId;
     taosArrayPush(pReq->pTaskStatus, &entry);
@@ -1007,10 +1026,18 @@ static bool waitForEnoughDuration(SMetaHbInfo* pInfo) {
   return false;
 }
 
-static void clearHbMsg(SStreamHbMsg* pMsg, SArray* pIdList) {
-  taosArrayDestroy(pMsg->pTaskStatus);
-  taosArrayDestroy(pMsg->pUpdateNodes);
-  taosArrayDestroy(pIdList);
+void streamMetaClearHbMsg(SStreamHbMsg* pMsg) {
+  if (pMsg == NULL) {
+    return;
+  }
+
+  if (pMsg->pUpdateNodes != NULL) {
+    taosArrayDestroy(pMsg->pUpdateNodes);
+  }
+
+  if (pMsg->pTaskStatus != NULL) {
+    taosArrayDestroy(pMsg->pTaskStatus);
+  }
 }
 
 static bool existInHbMsg(SStreamHbMsg* pMsg, SDownstreamTaskEpset* pTaskEpset) {
@@ -1044,68 +1071,20 @@ static void addUpdateNodeIntoHbMsg(SStreamTask* pTask, SStreamHbMsg* pMsg) {
   taosThreadMutexUnlock(&pTask->lock);
 }
 
-void metaHbToMnode(void* param, void* tmrId) {
-  int64_t rid = *(int64_t*)param;
-
-  SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid);
-  if (pMeta == NULL) {
-    return;
-  }
-
-  // need to stop, stop now
-  if (pMeta->pHbInfo->stopFlag == STREAM_META_WILL_STOP) {
-    pMeta->pHbInfo->stopFlag = STREAM_META_OK_TO_STOP;
-    stDebug("vgId:%d jump out of meta timer", pMeta->vgId);
-    taosReleaseRef(streamMetaId, rid);
-    return;
-  }
-
-  // not leader not send msg
-  if (pMeta->role != NODE_ROLE_LEADER) {
-    stInfo("vgId:%d role:%d not leader not send hb to mnode", pMeta->vgId, pMeta->role);
-    taosReleaseRef(streamMetaId, rid);
-    pMeta->pHbInfo->hbStart = 0;
-    return;
-  }
-
-  // set the hb start time
-  if (pMeta->pHbInfo->hbStart == 0) {
-    pMeta->pHbInfo->hbStart = taosGetTimestampMs();
-  }
-
-  if (!waitForEnoughDuration(pMeta->pHbInfo)) {
-    taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr);
-    taosReleaseRef(streamMetaId, rid);
-    return;
-  }
-
-  stDebug("vgId:%d build stream task hb, leader:%d", pMeta->vgId, (pMeta->role == NODE_ROLE_LEADER));
-
+static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {
   SStreamHbMsg hbMsg = {0};
   SEpSet       epset = {0};
   bool         hasMnodeEpset = false;
-  int64_t      stage = 0;
+  int32_t      numOfTasks = streamMetaGetNumOfTasks(pMeta);
 
-  streamMetaRLock(pMeta);
-
-  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
   hbMsg.vgId = pMeta->vgId;
-  stage = pMeta->stage;
-
-  SArray* pIdList = taosArrayDup(pMeta->pTaskList, NULL);
-
-  streamMetaRUnLock(pMeta);
-
   hbMsg.pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry));
   hbMsg.pUpdateNodes = taosArrayInit(numOfTasks, sizeof(int32_t));
 
   for (int32_t i = 0; i < numOfTasks; ++i) {
-    STaskId* pId = taosArrayGet(pIdList, i);
+    STaskId* pId = taosArrayGet(pMeta->pTaskList, i);
 
-    streamMetaRLock(pMeta);
     SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId));
-    streamMetaRUnLock(pMeta);
-
     if (pTask == NULL) {
       continue;
     }
@@ -1117,13 +1096,13 @@ void metaHbToMnode(void* param, void* tmrId) {
 
     STaskStatusEntry entry = {
         .id = *pId,
-        .status = streamTaskGetStatus(*pTask, NULL),
+        .status = streamTaskGetStatus(*pTask)->state,
         .nodeId = hbMsg.vgId,
-        .stage = stage,
+        .stage = pMeta->stage,
         .inputQUsed = SIZE_IN_MiB(streamQueueGetItemSize((*pTask)->inputq.queue)),
     };
 
-    entry.inputRate = entry.inputQUsed * 100.0 / STREAM_TASK_QUEUE_CAPACITY_IN_SIZE;
+    entry.inputRate = entry.inputQUsed * 100.0 / (2 * STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
     if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) {
       entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate;
       entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize);
@@ -1132,6 +1111,11 @@ void metaHbToMnode(void* param, void* tmrId) {
     if ((*pTask)->chkInfo.checkpointingId != 0) {
       entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId);
       entry.activeCheckpointId = (*pTask)->chkInfo.checkpointingId;
+      entry.chkpointTransId = (*pTask)->chkInfo.transId;
+
+      if (entry.checkpointFailed) {
+        stInfo("s-task:%s send kill checkpoint trans info, transId:%d", (*pTask)->id.idStr, (*pTask)->chkInfo.transId);
+      }
     }
 
     if ((*pTask)->exec.pWalReader != NULL) {
@@ -1174,29 +1158,70 @@ void metaHbToMnode(void* param, void* tmrId) {
     }
 
     tEncoderClear(&encoder);
 
-    SRpcMsg msg = {
-        .info.noResp = 1,
-    };
+    SRpcMsg msg = {.info.noResp = 1};
     initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
 
     pMeta->pHbInfo->hbCount += 1;
-    stDebug("vgId:%d build and send hb to mnode, numOfTasks:%d total:%d", pMeta->vgId, hbMsg.numOfTasks,
-            pMeta->pHbInfo->hbCount);
+
     tmsgSendReq(&epset, &msg);
   } else {
     stDebug("vgId:%d no tasks and no mnd epset, not send stream hb to mnode", pMeta->vgId);
   }
 
-_end:
-  clearHbMsg(&hbMsg, pIdList);
+ _end:
+  streamMetaClearHbMsg(&hbMsg);
+  return TSDB_CODE_SUCCESS;
+}
+
NODE_ROLE_LEADER) { + stInfo("vgId:%d role:%d not leader not send hb to mnode", pMeta->vgId, pMeta->role); + taosReleaseRef(streamMetaId, rid); + pMeta->pHbInfo->hbStart = 0; + return; + } + + // set the hb start time + if (pMeta->pHbInfo->hbStart == 0) { + pMeta->pHbInfo->hbStart = taosGetTimestampMs(); + } + + if (!waitForEnoughDuration(pMeta->pHbInfo)) { + taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr); + taosReleaseRef(streamMetaId, rid); + return; + } + + stDebug("vgId:%d build stream task hb, leader:%d", pMeta->vgId, (pMeta->role == NODE_ROLE_LEADER)); + streamMetaRLock(pMeta); + metaHeartbeatToMnodeImpl(pMeta); + streamMetaRUnLock(pMeta); + taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr); taosReleaseRef(streamMetaId, rid); } bool streamMetaTaskInTimer(SStreamMeta* pMeta) { bool inTimer = false; - streamMetaWLock(pMeta); + streamMetaRLock(pMeta); void* pIter = NULL; while (1) { @@ -1207,11 +1232,12 @@ bool streamMetaTaskInTimer(SStreamMeta* pMeta) { SStreamTask* pTask = *(SStreamTask**)pIter; if (pTask->status.timerActive >= 1) { + stDebug("s-task:%s in timer, blocking tasks in vgId:%d restart", pTask->id.idStr, pMeta->vgId); inTimer = true; } } - streamMetaWUnLock(pMeta); + streamMetaRUnLock(pMeta); return inTimer; } @@ -1239,11 +1265,11 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) { // wait for the stream meta hb function stopping if (pMeta->role == NODE_ROLE_LEADER) { - pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP; - while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) { - taosMsleep(100); - stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId); - } +// pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP; +// while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) { +// taosMsleep(100); +// stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId); +// } } stDebug("vgId:%d start to check all tasks", vgId); @@ -1270,26 +1296,37 @@ void streamMetaResetStartInfo(STaskStartInfo* pStartInfo) { taosHashClear(pStartInfo->pFailedTaskSet); pStartInfo->tasksWillRestart = 0; pStartInfo->readyTs = 0; + // reset the sentinel flag value to be 0 - atomic_store_32(&pStartInfo->taskStarting, 0); + pStartInfo->taskStarting = 0; } void streamMetaRLock(SStreamMeta* pMeta) { stTrace("vgId:%d meta-rlock", pMeta->vgId); - taosRLockLatch(&pMeta->lock); + taosThreadRwlockRdlock(&pMeta->lock); } + void streamMetaRUnLock(SStreamMeta* pMeta) { stTrace("vgId:%d meta-runlock", pMeta->vgId); - taosRUnLockLatch(&pMeta->lock); + int32_t code = taosThreadRwlockUnlock(&pMeta->lock); + if (code != TSDB_CODE_SUCCESS) { + stError("vgId:%d meta-runlock failed, code:%d", pMeta->vgId, code); + } else { + stDebug("vgId:%d meta-runlock completed", pMeta->vgId); + } } + void streamMetaWLock(SStreamMeta* pMeta) { stTrace("vgId:%d meta-wlock", pMeta->vgId); - taosWLockLatch(&pMeta->lock); + taosThreadRwlockWrlock(&pMeta->lock); + stTrace("vgId:%d meta-wlock completed", pMeta->vgId); } + void streamMetaWUnLock(SStreamMeta* pMeta) { stTrace("vgId:%d meta-wunlock", pMeta->vgId); - taosWUnLockLatch(&pMeta->lock); + taosThreadRwlockUnlock(&pMeta->lock); } + static void execHelper(struct SSchedMsg* pSchedMsg) { __async_exec_fn_t execFn = (__async_exec_fn_t)pSchedMsg->ahandle; int32_t code = execFn(pSchedMsg->thandle); @@ -1306,3 +1343,208 @@ int32_t streamMetaAsyncExec(SStreamMeta* pMeta, __stream_async_exec_fn_t fn, voi schedMsg.msg = code; return taosScheduleTask(pMeta->qHandle, &schedMsg); } + +SArray* 
streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta) { + SArray* pTaskList = taosArrayDup(pMeta->pTaskList, NULL); + + bool sendMsg = pMeta->sendMsgBeforeClosing; + if (!sendMsg) { + stDebug("vgId:%d no need to send msg to mnode before closing tasks", pMeta->vgId); + return pTaskList; + } + + stDebug("vgId:%d send msg to mnode before closing all tasks", pMeta->vgId); + + // send hb msg to mnode before closing all tasks. + int32_t numOfTasks = taosArrayGetSize(pTaskList); + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId); + if (pTask == NULL) { + continue; + } + + taosThreadMutexLock(&pTask->lock); + + SStreamTaskState* pState = streamTaskGetStatus(pTask); + if (pState->state == TASK_STATUS__CK) { + streamTaskSetCheckpointFailedId(pTask); + stDebug("s-task:%s mark the checkpoint:%"PRId64" failed", pTask->id.idStr, pTask->chkInfo.checkpointingId); + } else { + stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState->name); + } + + taosThreadMutexUnlock(&pTask->lock); + streamMetaReleaseTask(pMeta, pTask); + } + + metaHeartbeatToMnodeImpl(pMeta); + pMeta->sendMsgBeforeClosing = false; + return pTaskList; +} + +void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader) { + streamMetaWLock(pMeta); + + int64_t prevStage = pMeta->stage; + pMeta->stage = stage; + + // mark the sign to send msg before close all tasks + if ((!isLeader) && (pMeta->role == NODE_ROLE_LEADER)) { + pMeta->sendMsgBeforeClosing = true; + } + + pMeta->role = (isLeader)? NODE_ROLE_LEADER:NODE_ROLE_FOLLOWER; + streamMetaWUnLock(pMeta); + + if (isLeader) { + stInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb", pMeta->vgId, + prevStage, stage, isLeader); + streamMetaStartHb(pMeta); + } else { + stInfo("vgId:%d update meta stage:%" PRId64 " prev:%" PRId64 " leader:%d sendMsg beforeClosing:%d", pMeta->vgId, + prevStage, stage, isLeader, pMeta->sendMsgBeforeClosing); + } +} + +int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { + streamMetaRLock(pMeta); + + int32_t num = taosArrayGetSize(pMeta->pTaskList); + stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); + if (num == 0) { + streamMetaRUnLock(pMeta); + return TSDB_CODE_SUCCESS; + } + + // send hb msg to mnode before closing all tasks. 
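Before the stop loop continues below, a note on the acquire/release discipline that streamMetaSendMsgBeforeCloseTasks above and the loop that follows both rely on: the refactor splits task acquisition into an inner no-lock variant plus a thin locking wrapper, so callers that already hold the meta lock can reuse the same logic. A minimal, self-contained sketch of that pattern, using hypothetical simplified types in place of SStreamMeta/SStreamTask:

```
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for SStreamMeta/SStreamTask; the point is
 * the lock-split pattern, not the real TDengine structures. */
typedef struct {
  atomic_int refCnt;
  int        shouldStop;
} Task;

typedef struct {
  pthread_rwlock_t lock;
  Task*            task; /* the real code looks tasks up in pTasksMap */
} Meta;

/* Inner variant: assumes the caller already holds at least the read lock. */
static Task* acquireTaskNoLock(Meta* m) {
  if (m->task == NULL || m->task->shouldStop) return NULL;
  atomic_fetch_add(&m->task->refCnt, 1);
  return m->task;
}

/* Thin wrapper: take the lock, delegate, release. Paths that already hold the
 * meta lock (such as a close-all-tasks loop) call the NoLock variant directly. */
static Task* acquireTask(Meta* m) {
  pthread_rwlock_rdlock(&m->lock);
  Task* t = acquireTaskNoLock(m);
  pthread_rwlock_unlock(&m->lock);
  return t;
}

int main(void) {
  Task t = {0};
  Meta m = {.task = &t};
  pthread_rwlock_init(&m.lock, NULL);
  Task* p = acquireTask(&m);
  printf("acquired:%d ref:%d\n", p != NULL, atomic_load(&t.refCnt));
  pthread_rwlock_destroy(&m.lock);
  return 0;
}
```

Holding only the read lock during acquisition keeps hot-path acquires concurrent; after that, the reference count rather than the lock is what keeps the task alive.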
+  SArray* pTaskList = streamMetaSendMsgBeforeCloseTasks(pMeta);
+  int32_t numOfTasks = taosArrayGetSize(pTaskList);
+
+  for (int32_t i = 0; i < numOfTasks; ++i) {
+    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
+    SStreamTask*   pTask = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId);
+    if (pTask == NULL) {
+      continue;
+    }
+
+    streamTaskStop(pTask);
+    streamMetaReleaseTask(pMeta, pTask);
+  }
+
+  taosArrayDestroy(pTaskList);
+
+  streamMetaRUnLock(pMeta);
+  return 0;
+}
+
+int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  int32_t vgId = pMeta->vgId;
+
+  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+  stInfo("vgId:%d start to check downstream status for all %d stream task(s)", vgId, numOfTasks);
+  if (numOfTasks == 0) {
+    stInfo("vgId:%d start tasks completed", pMeta->vgId);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  SArray* pTaskList = NULL;
+  streamMetaWLock(pMeta);
+  pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
+  taosHashClear(pMeta->startInfo.pReadyTaskSet);
+  taosHashClear(pMeta->startInfo.pFailedTaskSet);
+  pMeta->startInfo.startTs = taosGetTimestampMs();
+  streamMetaWUnLock(pMeta);
+
+  // broadcast the check-downstream-tasks msg
+  int64_t now = taosGetTimestampMs();
+
+  for (int32_t i = 0; i < numOfTasks; ++i) {
+    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
+
+    SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
+    if (pTask == NULL) {
+      stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId);
+      streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, 0, now, false);
+      continue;
+    }
+
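The loop above operates on a private copy of the task-id list produced by taosArrayDup, so the shared list can be modified while tasks are started or stopped. A tiny sketch of that copy-then-iterate idea, with made-up types standing in for SArray and SStreamTaskId:

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: snapshot the id list, then iterate the snapshot. */
typedef struct {
  long streamId;
  int  taskId;
} TaskId;

static TaskId* dupTaskList(const TaskId* src, int n) {
  TaskId* copy = malloc(sizeof(TaskId) * (size_t)n);
  if (copy != NULL) memcpy(copy, src, sizeof(TaskId) * (size_t)n);
  return copy;
}

int main(void) {
  TaskId  shared[2] = {{1, 0x10}, {1, 0x11}};
  TaskId* list = dupTaskList(shared, 2);
  if (list == NULL) return 1;
  for (int i = 0; i < 2; ++i) {
    /* acquire/start/release happens here in the real loop */
    printf("start task 0x%x of stream %ld\n", list[i].taskId, list[i].streamId);
  }
  free(list);
  return 0;
}
```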
+    // todo: maybe we should find the related fill-history task and set it to failed.
+
+    // fill-history tasks can only be launched by their related stream tasks.
+    STaskExecStatisInfo* pInfo = &pTask->execInfo;
+    if (pTask->info.fillHistory == 1) {
+      stDebug("s-task:%s fill-history task waits for the related stream task to start", pTask->id.idStr);
+      streamMetaReleaseTask(pMeta, pTask);
+      continue;
+    }
+
+    if (pTask->status.downstreamReady == 1) {
+      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+        stDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
+                pTask->id.idStr);
+        streamLaunchFillHistoryTask(pTask);
+      }
+
+      streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, true);
+      streamMetaReleaseTask(pMeta, pTask);
+      continue;
+    }
+
+    int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
+    if (ret != TSDB_CODE_SUCCESS) {
+      stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT);
+      code = ret;
+
+      streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, false);
+      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+        STaskId* pId = &pTask->hTaskInfo.id;
+        streamMetaAddTaskLaunchResult(pMeta, pId->streamId, pId->taskId, pInfo->init, pInfo->start, false);
+      }
+    }
+
+    streamMetaReleaseTask(pMeta, pTask);
+  }
+
+  stInfo("vgId:%d start tasks completed", pMeta->vgId);
+  taosArrayDestroy(pTaskList);
+  return code;
+}
+
+int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
+  int32_t vgId = pMeta->vgId;
+  stInfo("vgId:%d start task:0x%x by checking downstream status", vgId, taskId);
+
+  SStreamTask* pTask = streamMetaAcquireTask(pMeta, streamId, taskId);
+  if (pTask == NULL) {
+    stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, taskId);
+    streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, 0, taosGetTimestampMs(), false);
+    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
+  }
+
+  // todo: maybe we should find the related fill-history task and set it to failed.
+
+  // fill-history tasks can only be launched by their related stream tasks.
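The two comments above carry into the hunk that continues below: both bulk start and single-task start skip tasks flagged as fill-history and leave their launch to the related stream task. A toy illustration of that gating, with simplified fields in place of the real task struct:

```
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: fill-history tasks are never started directly. */
typedef struct {
  int  taskId;
  bool fillHistory;
  bool downstreamReady;
} Task;

static void startOne(const Task* t) {
  if (t->fillHistory) {
    printf("task 0x%x is fill-history, wait for its stream task\n", t->taskId);
    return;
  }
  if (t->downstreamReady) {
    printf("task 0x%x downstream already ready, record success\n", t->taskId);
    return;
  }
  printf("task 0x%x fire TASK_EVENT_INIT\n", t->taskId);
}

int main(void) {
  Task tasks[3] = {{0x1, true, false}, {0x2, false, true}, {0x3, false, false}};
  for (int i = 0; i < 3; ++i) startOne(&tasks[i]);
  return 0;
}
```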
+ STaskExecStatisInfo* pInfo = &pTask->execInfo; + if (pTask->info.fillHistory == 1) { + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_SUCCESS; + } + + ASSERT(pTask->status.downstreamReady == 0); + + int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); + if (ret != TSDB_CODE_SUCCESS) { + stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT); + + streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, pInfo->init, pInfo->start, false); + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + STaskId* pId = &pTask->hTaskInfo.id; + streamMetaAddTaskLaunchResult(pMeta, pId->streamId, pId->taskId, pInfo->init, pInfo->start, false); + } + } + + streamMetaReleaseTask(pMeta, pTask); + return ret; +} \ No newline at end of file diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index d1610362f9..90823cd937 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -17,8 +17,7 @@ #define MAX_STREAM_EXEC_BATCH_NUM 32 #define MAX_SMOOTH_BURST_RATIO 5 // 5 sec -#define WAIT_FOR_DURATION 40 -#define OUTPUT_QUEUE_FULL_WAIT_DURATION 500 // 500 ms +#define WAIT_FOR_DURATION 10 // todo refactor: // read data from input queue @@ -148,8 +147,6 @@ const char* streamQueueItemGetTypeStr(int32_t type) { int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, int32_t* blockSize) { - int32_t retryTimes = 0; - int32_t MAX_RETRY_TIMES = 5; const char* id = pTask->id.idStr; int32_t taskLevel = pTask->info.taskLevel; @@ -157,10 +154,10 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu *numOfBlocks = 0; *blockSize = 0; + // todo remove it // no available token in bucket for sink task, let's wait for a little bit if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->outputInfo.pTokenBucket, pTask->id.idStr))) { stDebug("s-task:%s no available token in bucket for sink data, wait for 10ms", id); - taosMsleep(10); return TSDB_CODE_SUCCESS; } @@ -172,10 +169,6 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputq.queue); if (qItem == NULL) { - if ((taskLevel == TASK_LEVEL__SOURCE || taskLevel == TASK_LEVEL__SINK) && (++retryTimes) < MAX_RETRY_TIMES) { - taosMsleep(WAIT_FOR_DURATION); - continue; - } // restore the token to bucket if (*numOfBlocks > 0) { @@ -342,23 +335,6 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) // the result should be put into the outputQ in any cases, the result may be lost otherwise. 
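The streamTaskPutDataIntoOutputQ hunk that follows replaces the old busy-wait (sleeping in a loop while the output queue is full) with an unconditional put plus a BLOCKING warning checked after the batch. A rough sketch of that backpressure change; the capacity constant and field names here are invented:

```
#include <stdbool.h>
#include <stdio.h>

/* Illustrative capacity, not the real STREAM_TASK_QUEUE_CAPACITY_IN_SIZE. */
#define QUEUE_CAP 4

typedef struct {
  int  items;
  bool blocking;
} OutQueue;

static int putItem(OutQueue* q) {
  q->items += 1;        /* taosWriteQitem() in the real code, never sleeps */
  if (q->items >= QUEUE_CAP) {
    q->blocking = true; /* handled after this batch instead of taosMsleep() */
  }
  return 0;
}

int main(void) {
  OutQueue q = {0};
  for (int i = 0; i < 5; ++i) putItem(&q);
  printf("items:%d blocking:%d\n", q.items, q.blocking);
  return 0;
}
```

Flagging instead of sleeping keeps the producer thread from stalling inside the put path; the cost is that the queue can briefly overshoot its cap by one batch.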
int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock) { STaosQueue* pQueue = pTask->outputq.queue->pQueue; - - // wait for the output queue is available for new data to dispatch - while (streamQueueIsFull(pTask->outputq.queue)) { - if (streamTaskShouldStop(pTask)) { - stInfo("s-task:%s discard result block due to task stop", pTask->id.idStr); - return TSDB_CODE_STREAM_EXEC_CANCELLED; - } - - int32_t total = streamQueueGetNumOfItems(pTask->outputq.queue); - double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue)); - // let's wait for there are enough space to hold this result pBlock - stDebug("s-task:%s outputQ is full, wait for %dms and retry, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, - OUTPUT_QUEUE_FULL_WAIT_DURATION, total, size); - - taosMsleep(OUTPUT_QUEUE_FULL_WAIT_DURATION); - } - int32_t code = taosWriteQitem(pQueue, pBlock); int32_t total = streamQueueGetNumOfItems(pTask->outputq.queue); @@ -367,7 +343,14 @@ int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBloc stError("s-task:%s failed to put res into outputQ, outputQ items:%d, size:%.2fMiB code:%s, result lost", pTask->id.idStr, total + 1, size, tstrerror(code)); } else { - stDebug("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size); + if (streamQueueIsFull(pTask->outputq.queue)) { + stWarn( + "s-task:%s outputQ is full(outputQ items:%d, size:%.2fMiB), set the output status BLOCKING, wait for 500ms " + "after handle this batch of blocks", + pTask->id.idStr, total, size); + } else { + stDebug("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size); + } } return TSDB_CODE_SUCCESS; diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index 45d656c456..2cb77c60dc 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -17,17 +17,14 @@ #include "query.h" #include "streamBackendRocksdb.h" -#include "taos.h" #include "tcommon.h" -#include "thash.h" #include "tsimplehash.h" - -typedef int (*__session_compare_fn_t) (const SSessionKey* pWin, const void* pDatas, int pos); +typedef int (*__session_compare_fn_t)(const SSessionKey* pWin, const void* pDatas, int pos); int sessionStateKeyCompare(const SSessionKey* pWin1, const void* pDatas, int pos) { SRowBuffPos* pPos2 = taosArrayGetP(pDatas, pos); - SSessionKey* pWin2 = (SSessionKey*) pPos2->pKey; + SSessionKey* pWin2 = (SSessionKey*)pPos2->pKey; return sessionWinKeyCmpr(pWin1, pWin2); } @@ -61,12 +58,12 @@ int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_ int64_t getSessionWindowEndkey(void* data, int32_t index) { SArray* pWinInfos = (SArray*)data; SRowBuffPos** ppos = taosArrayGet(pWinInfos, index); - SSessionKey* pWin = (SSessionKey*)((*ppos)->pKey); + SSessionKey* pWin = (SSessionKey*)((*ppos)->pKey); return pWin->win.ekey; } bool inSessionWindow(SSessionKey* pKey, TSKEY ts, int64_t gap) { - if (ts + gap >= pKey->win.skey && ts - gap <= pKey->win.ekey) { + if (ts + gap >= pKey->win.skey && ts - gap <= pKey->win.ekey) { return true; } return false; @@ -80,7 +77,8 @@ static SRowBuffPos* addNewSessionWindow(SStreamFileState* pFileState, SArray* pW return pNewPos; } -static SRowBuffPos* insertNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, const SSessionKey* pKey, int32_t index) { +static SRowBuffPos* insertNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, const SSessionKey* pKey, + 
int32_t index) { SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); ASSERT(pNewPos->pRowBuff); memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); @@ -97,11 +95,12 @@ SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKe return pNewPos; } -int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen) { - int32_t code = TSDB_CODE_SUCCESS; +int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, + int32_t* pVLen) { + int32_t code = TSDB_CODE_SUCCESS; SSHashObj* pSessionBuff = getRowStateBuff(pFileState); - SArray* pWinStates = NULL; - void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); + SArray* pWinStates = NULL; + void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); if (ppBuff) { pWinStates = (SArray*)(*ppBuff); } else { @@ -117,7 +116,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* pFileStore = getStateFileStore(pFileState); void* p = NULL; int32_t code_file = streamStateSessionAddIfNotExist_rocksdb(pFileStore, pKey, gap, &p, pVLen); - if (code_file == TSDB_CODE_SUCCESS) { + if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) { (*pVal) = createSessionWinBuff(pFileState, pKey, p, pVLen); code = code_file; qDebug("===stream===0 get session win:%" PRId64 ",%" PRId64 " from disc, res %d", startTs, endTs, code_file); @@ -146,7 +145,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, if (index + 1 < size) { pPos = taosArrayGetP(pWinStates, index + 1); - if (inSessionWindow(pPos->pKey, startTs, gap) || (endTs != INT64_MIN && inSessionWindow(pPos->pKey, endTs, gap)) ) { + if (inSessionWindow(pPos->pKey, startTs, gap) || (endTs != INT64_MIN && inSessionWindow(pPos->pKey, endTs, gap))) { (*pVal) = pPos; SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey; pPos->beUsed = true; @@ -157,9 +156,9 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, if (index + 1 == 0) { if (!isDeteled(pFileState, endTs) && isFlushedState(pFileState, endTs, gap)) { - void* p = NULL; - void* pFileStore = getStateFileStore(pFileState); - int32_t code_file = streamStateSessionAddIfNotExist_rocksdb(pFileStore, pKey, gap, &p, pVLen); + void* p = NULL; + void* pFileStore = getStateFileStore(pFileState); + int32_t code_file = streamStateSessionAddIfNotExist_rocksdb(pFileStore, pKey, gap, &p, pVLen); if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) { (*pVal) = createSessionWinBuff(pFileState, pKey, p, pVLen); code = code_file; @@ -184,10 +183,10 @@ _end: } int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos) { - SSHashObj* pSessionBuff = getRowStateBuff(pFileState); + SSHashObj* pSessionBuff = getRowStateBuff(pFileState); SSessionKey* pKey = pPos->pKey; - SArray* pWinStates = NULL; - void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); + SArray* pWinStates = NULL; + void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); if (ppBuff) { pWinStates = (SArray*)(*ppBuff); } else { @@ -202,7 +201,7 @@ int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos) } // find the first position which is smaller than the pKey - int32_t index = binarySearch(pWinStates, size, pKey, sessionStateKeyCompare); + int32_t index = binarySearch(pWinStates, size, pKey, sessionStateKeyCompare); if (index >= 
0) { taosArrayInsert(pWinStates, index, &pPos); } else { @@ -218,7 +217,7 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); pNewPos->needFree = true; - void* pBuff = NULL; + void* pBuff = NULL; int32_t code = streamStateSessionGet_rocksdb(getStateFileStore(pFileState), pKey, &pBuff, pVLen); if (code != TSDB_CODE_SUCCESS) { return code; @@ -229,16 +228,16 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v return TSDB_CODE_SUCCESS; } -int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen) { - SSHashObj* pSessionBuff = (SSHashObj*) pBuff; - SSessionKey* pWinKey = (SSessionKey*) key; - void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); +int32_t deleteSessionWinStateBuffFn(void* pBuff, const void* key, size_t keyLen) { + SSHashObj* pSessionBuff = (SSHashObj*)pBuff; + SSessionKey* pWinKey = (SSessionKey*)key; + void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); if (!ppBuff) { return TSDB_CODE_SUCCESS; } SArray* pWinStates = (SArray*)(*ppBuff); int32_t size = taosArrayGetSize(pWinStates); - TSKEY gap = 0; + TSKEY gap = 0; int32_t index = binarySearch(pWinStates, size, pWinKey, sessionStateKeyCompare); if (index >= 0) { SRowBuffPos* pPos = taosArrayGetP(pWinStates, index); @@ -251,15 +250,15 @@ int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen) } int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos) { - SSHashObj* pSessionBuff = getRowStateBuff(pFileState); - SSessionKey* pWinKey = (SSessionKey*) pPos->pKey; - void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); + SSHashObj* pSessionBuff = getRowStateBuff(pFileState); + SSessionKey* pWinKey = (SSessionKey*)pPos->pKey; + void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); if (!ppBuff) { return TSDB_CODE_SUCCESS; } SArray* pWinStates = (SArray*)(*ppBuff); int32_t size = taosArrayGetSize(pWinStates); - TSKEY gap = 0; + TSKEY gap = 0; int32_t index = binarySearch(pWinStates, size, pWinKey, sessionStateKeyCompare); if (index >= 0) { SRowBuffPos* pItemPos = taosArrayGetP(pWinStates, index); @@ -270,11 +269,12 @@ int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffP return TSDB_CODE_SUCCESS; } -int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur, const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen) { +int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur, + const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen) { SRowBuffPos* pNewPos = NULL; - SSHashObj* pSessionBuff = getRowStateBuff(pFileState); - void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); - SArray* pWinStates = NULL; + SSHashObj* pSessionBuff = getRowStateBuff(pFileState); + void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); + SArray* pWinStates = NULL; if (!ppBuff) { pWinStates = taosArrayInit(16, POINTER_BYTES); tSimpleHashPut(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES); @@ -297,10 +297,10 @@ int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStream } else { if (size > 0) { SRowBuffPos* pPos = taosArrayGetP(pWinStates, 0); - if (sessionWinKeyCmpr(pWinKey, pPos->pKey) >=0) 
{ + if (sessionWinKeyCmpr(pWinKey, pPos->pKey) >= 0) { // pCur is invalid SSessionKey pTmpKey = *pWinKey; - int32_t code = getSessionWinResultBuff(pFileState, &pTmpKey, 0, (void**)&pNewPos, pVLen); + int32_t code = getSessionWinResultBuff(pFileState, &pTmpKey, 0, (void**)&pNewPos, pVLen); ASSERT(code == TSDB_CODE_FAILED); goto _end; } @@ -320,7 +320,7 @@ void sessionWinStateClear(SStreamFileState* pFileState) { void* pIte = NULL; size_t keyLen = 0; int32_t iter = 0; - void* pBuff = getRowStateBuff(pFileState); + void* pBuff = getRowStateBuff(pFileState); while ((pIte = tSimpleHashIterate(pBuff, pIte, &iter)) != NULL) { SArray* pWinStates = *((void**)pIte); int32_t size = taosArrayGetSize(pWinStates); @@ -336,7 +336,7 @@ void sessionWinStateCleanup(void* pBuff) { size_t keyLen = 0; int32_t iter = 0; while ((pIte = tSimpleHashIterate(pBuff, pIte, &iter)) != NULL) { - SArray* pWinStates = (SArray*) (*(void**)pIte); + SArray* pWinStates = (SArray*)(*(void**)pIte); taosArrayDestroy(pWinStates); } tSimpleHashCleanup(pBuff); @@ -395,11 +395,13 @@ static void transformCursor(SStreamFileState* pFileState, SStreamStateCur* pCur) pCur->pStreamFileState = pFileState; } -static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t groupId, SArray* pWinStates, SStreamStateCur** ppCur) { +static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t groupId, SArray* pWinStates, + SStreamStateCur** ppCur) { SSessionKey key = {.groupId = groupId}; - int32_t code = streamStateSessionGetKVByCur_rocksdb(*ppCur, &key, NULL, NULL); - if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) { - if ( !(*ppCur) ) { + int32_t code = streamStateSessionGetKVByCur_rocksdb(*ppCur, &key, NULL, NULL); + if (taosArrayGetSize(pWinStates) > 0 && + (code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) { + if (!(*ppCur)) { (*ppCur) = createStreamStateCursor(); } transformCursor(pFileState, *ppCur); @@ -410,8 +412,8 @@ static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t } SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey) { - SArray* pWinStates = NULL; - int32_t index = -1; + SArray* pWinStates = NULL; + int32_t index = -1; SStreamStateCur* pCur = seekKeyCurrentPrev_buff(pFileState, pWinKey, &pWinStates, &index); if (pCur) { if (sessionStateKeyCompare(pWinKey, pWinStates, index) > 0) { @@ -427,8 +429,8 @@ SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, } SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey) { - SArray* pWinStates = NULL; - int32_t index = -1; + SArray* pWinStates = NULL; + int32_t index = -1; SStreamStateCur* pCur = seekKeyCurrentPrev_buff(pFileState, pWinKey, &pWinStates, &index); if (pCur) { sessionWinStateMoveToNext(pCur); @@ -449,13 +451,13 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void SSHashObj* pSessionBuff = getRowStateBuff(pCur->pStreamFileState); void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); - if (!ppBuff) { - return TSDB_CODE_FAILED; + SArray* pWinStates = NULL; + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); } - SArray* pWinStates = (SArray*)(*ppBuff); - int32_t size = taosArrayGetSize(pWinStates); if (pCur->buffIndex >= 0) { + int32_t size = taosArrayGetSize(pWinStates); if (pCur->buffIndex >= size) { return TSDB_CODE_FAILED; } @@ 
-467,7 +469,8 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void } else { void* pData = NULL; code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, &pData, pVLen); - if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateKeyCompare(pKey, pWinStates, 0) >= 0)) { + if (taosArrayGetSize(pWinStates) > 0 && + (code == TSDB_CODE_FAILED || sessionStateKeyCompare(pKey, pWinStates, 0) >= 0)) { transformCursor(pCur->pStreamFileState, pCur); SRowBuffPos* pPos = taosArrayGetP(pWinStates, pCur->buffIndex); if (pVal) { @@ -498,9 +501,9 @@ int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur) { int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey) { SStreamStateCur* pCur = sessionWinStateSeekKeyCurrentPrev(pFileState, key); - SSessionKey tmpKey = *key; - int32_t code = sessionWinStateGetKVByCur(pCur, &tmpKey, NULL, NULL); - bool hasCurrentPrev = true; + SSessionKey tmpKey = *key; + int32_t code = sessionWinStateGetKVByCur(pCur, &tmpKey, NULL, NULL); + bool hasCurrentPrev = true; if (code == TSDB_CODE_FAILED) { streamStateFreeCur(pCur); pCur = sessionWinStateSeekKeyNext(pFileState, key); @@ -535,13 +538,13 @@ _end: } int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, - state_key_cmpr_fn fn, void** pVal, int32_t* pVLen) { + state_key_cmpr_fn fn, void** pVal, int32_t* pVLen) { SSessionKey* pWinKey = key; - TSKEY gap = 0; - int32_t code = TSDB_CODE_SUCCESS; - SSHashObj* pSessionBuff = getRowStateBuff(pFileState); - SArray* pWinStates = NULL; - void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); + TSKEY gap = 0; + int32_t code = TSDB_CODE_SUCCESS; + SSHashObj* pSessionBuff = getRowStateBuff(pFileState); + SArray* pWinStates = NULL; + void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t)); if (ppBuff) { pWinStates = (SArray*)(*ppBuff); } else { @@ -557,10 +560,11 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch void* pFileStore = getStateFileStore(pFileState); void* p = NULL; int32_t code_file = streamStateStateAddIfNotExist_rocksdb(pFileStore, pWinKey, pKeyData, keyDataLen, fn, &p, pVLen); - if (code_file == TSDB_CODE_SUCCESS) { + if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) { (*pVal) = createSessionWinBuff(pFileState, pWinKey, p, pVLen); code = code_file; - qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code_file); + qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, + pWinKey->win.ekey, code_file); } else { (*pVal) = addNewSessionWindow(pFileState, pWinStates, key); code = TSDB_CODE_FAILED; @@ -589,7 +593,8 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch if (index + 1 < size) { pPos = taosArrayGetP(pWinStates, index + 1); void* stateKey = (char*)(pPos->pRowBuff) + (valSize - keyDataLen); - if (inSessionWindow(pPos->pKey, startTs, gap) || (endTs != INT64_MIN && inSessionWindow(pPos->pKey, endTs, gap)) || fn(pKeyData, stateKey) == true) { + if (inSessionWindow(pPos->pKey, startTs, gap) || (endTs != INT64_MIN && inSessionWindow(pPos->pKey, endTs, gap)) || + fn(pKeyData, stateKey) == true) { (*pVal) = pPos; SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey; pPos->beUsed = true; @@ -607,7 +612,8 @@ int32_t getStateWinResultBuff(SStreamFileState* 
pFileState, SSessionKey* key, ch if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) { (*pVal) = createSessionWinBuff(pFileState, pWinKey, p, pVLen); code = code_file; - qDebug("===stream===1 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code_file); + qDebug("===stream===1 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, + pWinKey->win.ekey, code_file); goto _end; } else { taosMemoryFree(p); diff --git a/source/libs/stream/src/streamSnapshot.c b/source/libs/stream/src/streamSnapshot.c index e29f2ba7de..a69cb75dad 100644 --- a/source/libs/stream/src/streamSnapshot.c +++ b/source/libs/stream/src/streamSnapshot.c @@ -15,10 +15,8 @@ #include "streamSnapshot.h" #include "query.h" -#include "rocksdb/c.h" #include "streamBackendRocksdb.h" #include "streamInt.h" -#include "tcommon.h" enum SBackendFileType { ROCKSDB_OPTIONS_TYPE = 1, @@ -158,7 +156,7 @@ void snapFileDebugInfo(SBackendSnapFile2* pSnapFile) { } sprintf(buf + strlen(buf) - 1, "]"); - qInfo("%s %" PRId64 "-%" PRId64 " get file list: %s", STREAM_STATE_TRANSFER, pSnapFile->snapInfo.streamId, + stInfo("%s %" PRId64 "-%" PRId64 " get file list: %s", STREAM_STATE_TRANSFER, pSnapFile->snapInfo.streamId, pSnapFile->snapInfo.taskId, buf); taosMemoryFree(buf); } @@ -203,7 +201,7 @@ int32_t snapFileGenMeta(SBackendSnapFile2* pSnapFile) { int32_t snapFileReadMeta(SBackendSnapFile2* pSnapFile) { TdDirPtr pDir = taosOpenDir(pSnapFile->path); if (NULL == pDir) { - qError("%s failed to open %s", STREAM_STATE_TRANSFER, pSnapFile->path); + stError("%s failed to open %s", STREAM_STATE_TRANSFER, pSnapFile->path); return -1; } @@ -397,7 +395,7 @@ _NEXT: } item = taosArrayGet(pSnapFile->pFileList, pSnapFile->currFileIdx); - qDebug("%s start to read file %s, current offset:%" PRId64 ", size:%" PRId64 + stDebug("%s start to read file %s, current offset:%" PRId64 ", size:%" PRId64 ", file no.%d, total set:%d, current set idx: %d", STREAM_STATE_TRANSFER, item->name, (int64_t)pSnapFile->offset, item->size, pSnapFile->currFileIdx, (int)taosArrayGetSize(pHandle->pDbSnapSet), pHandle->currIdx); @@ -515,7 +513,7 @@ int32_t streamSnapWriteImpl(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t stError("%s failed to write snap, file name:%s, reason:%s", STREAM_STATE_TRANSFER, pHdr->name, tstrerror(code)); return code; } else { - qInfo("succ to write data %s", pItem->name); + stInfo("succ to write data %s", pItem->name); } pSnapFile->offset += bytes; } else { @@ -538,7 +536,7 @@ int32_t streamSnapWriteImpl(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t } taosPWriteFile(pSnapFile->fd, pHdr->data, pHdr->size, pSnapFile->offset); - qInfo("succ to write data %s", pItem->name); + stInfo("succ to write data %s", pItem->name); pSnapFile->offset += pHdr->size; } code = 0; @@ -563,7 +561,7 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa "checkpoint", snapInfo.chkpId); if (!taosIsDir(path)) { code = taosMulMkDir(path); - qInfo("%s mkdir %s", STREAM_STATE_TRANSFER, path); + stInfo("%s mkdir %s", STREAM_STATE_TRANSFER, path); ASSERT(code == 0); } diff --git a/source/libs/stream/src/streamStart.c b/source/libs/stream/src/streamStart.c index 309f377621..84f1dbb4d7 100644 --- a/source/libs/stream/src/streamStart.c +++ b/source/libs/stream/src/streamStart.c @@ -14,18 +14,19 @@ */ #include "streamInt.h" +#include "streamsm.h" #include "trpc.h" #include "ttimer.h" #include "wal.h" -#include "streamsm.h" -#define SCANHISTORY_IDLE_TIME_SLICE 
100 // 100ms -#define SCANHISTORY_MAX_IDLE_TIME 10 // 10 sec -#define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE) +#define SCANHISTORY_IDLE_TIME_SLICE 100 // 100ms +#define SCANHISTORY_MAX_IDLE_TIME 10 // 10 sec +#define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE) typedef struct SLaunchHTaskInfo { SStreamMeta* pMeta; STaskId id; + STaskId hTaskId; } SLaunchHTaskInfo; typedef struct STaskRecheckInfo { @@ -43,20 +44,19 @@ typedef struct STaskInitTs { static int32_t streamSetParamForScanHistory(SStreamTask* pTask); static void streamTaskSetRangeStreamCalc(SStreamTask* pTask); static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated); -static SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); +static SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t hStreamId, + int32_t hTaskId); static void tryLaunchHistoryTask(void* param, void* tmrId); static void doProcessDownstreamReadyRsp(SStreamTask* pTask); int32_t streamTaskSetReady(SStreamTask* pTask) { - char* p = NULL; int32_t numOfDowns = streamTaskGetNumOfDownstream(pTask); - ETaskStatus status = streamTaskGetStatus(pTask, &p); + SStreamTaskState* p = streamTaskGetStatus(pTask); - if ((status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY) && - pTask->info.taskLevel != TASK_LEVEL__SOURCE) { + if ((p->state == TASK_STATUS__SCAN_HISTORY) && pTask->info.taskLevel != TASK_LEVEL__SOURCE) { pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->upstreamInfo.pList); stDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s", - pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream, p); + pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream, p->name); } ASSERT(pTask->status.downstreamReady == 0); @@ -65,10 +65,7 @@ int32_t streamTaskSetReady(SStreamTask* pTask) { pTask->execInfo.start = taosGetTimestampMs(); int64_t el = (pTask->execInfo.start - pTask->execInfo.init); stDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%" PRId64 "ms, task status:%s", - pTask->id.idStr, numOfDowns, el, p); - - streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, pTask->execInfo.init, - pTask->execInfo.start, true); + pTask->id.idStr, numOfDowns, el, p->name); return TSDB_CODE_SUCCESS; } @@ -77,7 +74,7 @@ int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) { initScanHistoryReq(pTask, &req, igUntreated); int32_t len = sizeof(SStreamScanHistoryReq); - void* serializedReq = rpcMallocCont(len); + void* serializedReq = rpcMallocCont(len); if (serializedReq == NULL) { return -1; } @@ -96,12 +93,11 @@ static void doReExecScanhistory(void* param, void* tmrId) { SStreamTask* pTask = param; pTask->schedHistoryInfo.numOfTicks -= 1; - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); - if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) { + SStreamTaskState* p = streamTaskGetStatus(pTask); + if (p->state == TASK_STATUS__DROPPING || p->state == TASK_STATUS__STOP) { streamMetaReleaseTask(pTask->pMeta, pTask); int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p, ref); + stDebug("s-task:%s status:%s not start scan-history again, ref:%d", 
pTask->id.idStr, p->name, ref); return; } @@ -115,8 +111,7 @@ static void doReExecScanhistory(void* param, void* tmrId) { // release the task. streamMetaReleaseTask(pTask->pMeta, pTask); } else { - taosTmrReset(doReExecScanhistory, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, - &pTask->schedHistoryInfo.pTimer); + taosTmrReset(doReExecScanhistory, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, &pTask->schedHistoryInfo.pTimer); } } @@ -135,7 +130,7 @@ int32_t streamReExecScanHistoryFuture(SStreamTask* pTask, int32_t idleDuration) pTask->schedHistoryInfo.numOfTicks = numOfTicks; int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks*0.1, ref); + stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks * 0.1, ref); if (pTask->schedHistoryInfo.pTimer == NULL) { pTask->schedHistoryInfo.pTimer = taosTmrStart(doReExecScanhistory, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer); @@ -158,11 +153,10 @@ static int32_t doStartScanHistoryTask(SStreamTask* pTask) { } int32_t streamTaskStartScanHistory(SStreamTask* pTask) { - int32_t level = pTask->info.taskLevel; - ETaskStatus status = streamTaskGetStatus(pTask, NULL); + int32_t level = pTask->info.taskLevel; + ETaskStatus status = streamTaskGetStatus(pTask)->state; - ASSERT(pTask->status.downstreamReady == 1 && - ((status == TASK_STATUS__SCAN_HISTORY) || (status == TASK_STATUS__STREAM_SCAN_HISTORY))); + ASSERT((pTask->status.downstreamReady == 1) && (status == TASK_STATUS__SCAN_HISTORY)); if (level == TASK_LEVEL__SOURCE) { return doStartScanHistoryTask(pTask); @@ -210,10 +204,14 @@ void streamTaskCheckDownstream(SStreamTask* pTask) { int32_t numOfVgs = taosArrayGetSize(vgInfo); pTask->notReadyTasks = numOfVgs; - pTask->checkReqIds = taosArrayInit(numOfVgs, sizeof(int64_t)); + if (pTask->checkReqIds == NULL) { + pTask->checkReqIds = taosArrayInit(numOfVgs, sizeof(int64_t)); + } else { + taosArrayClear(pTask->checkReqIds); + } stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64, - pTask->id.idStr, numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey); + pTask->id.idStr, numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey); for (int32_t i = 0; i < numOfVgs; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); @@ -263,7 +261,7 @@ static void destroyRecheckInfo(STaskRecheckInfo* pInfo) { static void recheckDownstreamTasks(void* param, void* tmrId) { STaskRecheckInfo* pInfo = param; - SStreamTask* pTask = pInfo->pTask; + SStreamTask* pTask = pInfo->pTask; SStreamTaskCheckReq* pReq = &pInfo->req; @@ -290,7 +288,8 @@ static void recheckDownstreamTasks(void* param, void* tmrId) { stDebug("s-task:%s complete send check in timer, ref:%d", pTask->id.idStr, ref); } -int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage, int64_t* oldStage) { +int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage, + int64_t* oldStage) { SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, upstreamTaskId); ASSERT(pInfo != NULL); @@ -312,9 +311,24 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_ stError("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), new stage received:%" PRId64 ", prev:%" PRId64, id, upstreamTaskId, vgId, stage, pInfo->stage); + // record the checkpoint failure id 
and sent to mnode + taosThreadMutexLock(&pTask->lock); + ETaskStatus status = streamTaskGetStatus(pTask)->state; + if (status == TASK_STATUS__CK) { + streamTaskSetCheckpointFailedId(pTask); + } + taosThreadMutexUnlock(&pTask->lock); } if (pInfo->stage != stage) { + + taosThreadMutexLock(&pTask->lock); + ETaskStatus status = streamTaskGetStatus(pTask)->state; + if (status == TASK_STATUS__CK) { + streamTaskSetCheckpointFailedId(pTask); + } + taosThreadMutexUnlock(&pTask->lock); + return TASK_UPSTREAM_NEW_STAGE; } else if (pTask->status.downstreamReady != 1) { stDebug("s-task:%s vgId:%d leader:%d, downstream not ready", id, vgId, (pTask->pMeta->role == NODE_ROLE_LEADER)); @@ -330,20 +344,20 @@ int32_t streamTaskOnNormalTaskReady(SStreamTask* pTask) { streamTaskSetReady(pTask); streamTaskSetRangeStreamCalc(pTask); - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); - ASSERT(status == TASK_STATUS__READY); + SStreamTaskState* p = streamTaskGetStatus(pTask); + ASSERT(p->state == TASK_STATUS__READY); + int8_t schedStatus = pTask->status.schedStatus; if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { int64_t startVer = walReaderGetCurrentVer(pTask->exec.pWalReader); if (startVer == -1) { startVer = pTask->chkInfo.nextProcessVer; } - stDebug("s-task:%s no need to scan-history data, status:%s, sched-status:%d, ready for data from wal ver:%" PRId64, - id, p, pTask->status.schedStatus, startVer); + stDebug("s-task:%s status:%s, sched-status:%d, ready for data from wal ver:%" PRId64, id, p->name, schedStatus, + startVer); } else { - stDebug("s-task:%s level:%d status:%s sched-status:%d", id, pTask->info.taskLevel, p, pTask->status.schedStatus); + stDebug("s-task:%s level:%d status:%s sched-status:%d", id, pTask->info.taskLevel, p->name, schedStatus); } return TSDB_CODE_SUCCESS; @@ -356,30 +370,40 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) { streamTaskSetReady(pTask); streamTaskSetRangeStreamCalc(pTask); - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); - ASSERT(status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY); + SStreamTaskState* p = streamTaskGetStatus(pTask); + ASSERT(p->state == TASK_STATUS__SCAN_HISTORY); - stDebug("s-task:%s enter into scan-history data stage, status:%s", id, p); - streamTaskStartScanHistory(pTask); - - // start the related fill-history task, when current task is ready - if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - streamLaunchFillHistoryTask(pTask); + if (pTask->info.fillHistory == 1) { + stDebug("s-task:%s fill-history task enters into scan-history data stage, status:%s", id, p->name); + streamTaskStartScanHistory(pTask); + } else { + stDebug("s-task:%s scan wal data, status:%s", id, p->name); } + // NOTE: there will be an deadlock if launch fill history here. +// // start the related fill-history task, when current task is ready +// if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { +// streamLaunchFillHistoryTask(pTask); +// } + return TSDB_CODE_SUCCESS; } void doProcessDownstreamReadyRsp(SStreamTask* pTask) { - EStreamTaskEvent event; - if (pTask->info.fillHistory == 0) { - event = HAS_RELATED_FILLHISTORY_TASK(pTask)? TASK_EVENT_INIT_STREAM_SCANHIST:TASK_EVENT_INIT; - } else { - event = TASK_EVENT_INIT_SCANHIST; - } + EStreamTaskEvent event = (pTask->info.fillHistory == 0) ? 
TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST; streamTaskOnHandleEventSuccess(pTask->status.pSM, event); + + int64_t initTs = pTask->execInfo.init; + int64_t startTs = pTask->execInfo.start; + streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, initTs, startTs, true); + + // start the related fill-history task, when current task is ready + // not invoke in success callback due to the deadlock. + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + stDebug("s-task:%s try to launch related fill-history task", pTask->id.idStr); + streamLaunchFillHistoryTask(pTask); + } } static void addIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) { @@ -418,8 +442,7 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs if (pRsp->status == TASK_DOWNSTREAM_READY) { if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - - bool found = false; + bool found = false; int32_t numOfReqs = taosArrayGetSize(pTask->checkReqIds); for (int32_t i = 0; i < numOfReqs; i++) { int64_t reqId = *(int64_t*)taosArrayGet(pTask->checkReqIds, i); @@ -437,8 +460,7 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs ASSERT(left >= 0); if (left == 0) { - taosArrayDestroy(pTask->checkReqIds); - pTask->checkReqIds = NULL; + pTask->checkReqIds = taosArrayDestroy(pTask->checkReqIds);; doProcessDownstreamReadyRsp(pTask); } else { @@ -457,10 +479,10 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs } else { // not ready, wait for 100ms and retry if (pRsp->status == TASK_UPSTREAM_NEW_STAGE || pRsp->status == TASK_DOWNSTREAM_NOT_LEADER) { if (pRsp->status == TASK_UPSTREAM_NEW_STAGE) { - stError( - "s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%"PRId64", current stage:%"PRId64", " - "not check wait for downstream task nodeUpdate, and all tasks restart", - id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage); + stError("s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%" PRId64 + ", current stage:%" PRId64 + ", not check wait for downstream task nodeUpdate, and all tasks restart", + id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage); addIntoNodeUpdateList(pTask, pRsp->upstreamNodeId); } else { stError( @@ -470,23 +492,20 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs addIntoNodeUpdateList(pTask, pRsp->downstreamNodeId); } - streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, pTask->execInfo.init, - taosGetTimestampMs(), false); + int32_t startTs = pTask->execInfo.init; + int64_t now = taosGetTimestampMs(); + streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, startTs, now, false); // automatically set the related fill-history task to be failed. 
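The block below records the same launch failure for the related fill-history task, so the start-summary accounting (ready plus failed equals total) still closes even though that task never ran its own downstream check. Schematically, under hypothetical names:

```
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: propagate a check failure to the related task. */
typedef struct {
  int  taskId;
  bool hasHistory;
  int  historyTaskId;
} Task;

static void addLaunchResult(int taskId, bool ok) {
  printf("task 0x%x launch %s\n", taskId, ok ? "ok" : "failed");
}

static void onCheckFailed(const Task* t) {
  addLaunchResult(t->taskId, false);
  if (t->hasHistory) addLaunchResult(t->historyTaskId, false);
}

int main(void) {
  Task t = {.taskId = 0x21, .hasHistory = true, .historyTaskId = 0x22};
  onCheckFailed(&t);
  return 0;
}
```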
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { STaskId* pId = &pTask->hTaskInfo.id; - - SStreamTask* pHTask = streamMetaAcquireTask(pTask->pMeta, pId->streamId, pId->taskId); - streamMetaUpdateTaskDownstreamStatus(pHTask->pMeta, pId->streamId, pId->taskId, pHTask->execInfo.init, - taosGetTimestampMs(), false); - streamMetaReleaseTask(pTask->pMeta, pHTask); + streamMetaAddTaskLaunchResult(pTask->pMeta, pId->streamId, pId->taskId, startTs, now, false); } } else { // TASK_DOWNSTREAM_NOT_READY, let's retry in 100ms STaskRecheckInfo* pInfo = createRecheckInfo(pTask, pRsp); int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, stage:%"PRId64", retry in 100ms, ref:%d ", id, + stDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, stage:%" PRId64 ", retry in 100ms, ref:%d ", id, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->oldStage, ref); pInfo->checkTimer = taosTmrStart(recheckDownstreamTasks, CHECK_DOWNSTREAM_INTERVAL, pInfo, streamTimer); } @@ -496,7 +515,7 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs } int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp, - SRpcHandleInfo *pRpcInfo, int32_t taskId) { + SRpcHandleInfo* pRpcInfo, int32_t taskId) { SEncoder encoder; int32_t code; int32_t len; @@ -527,17 +546,26 @@ int32_t streamSetParamForScanHistory(SStreamTask* pTask) { return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor); } +int32_t streamResetParamForScanHistory(SStreamTask* pTask) { + stDebug("s-task:%s reset operator option for scan-history data", pTask->id.idStr); + if (pTask->exec.pExecutor != NULL) { + return qResetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor); + } else { + return TSDB_CODE_SUCCESS; + } +} + int32_t streamRestoreParam(SStreamTask* pTask) { stDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr); return qRestoreStreamOperatorOption(pTask->exec.pExecutor); } // source -int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange *pVerRange, STimeWindow* pWindow) { +int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow) { return qStreamSourceScanParamForHistoryScanStep1(pTask->exec.pExecutor, pVerRange, pWindow); } -int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange *pVerRange, STimeWindow* pWindow) { +int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow) { return qStreamSourceScanParamForHistoryScanStep2(pTask->exec.pExecutor, pVerRange, pWindow); } @@ -567,7 +595,7 @@ int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask) { pBlock->info.rows = 1; pBlock->info.childId = pTask->info.selfChildId; - pTranstate->blocks = taosArrayInit(4, sizeof(SSDataBlock));//pBlock; + pTranstate->blocks = taosArrayInit(4, sizeof(SSDataBlock)); // pBlock; taosArrayPush(pTranstate->blocks, pBlock); taosMemoryFree(pBlock); @@ -597,12 +625,11 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory ASSERT(taskLevel == TASK_LEVEL__AGG || taskLevel == TASK_LEVEL__SINK); const char* id = pTask->id.idStr; - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); + SStreamTaskState* p = streamTaskGetStatus(pTask); - if (status != TASK_STATUS__SCAN_HISTORY && status != TASK_STATUS__STREAM_SCAN_HISTORY) { - stError("s-task:%s not in scan-history status, status:%s 
return upstream:0x%x scan-history finish directly", - id, p, pReq->upstreamTaskId); + if (p->state != TASK_STATUS__SCAN_HISTORY) { + stError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly", id, + p->name, pReq->upstreamTaskId); void* pBuf = NULL; int32_t len = 0; @@ -612,8 +639,8 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory initRpcMsg(&msg, 0, pBuf, sizeof(SMsgHead) + len); tmsgSendRsp(&msg); - stDebug("s-task:%s level:%d notify upstream:0x%x(vgId:%d) to continue process data in WAL", id, - taskLevel, pReq->upstreamTaskId, pReq->upstreamNodeId); + stDebug("s-task:%s level:%d notify upstream:0x%x(vgId:%d) to continue process data in WAL", id, taskLevel, + pReq->upstreamTaskId, pReq->upstreamNodeId); return 0; } @@ -642,22 +669,27 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory // mnode will not send the pause/resume message to the sink task, so no need to enable the pause for sink tasks. if (taskLevel == TASK_LEVEL__AGG) { - /*int32_t code = */streamTaskScanHistoryDataComplete(pTask); + /*int32_t code = */ streamTaskScanHistoryDataComplete(pTask); } else { // for sink task, set normal streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); } } else { - stDebug("s-task:%s receive scan-history data finish msg from upstream:0x%x(index:%d), unfinished:%d", - id, pReq->upstreamTaskId, pReq->childId, left); + stDebug("s-task:%s receive scan-history data finish msg from upstream:0x%x(index:%d), unfinished:%d", id, + pReq->upstreamTaskId, pReq->childId, left); } return 0; } int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) { - ETaskStatus status = streamTaskGetStatus(pTask, NULL); + ETaskStatus status = streamTaskGetStatus(pTask)->state; - ASSERT(status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY); + // task restart now, not handle the scan-history finish rsp + if (status == TASK_STATUS__UNINIT) { + return TSDB_CODE_INVALID_MSG; + } + + ASSERT(status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/); SStreamMeta* pMeta = pTask->pMeta; // execute in the scan history complete call back msg, ready to process data from inputQ @@ -687,9 +719,9 @@ static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask) if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { stDebug("s-task:%s set the launch condition for fill-history s-task:%s, window:%" PRId64 " - %" PRId64 - " verRange:%" PRId64 " - %" PRId64", init:%"PRId64, - pTask->id.idStr, pHTask->id.idStr, pRange->window.skey, pRange->window.ekey, - pRange->range.minVer, pRange->range.maxVer, pHTask->execInfo.init); + " verRange:%" PRId64 " - %" PRId64 ", init:%" PRId64, + pTask->id.idStr, pHTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, + pRange->range.maxVer, pHTask->execInfo.init); } else { stDebug("s-task:%s no fill-history condition for non-source task:%s", pTask->id.idStr, pHTask->id.idStr); } @@ -698,33 +730,80 @@ static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask) streamTaskHandleEvent(pHTask->status.pSM, TASK_EVENT_INIT_SCANHIST); } +static void noRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) { + SStreamMeta* pMeta = pTask->pMeta; + SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; + + int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, 
pInfo->hTaskId.taskId, 0, now, false); + + stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d", + pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref); + + pHTaskInfo->id.taskId = 0; + pHTaskInfo->id.streamId = 0; +} + +static void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) { + SStreamMeta* pMeta = pTask->pMeta; + SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; + + if (streamTaskShouldStop(pTask)) { // record the failure + int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64 ", ref:%d", pInfo->id.taskId, + pInfo->hTaskId.taskId, ref); + + streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); + taosMemoryFree(pInfo); + } else { + char* p = streamTaskGetStatus(pTask)->name; + int32_t hTaskId = pHTaskInfo->id.taskId; + + stDebug("s-task:%s status:%s failed to launch fill-history task:0x%x, retry launch:%dms, retryCount:%d", + pTask->id.idStr, p, hTaskId, pHTaskInfo->waitInterval, pHTaskInfo->retryTimes); + + taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer); + } +} + static void tryLaunchHistoryTask(void* param, void* tmrId) { SLaunchHTaskInfo* pInfo = param; SStreamMeta* pMeta = pInfo->pMeta; + int64_t now = taosGetTimestampMs(); streamMetaWLock(pMeta); SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id)); - if (ppTask) { + if (ppTask == NULL || *ppTask == NULL) { + stError("s-task:0x%x and rel fill-history task:0x%" PRIx64 " all have been destroyed, not launch", + (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId); + streamMetaWUnLock(pMeta); + + // already dropped, no need to set the failure info into the stream task meta. 
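The free on the next line reflects the ownership rule running through tryLaunchHistoryTask: the timer callback is the final owner of its heap-allocated param, so every exit path, early return or normal, must free it exactly once. A reduced sketch with illustrative names:

```
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: the callback owns and frees its param on all paths. */
typedef struct {
  int taskId;
  int hTaskId;
} LaunchInfo;

static void tryLaunch(void* param, int taskStillExists) {
  LaunchInfo* info = param;
  if (!taskStillExists) {
    printf("task 0x%x gone, give up on 0x%x\n", info->taskId, info->hTaskId);
    free(info); /* early-return path frees the param */
    return;
  }
  printf("launch fill-history 0x%x\n", info->hTaskId);
  free(info); /* normal path frees it too */
}

int main(void) {
  LaunchInfo* p = malloc(sizeof(*p));
  if (p == NULL) return 1;
  p->taskId = 0x31;
  p->hTaskId = 0x32;
  tryLaunch(p, 0);
  return 0;
}
```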
+ taosMemoryFree(pInfo); + return; + } + + if (streamTaskShouldStop(*ppTask)) { ASSERT((*ppTask)->status.timerActive >= 1); - if (streamTaskShouldStop(*ppTask)) { - char* p = NULL; - streamTaskGetStatus((*ppTask), &p); - int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); - stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d", - (*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref); + char* p = streamTaskGetStatus(*ppTask)->name; + int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); + stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d", + (*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref); - taosMemoryFree(pInfo); - streamMetaWUnLock(pMeta); - return; - } + streamMetaWUnLock(pMeta); + + // record the related fill-history task failed + streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); + taosMemoryFree(pInfo); + return; } + + SStreamTask* pTask = streamMetaAcquireTaskNoLock(pMeta, pInfo->id.streamId, pInfo->id.taskId); streamMetaWUnLock(pMeta); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pInfo->id.streamId, pInfo->id.taskId); if (pTask != NULL) { - SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; pHTaskInfo->tickCount -= 1; @@ -735,119 +814,155 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) { } if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - streamMetaReleaseTask(pMeta, pTask); - - stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d", - pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref); - - pHTaskInfo->id.taskId = 0; - pHTaskInfo->id.streamId = 0; + noRetryLaunchFillHistoryTask(pTask, pInfo, now); } else { // not reach the limitation yet, let's continue retrying launch related fill-history task. 
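The retry branch that continues below re-arms the timer until MAX_RETRY_LAUNCH_HISTORY_TASK is exceeded, then records a permanent launch failure. In miniature (the cap value and success condition here are made up):

```
#include <stdio.h>

/* Illustrative cap, not the real MAX_RETRY_LAUNCH_HISTORY_TASK. */
#define MAX_RETRY 3

static int tryOnce(int attempt) { return attempt >= 2; /* succeeds on 3rd try */ }

int main(void) {
  for (int retry = 0; retry <= MAX_RETRY; ++retry) {
    if (tryOnce(retry)) {
      printf("launched after %d retries\n", retry);
      return 0;
    }
    printf("retry %d failed, re-arm timer\n", retry);
  }
  printf("max retry reached, mark launch failed\n");
  return 1;
}
```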
streamTaskSetRetryInfoForLaunch(pHTaskInfo); ASSERT(pTask->status.timerActive >= 1); // abort the timer if intend to stop task SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId); - if (pHTask == NULL && (!streamTaskShouldStop(pTask))) { - char* p = NULL; - int32_t hTaskId = pHTaskInfo->id.taskId; - - streamTaskGetStatus(pTask, &p); - stDebug("s-task:%s status:%s failed to launch fill-history task:0x%x, retry launch:%dms, retryCount:%d", - pTask->id.idStr, p, hTaskId, pHTaskInfo->waitInterval, pHTaskInfo->retryTimes); - - taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer); + if (pHTask == NULL) { + doRetryLaunchFillHistoryTask(pTask, pInfo, now); streamMetaReleaseTask(pMeta, pTask); return; - } - - if (pHTask != NULL) { + } else { checkFillhistoryTaskStatus(pTask, pHTask); streamMetaReleaseTask(pMeta, pHTask); - } - // not in timer anymore - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId, - pHTaskInfo->retryTimes, ref); - streamMetaReleaseTask(pMeta, pTask); + // not in timer anymore + int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId, + pHTaskInfo->retryTimes, ref); + } } + + streamMetaReleaseTask(pMeta, pTask); } else { - stError("s-task:0x%x failed to load task, it may have been destroyed, not launch related fill-history task", - (int32_t)pInfo->id.taskId); + streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); + + int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); + stError("s-task:0x%x rel fill-history task:0x%" PRIx64 " may have been destroyed, not launch, ref:%d", + (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId, ref); } taosMemoryFree(pInfo); } -SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { +SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t hStreamId, + int32_t hTaskId) { SLaunchHTaskInfo* pInfo = taosMemoryCalloc(1, sizeof(SLaunchHTaskInfo)); if (pInfo == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pInfo->id.taskId = taskId; pInfo->id.streamId = streamId; + pInfo->id.taskId = taskId; + + pInfo->hTaskId.streamId = hStreamId; + pInfo->hTaskId.taskId = hTaskId; + pInfo->pMeta = pMeta; return pInfo; } -// an fill history task needs to be started. 
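+// Usage sketch (mirrors the call in launchNotBuiltFillHistoryTask below): the caller now
+// captures both the stream task id and the fill-history id up front, so the timer callback
+// can still record a launch failure after the stream task object itself is gone:
+//   SLaunchHTaskInfo* pInfo =
+//       createHTaskLaunchInfo(pMeta, pTask->id.streamId, pTask->id.taskId,
+//                             pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);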
-int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { - SStreamMeta* pMeta = pTask->pMeta; - int32_t hTaskId = pTask->hTaskInfo.id.taskId; +static int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) { + SStreamMeta* pMeta = pTask->pMeta; + STaskExecStatisInfo* pExecInfo = &pTask->execInfo; + const char* idStr = pTask->id.idStr; + int64_t hStreamId = pTask->hTaskInfo.id.streamId; + int32_t hTaskId = pTask->hTaskInfo.id.taskId; + ASSERT(hTaskId != 0); - ASSERT((hTaskId != 0) && (pTask->status.downstreamReady == 1)); - stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr, - pTask->hTaskInfo.id.streamId, hTaskId); + stWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since not built yet", idStr, pMeta->vgId, hTaskId); - // Set the execute conditions, including the query time window and the version range - SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); - if (pHTask == NULL) { - stWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since not built yet", pTask->id.idStr, pMeta->vgId, - hTaskId); + SLaunchHTaskInfo* pInfo = createHTaskLaunchInfo(pMeta, pTask->id.streamId, pTask->id.taskId, hStreamId, hTaskId); + if (pInfo == NULL) { + stError("s-task:%s failed to launch related fill-history task, since Out Of Memory", idStr); - SLaunchHTaskInfo* pInfo = createHTaskLaunchInfo(pTask->pMeta, pTask->id.streamId, pTask->id.taskId); - if (pInfo == NULL) { - stError("s-task:%s failed to launch related fill-history task, since Out Of Memory", pTask->id.idStr); + streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false); + return terrno; + } + + // set the launch time info + streamTaskInitForLaunchHTask(&pTask->hTaskInfo); + + // check for the timer + if (pTask->hTaskInfo.pTimer == NULL) { + int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); + pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer); + + if (pTask->hTaskInfo.pTimer == NULL) { + ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", idStr, ref); + + taosMemoryFree(pInfo); + streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false); return terrno; } - streamTaskInitForLaunchHTask(&pTask->hTaskInfo); - if (pTask->hTaskInfo.pTimer == NULL) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer); - if (pTask->hTaskInfo.pTimer == NULL) { - atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", pTask->id.idStr, - pTask->status.timerActive); - taosMemoryFree(pInfo); - } else { - ASSERT(ref >= 1); - stDebug("s-task:%s set timer active flag, ref:%d", pTask->id.idStr, ref); - } - } else { // timer exists - ASSERT(pTask->status.timerActive >= 1); - stDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr); - taosTmrReset(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer); - } + ASSERT(ref >= 1); - return TSDB_CODE_SUCCESS; - } - - if ((*pHTask)->status.downstreamReady == 1) { - stDebug("s-task:%s fill-history task is ready, no need to check downstream", (*pHTask)->id.idStr); - } else { - 
checkFillhistoryTaskStatus(pTask, *pHTask); + stDebug("s-task:%s set timer active flag, ref:%d", idStr, ref); + } else { // timer exists + ASSERT(pTask->status.timerActive >= 1); + stDebug("s-task:%s set timer active flag, task timer not null", idStr); + taosTmrReset(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer); } return TSDB_CODE_SUCCESS; } +// a fill-history task needs to be started. +int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { + SStreamMeta* pMeta = pTask->pMeta; + STaskExecStatisInfo* pExecInfo = &pTask->execInfo; + const char* idStr = pTask->id.idStr; + int64_t hStreamId = pTask->hTaskInfo.id.streamId; + int32_t hTaskId = pTask->hTaskInfo.id.taskId; + ASSERT(hTaskId != 0); + + // check stream task status in the first place. + SStreamTaskState* pStatus = streamTaskGetStatus(pTask); + if (pStatus->state != TASK_STATUS__READY) { + stDebug("s-task:%s not launch related fill-history task:0x%" PRIx64 "-0x%x, status:%s", idStr, hStreamId, hTaskId, + pStatus->name); + + streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false); + return -1; // todo set the correct error code + } + + stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", idStr, hStreamId, hTaskId); + + // Set the execute conditions, including the query time window and the version range + streamMetaRLock(pMeta); + SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); + streamMetaRUnLock(pMeta); + + if (pHTask != NULL) { // it is already added into stream meta store. + SStreamTask* pHisTask = streamMetaAcquireTask(pMeta, hStreamId, hTaskId); + if (pHisTask == NULL) { + stDebug("s-task:%s failed to acquire and start fill-history task, it may have been dropped/stopped", idStr); + streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false); + } else { + if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing + stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr); + streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, true); + } else { // exists but not ready; continue checking the downstream task status + checkFillhistoryTaskStatus(pTask, pHisTask); + } + + streamMetaReleaseTask(pMeta, pHisTask); + } + + return TSDB_CODE_SUCCESS; + } else { + return launchNotBuiltFillHistoryTask(pTask); + } +} + int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) { - if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__DROPPING) { + if (streamTaskGetStatus(pTask)->state == TASK_STATUS__DROPPING) { return 0; } @@ -990,6 +1105,11 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) { pTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, pRange->range.maxVer); } } else { + ASSERT(pTask->info.fillHistory == 0); + if (pTask->info.taskLevel >= TASK_LEVEL__AGG) { + return; + } + int64_t ekey = 0; if (pRange->window.ekey < INT64_MAX) { ekey = pRange->window.ekey + 1; @@ -1004,10 +1124,13 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) { pRange->range.minVer = 0; pRange->range.maxVer = ver; - stDebug("s-task:%s level:%d related fill-history task exists, update stream calc time window:%" PRId64 " - %" PRId64 + stDebug("s-task:%s level:%d related fill-history task exists, set stream task timeWindow:%" PRId64 " - %" PRId64 ", verRang:%" PRId64 " - %" PRId64, - pTask->id.idStr, pTask->info.taskLevel, 
pRange->window.skey, pRange->window.ekey, pRange->range.minVer, - pRange->range.maxVer); + pTask->id.idStr, pTask->info.taskLevel, pRange->window.skey, pRange->window.ekey, ver, INT64_MAX); + + SVersionRange verRange = {.minVer = ver, .maxVer = INT64_MAX}; + STimeWindow win = pRange->window; + streamSetParamForStreamScannerStep2(pTask, &verRange, &win); } } @@ -1027,23 +1150,21 @@ void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) { } void streamTaskResume(SStreamTask* pTask) { - char* p = NULL; - ETaskStatus status = streamTaskGetStatus(pTask, &p); + SStreamTaskState prevState = *streamTaskGetStatus(pTask); SStreamMeta* pMeta = pTask->pMeta; - if (status == TASK_STATUS__PAUSE || status == TASK_STATUS__HALT) { + if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) { streamTaskRestoreStatus(pTask); - char* pNew = NULL; - streamTaskGetStatus(pTask, &pNew); - if (status == TASK_STATUS__PAUSE) { + char* pNew = streamTaskGetStatus(pTask)->name; + if (prevState.state == TASK_STATUS__PAUSE) { int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1); - stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, p, num); + stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num); } else { - stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, p); + stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name); } } else { - stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, p); + stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name); } } @@ -1069,7 +1190,7 @@ static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) } } -int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, +int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready) { STaskStartInfo* pStartInfo = &pMeta->startInfo; STaskId id = {.streamId = streamId, .taskId = taskId}; @@ -1087,19 +1208,23 @@ int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamI pStartInfo->readyTs = taosGetTimestampMs(); pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? 
pStartInfo->readyTs - pStartInfo->startTs : 0; - stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x startTs:%" PRId64 + stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 ", readyTs:%" PRId64 " total elapsed time:%.2fs", - pMeta->vgId, numOfTotal, taskId, pStartInfo->startTs, pStartInfo->readyTs, + pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, pStartInfo->elapsedTime / 1000.0); // print the initialization elapsed time and info displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); streamMetaResetStartInfo(pStartInfo); + streamMetaWUnLock(pMeta); + + pStartInfo->completeFn(pMeta); } else { - stDebug("vgId:%d recv check down results:%d, total:%d", pMeta->vgId, numOfRecv, numOfTotal); + streamMetaWUnLock(pMeta); + stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", pMeta->vgId, taskId, + ready, numOfRecv, numOfTotal); } - streamMetaWUnLock(pMeta); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 1ded350f89..387803273a 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -309,7 +309,9 @@ void tFreeStreamTask(SStreamTask* pTask) { ETaskStatus status1 = TASK_STATUS__UNINIT; taosThreadMutexLock(&pTask->lock); if (pTask->status.pSM != NULL) { - status1 = streamTaskGetStatus(pTask, &p); + SStreamTaskState* pStatus = streamTaskGetStatus(pTask); + p = pStatus->name; + status1 = pStatus->state; } taosThreadMutexUnlock(&pTask->lock); @@ -328,9 +330,9 @@ void tFreeStreamTask(SStreamTask* pTask) { taosMsleep(100); } - if (pTask->schedInfo.pTimer != NULL) { - taosTmrStop(pTask->schedInfo.pTimer); - pTask->schedInfo.pTimer = NULL; + if (pTask->schedInfo.pDelayTimer != NULL) { + taosTmrStop(pTask->schedInfo.pDelayTimer); + pTask->schedInfo.pDelayTimer = NULL; } if (pTask->hTaskInfo.pTimer != NULL) { @@ -796,5 +798,6 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc) pDst->sinkDataSize = pSrc->sinkDataSize; pDst->activeCheckpointId = pSrc->activeCheckpointId; pDst->checkpointFailed = pSrc->checkpointFailed; + pDst->chkpointTransId = pSrc->chkpointTransId; } diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 68ae1cce36..f0dcc75c4c 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -31,14 +31,13 @@ SStreamTaskState StreamTaskStatusList[9] = { {.state = TASK_STATUS__HALT, .name = "halt"}, {.state = TASK_STATUS__PAUSE, .name = "paused"}, {.state = TASK_STATUS__CK, .name = "checkpoint"}, - {.state = TASK_STATUS__STREAM_SCAN_HISTORY, .name = "stream-scan-history"}, +// {.state = TASK_STATUS__STREAM_SCAN_HISTORY, .name = "stream-scan-history"}, }; SStreamEventInfo StreamTaskEventList[12] = { {.event = 0, .name = ""}, // dummy event, place holder {.event = TASK_EVENT_INIT, .name = "initialize"}, {.event = TASK_EVENT_INIT_SCANHIST, .name = "scan-history-init"}, - {.event = TASK_EVENT_INIT_STREAM_SCANHIST, .name = "stream-scan-history-init"}, {.event = TASK_EVENT_SCANHIST_DONE, .name = "scan-history-completed"}, {.event = TASK_EVENT_STOP, .name = "stopping"}, {.event = TASK_EVENT_GEN_CHECKPOINT, .name = "checkpoint"}, @@ -65,8 +64,7 @@ static STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus nex static int32_t dummyFn(SStreamTask* 
UNUSED_PARAM(p)) { return TSDB_CODE_SUCCESS; } static int32_t attachEvent(SStreamTask* pTask, SAttachedEventInfo* pEvtInfo) { - char* p = NULL; - streamTaskGetStatus(pTask, &p); + char* p = streamTaskGetStatus(pTask)->name; stDebug("s-task:%s status:%s attach event:%s required status:%s, since not allowed to handle it", pTask->id.idStr, p, GET_EVT_NAME(pEvtInfo->event), StreamTaskStatusList[pEvtInfo->status].name); @@ -110,12 +108,12 @@ int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) { // todo check rsp code for handle Event:TASK_EVENT_SCANHIST_DONE static bool isInvalidStateTransfer(ETaskStatus state, const EStreamTaskEvent event) { - if (event == TASK_EVENT_INIT_STREAM_SCANHIST || event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) { + if (event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) { return (state != TASK_STATUS__UNINIT); } if (event == TASK_EVENT_SCANHIST_DONE) { - return (state != TASK_STATUS__SCAN_HISTORY && state != TASK_STATUS__STREAM_SCAN_HISTORY); + return (state != TASK_STATUS__SCAN_HISTORY); } if (event == TASK_EVENT_GEN_CHECKPOINT) { @@ -160,6 +158,45 @@ static STaskStateTrans* streamTaskFindTransform(ETaskStatus state, const EStream return NULL; } +static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName, SStreamTask* pTask) { + int32_t code = TSDB_CODE_SUCCESS; + int64_t el = (taosGetTimestampMs() - pSM->startTs); + stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr, + pEventName, el, pSM->prev.state.name, pSM->current.name); + + SAttachedEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0); + + // OK, let's handle the attached event, since the task has reached the required status now + if (pSM->current.state == pEvtInfo->status) { + stDebug("s-task:%s handle the event:%s in waiting list, state:%s", pTask->id.idStr, + GET_EVT_NAME(pEvtInfo->event), pSM->current.name); + + // remove it + taosArrayPop(pSM->pWaitingEventList); + + STaskStateTrans* pNextTrans = streamTaskFindTransform(pSM->current.state, pEvtInfo->event); + ASSERT(pSM->pActiveTrans == NULL && pNextTrans != NULL); + + pSM->pActiveTrans = pNextTrans; + pSM->startTs = taosGetTimestampMs(); + taosThreadMutexUnlock(&pTask->lock); + + code = pNextTrans->pAction(pSM->pTask); + if (pNextTrans->autoInvokeEndFn) { + return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event); + } else { + return code; + } + } else { + taosThreadMutexUnlock(&pTask->lock); + stDebug("s-task:%s state:%s event:%s in waiting list, req state:%s not fulfilled, put it back", pTask->id.idStr, + pSM->current.name, GET_EVT_NAME(pEvtInfo->event), + StreamTaskStatusList[pEvtInfo->status].name); + } + + return code; +} + void streamTaskRestoreStatus(SStreamTask* pTask) { SStreamTaskSM* pSM = pTask->status.pSM; @@ -175,7 +212,13 @@ void streamTaskRestoreStatus(SStreamTask* pTask) { pSM->prev.evt = 0; pSM->startTs = taosGetTimestampMs(); - stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name); + + if (taosArrayGetSize(pSM->pWaitingEventList) > 0) { + stDebug("s-task:%s restore status, %s -> %s, and then handle waiting event", pTask->id.idStr, pSM->prev.state.name, pSM->current.name); + doHandleWaitingEvent(pSM, "restore-pause/halt", pTask); + } else { + stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name); + } taosThreadMutexUnlock(&pTask->lock); } @@ -230,13 +273,13 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, 
EStreamTaskEvent event, STaskSt while (1) { // wait for the task to be here taosThreadMutexLock(&pTask->lock); - ETaskStatus s = streamTaskGetStatus(pTask, NULL); + ETaskStatus s = streamTaskGetStatus(pTask)->state; taosThreadMutexUnlock(&pTask->lock); - if ((s == pTrans->next.state) && (pSM->prev.evt == pTrans->event)) { + if ((s == pTrans->next.state) && (pSM->prev.evt == pTrans->event)) {// this event has been handled already stDebug("s-task:%s attached event:%s handled", id, GET_EVT_NAME(pTrans->event)); return TSDB_CODE_SUCCESS; - } else if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__STOP) { // this event has been handled already + } else if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT) { stDebug("s-task:%s not handle event:%s yet, wait for 100ms and recheck", id, GET_EVT_NAME(event)); taosMsleep(100); } else { @@ -343,39 +386,7 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even pTrans->pSuccAction(pTask); if (taosArrayGetSize(pSM->pWaitingEventList) > 0) { - int64_t el = (taosGetTimestampMs() - pSM->startTs); - stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr, - GET_EVT_NAME(pTrans->event), el, pSM->prev.state.name, pSM->current.name); - - SAttachedEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0); - - // OK, let's handle the attached event, since the task has reached the required status now - if (pSM->current.state == pEvtInfo->status) { - stDebug("s-task:%s handle the event:%s in waiting list, state:%s", pTask->id.idStr, - GET_EVT_NAME(pEvtInfo->event), pSM->current.name); - - // remove it - taosArrayPop(pSM->pWaitingEventList); - - STaskStateTrans* pNextTrans = streamTaskFindTransform(pSM->current.state, pEvtInfo->event); - ASSERT(pSM->pActiveTrans == NULL && pNextTrans != NULL); - - pSM->pActiveTrans = pNextTrans; - pSM->startTs = taosGetTimestampMs(); - taosThreadMutexUnlock(&pTask->lock); - - int32_t code = pNextTrans->pAction(pSM->pTask); - if (pNextTrans->autoInvokeEndFn) { - return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event); - } else { - return code; - } - } else { - taosThreadMutexUnlock(&pTask->lock); - stDebug("s-task:%s state:%s event:%s in waiting list, req state:%s not fulfilled, put it back", pTask->id.idStr, - pSM->current.name, GET_EVT_NAME(pEvtInfo->event), - StreamTaskStatusList[pEvtInfo->status].name); - } + doHandleWaitingEvent(pSM, GET_EVT_NAME(pTrans->event), pTask); } else { taosThreadMutexUnlock(&pTask->lock); @@ -387,12 +398,8 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even return TSDB_CODE_SUCCESS; } -ETaskStatus streamTaskGetStatus(const SStreamTask* pTask, char** pStr) { - SStreamTaskState s = pTask->status.pSM->current; // copy one obj in case of multi-thread environment - if (pStr != NULL) { - *pStr = s.name; - } - return s.state; +SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask) { + return &pTask->status.pSM->current; // returns a pointer to the live state object; no copy is made, so do not cache it across transitions } const char* streamTaskGetStatusStr(ETaskStatus status) { @@ -469,14 +476,10 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, 
TASK_EVENT_INIT_STREAM_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false); - taosArrayPush(streamTaskSMTrans, &trans); // scan-history related event trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); // halt stream task, from other task status trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true); @@ -486,8 +489,6 @@ void doInitStateTransferTable(void) { SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT}; - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true); - taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true); taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true); @@ -506,8 +507,6 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); info = (SAttachedEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE}; - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); - taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); @@ -541,8 +540,6 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); // dropping related event trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true); @@ -561,7 +558,5 @@ taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); } //clang-format on \ No newline at end of file diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index f78f6f4df1..d607f26de3 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -13,8 +13,6 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ -#include "query.h" -#include "tdatablock.h" #include "tencode.h" #include "tstreamUpdate.h" #include "ttime.h" @@ -387,8 +385,8 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) { bool isIncrementalTimeStamp(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) { TSKEY *pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t)); - bool res = true; - if ( pMapMaxTs && ts < *pMapMaxTs ) { + bool res = true; + if (pMapMaxTs && ts < *pMapMaxTs) { res = false; } else { taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY)); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index d31e4cbfcd..aac78cf03b 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -26,6 +26,7 @@ #define FLUSH_NUM 4 #define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024) #define MIN_NUM_OF_ROW_BUFF 10240 +#define MIN_NUM_OF_RECOVER_ROW_BUFF 128 #define TASK_KEY "streamFileState" #define STREAM_STATE_INFO_NAME "StreamStateCheckPoint" @@ -660,7 +661,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) { if (pCur == NULL) { return -1; } - int32_t recoverNum = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount); + int32_t recoverNum = TMIN(MIN_NUM_OF_RECOVER_ROW_BUFF, pFileState->maxRowCount); while (code == TSDB_CODE_SUCCESS) { if (pFileState->curRowCount >= recoverNum) { break; @@ -694,7 +695,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) { if (pCur == NULL) { return -1; } - int32_t recoverNum = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount); + int32_t recoverNum = TMIN(MIN_NUM_OF_RECOVER_ROW_BUFF, pFileState->maxRowCount); while (code == TSDB_CODE_SUCCESS) { if (pFileState->curRowCount >= recoverNum) { break; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index b8740a2858..9352d5662e 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -662,14 +662,14 @@ ESyncRole syncGetRole(int64_t rid) { int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_t* seq) { if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { terrno = TSDB_CODE_SYN_NOT_LEADER; - sNError(pSyncNode, "sync propose not leader, type:%s", TMSG_INFO(pMsg->msgType)); + sNWarn(pSyncNode, "sync propose not leader, type:%s", TMSG_INFO(pMsg->msgType)); return -1; } if (!pSyncNode->restoreFinish) { terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY; - sNError(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64, - TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); + sNWarn(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64, + TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); return -1; } @@ -790,7 +790,7 @@ int32_t syncNodeLogStoreRestoreOnNeed(SSyncNode* pNode) { SyncIndex commitIndex = snapshot.lastApplyIndex; SyncIndex firstVer = pNode->pLogStore->syncLogBeginIndex(pNode->pLogStore); SyncIndex lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore); - if (lastVer < commitIndex || firstVer > commitIndex + 1) { + if ((lastVer < commitIndex || firstVer > commitIndex + 1) || pNode->fsmState == SYNC_FSM_STATE_INCOMPLETE) { if (pNode->pLogStore->syncLogRestoreFromSnapshot(pNode->pLogStore, commitIndex)) { sError("vgId:%d, failed to restore log store from snapshot since %s. 
lastVer:%" PRId64 ", snapshotVer:%" PRId64, pNode->vgId, terrstr(), lastVer, commitIndex); @@ -1162,9 +1162,12 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) { ASSERTS(pSyncNode->pLogStore != NULL, "log store not created"); ASSERTS(pSyncNode->pLogBuf != NULL, "ring log buffer not created"); + taosThreadMutexLock(&pSyncNode->pLogBuf->mutex); SyncIndex lastVer = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); SyncIndex commitIndex = pSyncNode->pLogStore->syncLogCommitIndex(pSyncNode->pLogStore); SyncIndex endIndex = pSyncNode->pLogBuf->endIndex; + taosThreadMutexUnlock(&pSyncNode->pLogBuf->mutex); + if (lastVer != -1 && endIndex != lastVer + 1) { terrno = TSDB_CODE_WAL_LOG_INCOMPLETE; sError("vgId:%d, failed to restore sync node since %s. expected lastLogIndex:%" PRId64 ", lastVer:%" PRId64 "", diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index f2386797c1..70bdd4a837 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -206,7 +206,7 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) { } if (pLogStore->syncLogGetEntry(pLogStore, index, &pEntry) < 0) { - sError("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); + sWarn("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); break; } @@ -558,22 +558,31 @@ int32_t syncFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, SyncTe pEntry->term); } - SRpcMsg rpcMsg = {.code = applyCode}; - syncEntry2OriginalRpc(pEntry, &rpcMsg); + int32_t code = 0; + bool retry = false; + do { + SRpcMsg rpcMsg = {.code = applyCode}; + syncEntry2OriginalRpc(pEntry, &rpcMsg); - SFsmCbMeta cbMeta = {0}; - cbMeta.index = pEntry->index; - cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pNode, pEntry->index); - cbMeta.isWeak = pEntry->isWeak; - cbMeta.code = applyCode; - cbMeta.state = role; - cbMeta.seqNum = pEntry->seqNum; - cbMeta.term = pEntry->term; - cbMeta.currentTerm = term; - cbMeta.flag = -1; + SFsmCbMeta cbMeta = {0}; + cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pNode, pEntry->index); + cbMeta.isWeak = pEntry->isWeak; + cbMeta.code = applyCode; + cbMeta.state = role; + cbMeta.seqNum = pEntry->seqNum; + cbMeta.term = pEntry->term; + cbMeta.currentTerm = term; + cbMeta.flag = -1; - (void)syncRespMgrGetAndDel(pNode->pSyncRespMgr, cbMeta.seqNum, &rpcMsg.info); - int32_t code = pFsm->FpCommitCb(pFsm, &rpcMsg, &cbMeta); + (void)syncRespMgrGetAndDel(pNode->pSyncRespMgr, cbMeta.seqNum, &rpcMsg.info); + code = pFsm->FpCommitCb(pFsm, &rpcMsg, &cbMeta); + retry = (code != 0) && (terrno == TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); + if (retry) { + taosMsleep(10); + sError("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + } + } while (retry); return code; } @@ -1228,7 +1237,7 @@ SSyncRaftEntry* syncLogBufferGetOneEntry(SSyncLogBuffer* pBuf, SSyncNode* pNode, } else { *pInBuf = false; if (pNode->pLogStore->syncLogGetEntry(pNode->pLogStore, index, &pEntry) < 0) { - sError("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); + sWarn("vgId:%d, failed to get log entry since %s. 
index:%" PRId64 "", pNode->vgId, terrstr(), index); } } return pEntry; @@ -1244,7 +1253,7 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind pEntry = syncLogBufferGetOneEntry(pBuf, pNode, index, &inBuf); if (pEntry == NULL) { - sError("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index); + sWarn("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index); if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId); if (pMgr) { diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index f060e9da13..578e6798e0 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -22,6 +22,7 @@ #include "syncRaftStore.h" #include "syncReplication.h" #include "syncUtil.h" +#include "tglobal.h" static SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths); @@ -806,6 +807,13 @@ static int32_t syncNodeOnSnapshotBegin(SSyncNode *pSyncNode, SyncSnapshotSend *p goto _SEND_REPLY; } + SyncIndex beginIndex = syncNodeGetSnapBeginIndex(pSyncNode); + if (pReceiver->snapshotParam.start != beginIndex) { + sRError(pReceiver, "snapshot begin index is changed unexpectedly. sver:%" PRId64 ", beginIndex:%" PRId64, + pReceiver->snapshotParam.start, beginIndex); + goto _SEND_REPLY; + } + code = 0; _SEND_REPLY: if (code != 0 && terrno != 0) { @@ -994,6 +1002,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, SRpcMsg *pRpcMsg) { sRError(pReceiver, "reject snap replication with smaller term. msg term:%" PRId64 ", seq:%d", pMsg->term, pMsg->seq); terrno = TSDB_CODE_SYN_MISMATCHED_SIGNATURE; + syncSnapSendRsp(pReceiver, pMsg, NULL, 0, 0, terrno); return -1; } @@ -1087,7 +1096,10 @@ static int32_t syncSnapSenderExchgSnapInfo(SSyncNode *pSyncNode, SSyncSnapshotSe // sender static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { + int32_t code = -1; SSnapshot snapshot = {0}; + + taosThreadMutexLock(&pSender->pSndBuf->mutex); pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); // prepare @@ -1103,20 +1115,24 @@ static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSend // start reader if (pMsg->payloadType == TDMT_SYNC_PREP_SNAPSHOT_REPLY) { if (syncSnapSenderExchgSnapInfo(pSyncNode, pSender, pMsg) != 0) { - return -1; + goto _out; } } - int32_t code = pSyncNode->pFsm->FpSnapshotStartRead(pSyncNode->pFsm, &pSender->snapshotParam, &pSender->pReader); + code = pSyncNode->pFsm->FpSnapshotStartRead(pSyncNode->pFsm, &pSender->snapshotParam, &pSender->pReader); if (code != 0) { sSError(pSender, "prepare snapshot failed since %s", terrstr()); - return -1; + goto _out; } // update next index syncIndexMgrSetIndex(pSyncNode->pNextIndex, &pMsg->srcId, snapshot.lastApplyIndex + 1); - return snapshotSend(pSender); + code = snapshotSend(pSender); + +_out: + taosThreadMutexUnlock(&pSender->pSndBuf->mutex); + return code; } static int32_t snapshotSenderSignatureCmp(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { @@ -1171,7 +1187,7 @@ static int32_t syncSnapBufferSend(SSyncSnapshotSender *pSender, SyncSnapshotRsp pSndBuf->start = ack + 1; } - while (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSender->seq - pSndBuf->start < (pSndBuf->size >> 2)) { + while (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSender->seq - pSndBuf->start < tsSnapReplMaxWaitN) { if (snapshotSend(pSender) != 0) { code = terrno; goto _out; diff --git a/source/libs/transport/inc/transComm.h 
b/source/libs/transport/inc/transComm.h index 19b2dfc300..7483588593 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -122,6 +122,8 @@ typedef struct SExHandle { typedef struct { STransMsg* pRsp; + SEpSet epSet; + int8_t hasEpSet; tsem_t* pSem; int8_t inited; SRWLatch latch; @@ -317,7 +319,8 @@ int transReleaseSrvHandle(void* handle); int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransCtx* pCtx); int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp); -int transSendRecvWithTimeout(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp, int32_t timeoutMs); +int transSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp, int8_t* epUpdated, + int32_t timeoutMs); int transSendResponse(const STransMsg* msg); int transRegisterMsg(const STransMsg* msg); int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn); diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index 96537a950e..6de10cbb9e 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -28,7 +28,7 @@ static int32_t httpRefMgt = 0; static int64_t httpRef = -1; -static int32_t FAST_FAILURE_LIMIT = 120; +static int32_t FAST_FAILURE_LIMIT = 1; typedef struct SHttpModule { uv_loop_t* loop; SAsyncPool* asyncPool; diff --git a/source/libs/transport/src/tmsgcb.c b/source/libs/transport/src/tmsgcb.c index 1b1fa8cc1c..e44328c683 100644 --- a/source/libs/transport/src/tmsgcb.c +++ b/source/libs/transport/src/tmsgcb.c @@ -48,6 +48,14 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) { } return code; } +int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg) { + int32_t code = (*defaultMsgCb.sendSyncReqFp)(epSet, pMsg); + if (code != 0) { + rpcFreeCont(pMsg->pCont); + pMsg->pCont = NULL; + } + return code; +} void tmsgSendRsp(SRpcMsg* pMsg) { #if 1 diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index b23d229931..64302a78fc 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -169,8 +169,9 @@ int rpcSendRequestWithCtx(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, in int rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) { return transSendRecv(shandle, pEpSet, pMsg, pRsp); } -int rpcSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp, int32_t timeoutMs) { - return transSendRecvWithTimeout(shandle, pEpSet, pMsg, pRsp, timeoutMs); +int rpcSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp, int8_t* epUpdated, + int32_t timeoutMs) { + return transSendRecvWithTimeout(shandle, pEpSet, pMsg, pRsp, epUpdated, timeoutMs); } int rpcSendResponse(const SRpcMsg* pMsg) { return transSendResponse(pMsg); } @@ -197,6 +198,13 @@ int32_t rpcUtilSWhiteListToStr(SIpWhiteList* pWhiteList, char** ppBuf) { return transUtilSWhiteListToStr(pWhiteList, ppBuf); } +int32_t rpcCvtErrCode(int32_t code) { + if (code == TSDB_CODE_RPC_BROKEN_LINK || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + return TSDB_CODE_RPC_NETWORK_ERROR; + } + return code; +} + int32_t rpcInit() { transInit(); return 0; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 936d11c151..246605694b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1907,7 +1907,12 @@ bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) { 
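+// (sketch, assuming strtolower() writes a lower-cased copy of pInst->label into threadName)
+// the worker thread is renamed after its RPC instance label below, so e.g. an instance
+// labeled "TAOS" yields a thread named "taos" instead of the generic "trans-cli-work".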
static void* cliWorkThread(void* arg) { SCliThrd* pThrd = (SCliThrd*)arg; pThrd->pid = taosGetSelfPthreadId(); - setThreadName("trans-cli-work"); + + char threadName[TSDB_LABEL_LEN] = {0}; + STrans* pInst = pThrd->pTransInst; + strtolower(threadName, pInst->label); + setThreadName(threadName); + uv_run(pThrd->loop, UV_RUN_DEFAULT); tDebug("thread quit-thread:%08" PRId64, pThrd->pid); @@ -2425,6 +2430,10 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { STransSyncMsg* pSyncMsg = taosAcquireRef(transGetSyncMsgMgt(), pCtx->syncMsgRef); if (pSyncMsg != NULL) { memcpy(pSyncMsg->pRsp, (char*)pResp, sizeof(*pResp)); + if (cliIsEpsetUpdated(pResp->code, pCtx)) { + pSyncMsg->hasEpSet = 1; + epsetAssign(&pSyncMsg->epSet, &pCtx->epSet); + } tsem_post(pSyncMsg->pSem); taosReleaseRef(transGetSyncMsgMgt(), pCtx->syncMsgRef); } else { @@ -2640,10 +2649,12 @@ int64_t transCreateSyncMsg(STransMsg* pTransMsg) { pSyncMsg->inited = 0; pSyncMsg->pRsp = pTransMsg; pSyncMsg->pSem = sem; + pSyncMsg->hasEpSet = 0; return taosAddRef(transGetSyncMsgMgt(), pSyncMsg); } -int transSendRecvWithTimeout(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp, int32_t timeoutMs) { +int transSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp, int8_t* epUpdated, + int32_t timeoutMs) { STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); STransMsg* pTransMsg = taosMemoryCalloc(1, sizeof(STransMsg)); if (pTransInst == NULL) { @@ -2695,6 +2706,11 @@ int transSendRecvWithTimeout(void* shandle, const SEpSet* pEpSet, STransMsg* pRe ret = TSDB_CODE_TIMEOUT_ERROR; } else { memcpy(pRsp, pSyncMsg->pRsp, sizeof(STransMsg)); + pSyncMsg->pRsp->pCont = NULL; + if (pSyncMsg->hasEpSet == 1) { + epsetAssign(pEpSet, &pSyncMsg->epSet); + *epUpdated = 1; + } ret = 0; } _RETURN: diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 759a4d79db..b1fb9a2450 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -667,7 +667,7 @@ void transDestroySyncMsg(void* msg) { STransSyncMsg* pSyncMsg = msg; tsem_destroy(pSyncMsg->pSem); taosMemoryFree(pSyncMsg->pSem); - + transFreeMsg(pSyncMsg->pRsp->pCont); taosMemoryFree(pSyncMsg->pRsp); taosMemoryFree(pSyncMsg); } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 017969b4e5..362d38b505 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1498,6 +1498,7 @@ int transSendResponse(const STransMsg* msg) { return 0; } SExHandle* exh = msg->info.handle; + if (exh == NULL) { rpcFreeCont(msg->pCont); return 0; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index c0435ca774..6748d161ae 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -259,11 +259,6 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t contLen; bool seeked = false; - wDebug("vgId:%d, try to fetch ver %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 - ", applied ver:%" PRId64", 0x%"PRIx64, - pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, - pRead->pWal->vers.appliedVer, pRead->readerId); - // TODO: valid ver if (ver > pRead->pWal->vers.commitVer) { return -1; @@ -310,8 +305,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) { } int32_t walSkipFetchBody(SWalReader *pRead) { - wDebug("vgId:%d, skip fetch body %" 
PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 - ", applied ver:%" PRId64", 0x%"PRIx64, + wDebug("vgId:%d, skip fetch body:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64 + ", applied:%" PRId64 ", 0x%" PRIx64, pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer, pRead->readerId); @@ -330,11 +325,11 @@ int32_t walFetchBody(SWalReader *pRead) { int64_t ver = pReadHead->version; int32_t vgId = pRead->pWal->cfg.vgId; int64_t id = pRead->readerId; + SWalVer *pVer = &pRead->pWal->vers; - wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 - ", applied ver:%" PRId64 ", 0x%" PRIx64, - vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, - pRead->pWal->vers.appliedVer, id); + wDebug("vgId:%d, fetch body:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64 ", applied:%" PRId64 + ", 0x%" PRIx64, + vgId, ver, pVer->firstVer, pVer->commitVer, pVer->lastVer, pVer->appliedVer, id); if (pRead->capacity < pReadHead->bodyLen) { SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); diff --git a/source/os/test/osTests.cpp b/source/os/test/osTests.cpp index 16660a9477..4d9ad9b5bd 100644 --- a/source/os/test/osTests.cpp +++ b/source/os/test/osTests.cpp @@ -72,7 +72,7 @@ TEST(osTest, osSystem) { const int sysLen = 64; char osSysName[sysLen]; int ret = taosGetOsReleaseName(osSysName, NULL, NULL, sysLen); - printf("os systeme name:%s\n", osSysName); + printf("os system name:%s\n", osSysName); ASSERT_EQ(ret, 0); } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index bc98461592..88eb51d500 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -18,6 +18,9 @@ #include "tcoding.h" // todo refactor API +#define BOUNDARY_SIZE 1024*1024*1024 // 1G +#define BOUNDARY_SMALL_FACTOR 1.2 +#define BOUNDARY_BIG_FACTOR 2 SArray* taosArrayInit(size_t size, size_t elemSize) { if (elemSize == 0) { @@ -85,9 +88,13 @@ static int32_t taosArrayResize(SArray* pArray) { int32_t taosArrayEnsureCap(SArray* pArray, size_t newCap) { if (newCap > pArray->capacity) { - size_t tsize = (pArray->capacity << 1u); + float factor = BOUNDARY_BIG_FACTOR; + if(newCap * pArray->elemSize > BOUNDARY_SIZE){ + factor = BOUNDARY_SMALL_FACTOR; + } + size_t tsize = (pArray->capacity * factor); while (newCap > tsize) { - tsize = (tsize << 1u); + tsize = (tsize * factor); } pArray->pData = taosMemoryRealloc(pArray->pData, tsize * pArray->elemSize); diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index b6b71855f3..53fdc96c37 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -1070,6 +1070,11 @@ int32_t patternMatch(const char *pattern, size_t psize, const char *str, size_t return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ } + if (c == '\\' && (pattern[i] == '_' || pattern[i] == '%')) { + c = pattern[i]; + i++; + } + char rejectList[2] = {toupper(c), tolower(c)}; str += nMatchChar; @@ -1097,10 +1102,13 @@ int32_t patternMatch(const char *pattern, size_t psize, const char *str, size_t c1 = str[j++]; ++nMatchChar; - if (c == '\\' && pattern[i] == c1 && - (c1 == '_' || c1 == '%')) { - i++; - continue; + if (c == '\\' && (pattern[i] == '_' || pattern[i] == '%')) { + if (c1 != pattern[i]) { + return TSDB_PATTERN_NOMATCH; + } else { + i++; + continue; + 
} } if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { @@ -1139,6 +1147,11 @@ int32_t wcsPatternMatch(const TdUcs4 *pattern, size_t psize, const TdUcs4 *str, return TSDB_PATTERN_MATCH; } + if (c == '\\' && (pattern[i] == '_' || pattern[i] == '%')) { + c = pattern[i]; + i++; + } + TdUcs4 rejectList[2] = {towupper(c), towlower(c)}; str += nMatchChar; @@ -1166,9 +1179,13 @@ int32_t wcsPatternMatch(const TdUcs4 *pattern, size_t psize, const TdUcs4 *str, c1 = str[j++]; nMatchChar++; - if (c == L'\\' && pattern[i] == L'_' && c1 == L'_') { - i++; - continue; + if (c == '\\' && (pattern[i] == '_' || pattern[i] == '%')) { + if (c1 != pattern[i]) { + return TSDB_PATTERN_NOMATCH; + } else { + i++; + continue; + } } if (c == c1 || towlower(c) == towlower(c1) || (c == pInfo->umatchOne && c1 != 0)) { diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index a78d930326..d656f0c14e 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -307,6 +307,34 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, return 0; } +static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool resetArray) { + SConfigItem *pDebugFlagItem = cfgGetItem(pCfg, "debugFlag"); + if (resetArray) { + // reset + if (pDebugFlagItem == NULL) return -1; + + // logflag names that should 'not' be set by 'debugFlag' + if (pDebugFlagItem->array == NULL) { + pDebugFlagItem->array = taosArrayInit(16, sizeof(SLogVar)); + if (pDebugFlagItem->array == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + } + taosArrayClear(pDebugFlagItem->array); + return 0; + } + + // update + if (pDebugFlagItem == NULL) return -1; + if (pDebugFlagItem->array != NULL) { + SLogVar logVar = {0}; + strncpy(logVar.name, name, TSDB_LOG_VAR_LEN - 1); + taosArrayPush(pDebugFlagItem->array, &logVar); + } + return 0; +} + int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype) { GRANT_CFG_SET; SConfigItem *pItem = cfgGetItem(pCfg, name); @@ -661,7 +689,6 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { SConfigItem *pItem = taosArrayGet(pCfg->array, i); if (tsc && pItem->scope == CFG_SCOPE_SERVER) continue; if (dump && strcmp(pItem->name, "scriptDir") == 0) continue; - if (dump && strcmp(pItem->name, "simDebugFlag") == 0) continue; tstrncpy(src, cfgStypeStr(pItem->stype), CFG_SRC_PRINT_LEN); for (int32_t j = 0; j < CFG_SRC_PRINT_LEN; ++j) { if (src[j] == 0) src[j] = ' '; @@ -931,6 +958,14 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) { code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_CFG_FILE); if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break; } + + size_t len = strlen(name); + const char *debugFlagStr = "debugFlag"; + const size_t debugFlagLen = strlen(debugFlagStr); + if (len >= debugFlagLen && strcasecmp(name + len - debugFlagLen, debugFlagStr) == 0) { + code = cfgUpdateDebugFlagItem(pConfig, name, len == debugFlagLen); + if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break; + } } taosCloseFile(&pFile); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index dc5f44cf43..360689cef3 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -53,6 +53,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_BROKEN_LINK, "Conn is broken") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TIMEOUT, "Conn read timeout") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED, "some vnode/qnode/mnode(s) out of service") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "rpc 
open too many session") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_ERROR, "rpc network error") //common & util TAOS_DEFINE_ERROR(TSDB_CODE_TIME_UNSYNCED, "Client and server's time is not synchronized") @@ -286,6 +287,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL, "Unable to establish connection While execute transaction and will continue in the background") TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Transaction context switch") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error") // mnode-mq @@ -622,6 +624,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_DUP_TIMESTAMP, "Duplicate timestamps TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_FORMAT_ERR, "Func to_timestamp failed for format mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_TS_ERR, "Func to_timestamp failed for wrong timestamp") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_NOT_SUPPORTED, "Func to_timestamp failed for unsupported timestamp format") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_TO_CHAR_NOT_SUPPORTED, "Func to_char failed for unsupported format") //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 184e18fc67..bd6c37a7b5 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -111,6 +111,7 @@ int32_t udfDebugFlag = 131; int32_t smaDebugFlag = 131; int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; +int32_t simDebugFlag = 131; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; @@ -337,14 +338,10 @@ static int32_t taosOpenNewLogFile() { } void taosResetLog() { - char lastName[LOG_FILE_NAME_LEN + 20]; - sprintf(lastName, "%s.%d", tsLogObj.logName, tsLogObj.flag); - // force create a new log file tsLogObj.lines = tsNumOfLogLines + 10; taosOpenNewLogFile(); - (void)taosRemoveFile(lastName); uInfo("=================================="); uInfo(" reset log file "); diff --git a/source/util/test/utilTests.cpp b/source/util/test/utilTests.cpp index 01a55ae710..6e32167f88 100644 --- a/source/util/test/utilTests.cpp +++ b/source/util/test/utilTests.cpp @@ -76,6 +76,16 @@ TEST(utilTest, wchar_pattern_match_test) { const TdWchar* str12 = L""; ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern12), 4, reinterpret_cast<const TdUcs4*>(str12), 0, &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_NOMATCH); + + const TdWchar* pattern13 = L"%\\_6 "; + const TdWchar* str13 = L"6a6 "; + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern13), 6, reinterpret_cast<const TdUcs4*>(str13), 4, &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_NOWILDCARDMATCH); + + const TdWchar* pattern14 = L"%\\%6 "; + const TdWchar* str14 = L"6a6 "; + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern14), 6, reinterpret_cast<const TdUcs4*>(str14), 4, &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_NOWILDCARDMATCH); } TEST(utilTest, wchar_pattern_match_no_terminated) { @@ -126,14 +136,24 @@ TEST(utilTest, wchar_pattern_match_no_terminated) { ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern8), 8, reinterpret_cast<const TdUcs4*>(str8), 6, &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_NOWILDCARDMATCH); - const TdWchar* pattern9 = L"6\\_6 "; + const TdWchar* pattern9 = L"6\\_6 "; const TdWchar* str9 = L"6_6 "; - ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern9), 4, reinterpret_cast<const TdUcs4*>(str9), 3, &pInfo); + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern9), 
6, reinterpret_cast<const TdUcs4*>(str9), 4, &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_MATCH); const TdWchar* pattern10 = L"% "; const TdWchar* str10 = L"6_6 "; - ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern10), 1, reinterpret_cast<const TdUcs4*>(str10), 3, &pInfo); + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern10), 2, reinterpret_cast<const TdUcs4*>(str10), 4, &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const TdWchar* pattern11 = L"%\\_6 "; + const TdWchar* str11 = L"6_6 "; + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern11), 6, reinterpret_cast<const TdUcs4*>(str11), 4, &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const TdWchar* pattern12 = L"%\\%6 "; + const TdWchar* str12 = L"6%6 "; + ret = wcsPatternMatch(reinterpret_cast<const TdUcs4*>(pattern12), 6, reinterpret_cast<const TdUcs4*>(str12), 4, &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_MATCH); } @@ -209,6 +229,36 @@ TEST(utilTest, char_pattern_match_test) { const char* str13 = "a%c"; ret = patternMatch(pattern13, 5, str13, strlen(str13), &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const char* pattern14 = "%a\\%c"; + const char* str14 = "a%c"; + ret = patternMatch(pattern14, strlen(pattern14), str14, strlen(str14), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const char* pattern15 = "_a\\%c"; + const char* str15 = "ba%c"; + ret = patternMatch(pattern15, strlen(pattern15), str15, strlen(str15), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const char* pattern16 = "_\\%c"; + const char* str16 = "a%c"; + ret = patternMatch(pattern16, strlen(pattern16), str16, strlen(str16), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); + + const char* pattern17 = "_\\%c"; + const char* str17 = "ba%c"; + ret = patternMatch(pattern17, strlen(pattern17), str17, strlen(str17), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_NOMATCH); + + const char* pattern18 = "%\\%c"; + const char* str18 = "abc"; + ret = patternMatch(pattern18, strlen(pattern18), str18, strlen(str18), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_NOWILDCARDMATCH); + + const char* pattern19 = "%\\_c"; + const char* str19 = "abc"; + ret = patternMatch(pattern19, strlen(pattern19), str19, strlen(str19), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_NOWILDCARDMATCH); } TEST(utilTest, char_pattern_match_no_terminated) { diff --git a/tests/army/community/cluster/snapshot.json b/tests/army/community/cluster/snapshot.json new file mode 100644 index 0000000000..64bb2aaf3c --- /dev/null +++ b/tests/army/community/cluster/snapshot.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 2000, + "thread_count": 2, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 2, + "replica": 3, + "duration":"1d", + "wal_retention_period": 1, + "wal_retention_size": 1, + "keep": "3d,6d,30d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 10, + "insert_rows": 100000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 10000, + "start_timestamp":"now-12d", + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" }, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { 
"type": "binary", "name": "bin", "len": 16}, + { "type": "nchar", "name": "nch", "len": 32} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py new file mode 100644 index 0000000000..92ecc00726 --- /dev/null +++ b/tests/army/community/cluster/snapshot.py @@ -0,0 +1,96 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import random + +import taos +import frame +import frame.etool + + +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * + + +class TDTestCase(TBase): + + + def insertData(self): + tdLog.info(f"insert data.") + # taosBenchmark run + jfile = etool.curFile(__file__, "snapshot.json") + etool.benchMark(json=jfile) + + tdSql.execute(f"use {self.db}") + # set insert data information + self.childtable_count = 10 + self.insert_rows = 100000 + self.timestamp_step = 10000 + + def doAction(self): + tdLog.info(f"do action.") + self.flushDb() + + # split vgroups + self.splitVGroups() + self.trimDb() + self.checkAggCorrect() + + # balance vgroups + self.balanceVGroupLeader() + + # replica to 1 + self.alterReplica(1) + self.checkAggCorrect() + self.compactDb() + self.alterReplica(3) + + vgids = self.getVGroup(self.db) + selid = random.choice(vgids) + self.balanceVGroupLeaderOn(selid) + + + + # run + def run(self): + tdLog.debug(f"start to excute {__file__}") + + # insert data + self.insertData() + + # check insert data correct + self.checkInsertCorrect() + + # save + self.snapshotAgg() + + # do action + self.doAction() + + # check save agg result correct + self.checkAggCorrect() + + # check insert correct again + self.checkInsertCorrect() + + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/community/cmdline/fullopt.py b/tests/army/community/cmdline/fullopt.py new file mode 100644 index 0000000000..22667149ae --- /dev/null +++ b/tests/army/community/cmdline/fullopt.py @@ -0,0 +1,144 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+import random
+
+import taos
+import frame.etool
+
+from frame.log import *
+from frame.sql import *
+from frame.cases import *
+from frame.caseBase import *
+from frame.srvCtl import *
+from frame import *
+
+
+class TDTestCase(TBase):
+    def insertData(self):
+        tdLog.info(f"insert data.")
+
+        # set insert data information
+        self.childtable_count = 10
+        self.insert_rows = 10000
+        self.timestamp_step = 1000
+
+        # taosBenchmark run
+        etool.benchMark(command = f"-d {self.db} -t {self.childtable_count} -n {self.insert_rows} -v 2 -y")
+
+    def doTaos(self):
+        tdLog.info(f"check taos command options...")
+        # help
+        rets = etool.runBinFile("taos", "--help")
+        self.checkListNotEmpty(rets)
+        # -B -r -w -s
+        sql = f"select * from {self.db}.{self.stb} limit 10"
+        rets = etool.runBinFile("taos", f'-B -r -w 100 -s "{sql}" ')
+        self.checkListNotEmpty(rets)
+        # -C
+        rets = etool.runBinFile("taos", "-C")
+        self.checkListNotEmpty(rets)
+        # -t
+        rets = etool.runBinFile("taos", "-t")
+        self.checkListNotEmpty(rets)
+        # -V
+        rets = etool.runBinFile("taos", "-V")
+        self.checkListNotEmpty(rets)
+        # -?
+        rets = etool.runBinFile("taos", "-?")
+        self.checkListNotEmpty(rets)
+
+        # TSDB_FQDN_LEN = 128
+        lname = "testhostnamelength"
+        lname = lname.rjust(130, 'a')
+
+        # error cases expected to fail
+        sql = f"show vgroups;"
+        etool.exeBinFile("taos", f'-h {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-u {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-d {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-a {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-p{lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-w -s "{sql}" ', wait=False)
+
+        # others
+        etool.exeBinFile("taos", f'-N 200 -l 2048 -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-n server', wait=False)
+
+    def doTaosd(self):
+        tdLog.info(f"check taosd command options...")
+        idx = 1  # dnode1
+        cfg = sc.dnodeCfgPath(idx)
+
+        # -s
+        sdb = "./sdb.json"
+        eos.delFile(sdb)
+        etool.runBinFile("taosd", f"-s -c {cfg}")
+        self.checkFileExist(sdb)
+
+        # -C
+        etool.runBinFile("taosd", "-C")
+        # -k
+        rets = etool.runBinFile("taosd", "-C")
+        self.checkListNotEmpty(rets)
+        # -V
+        rets = etool.runBinFile("taosd", "-V")
+        self.checkListNotEmpty(rets)
+        # --help
+        rets = etool.runBinFile("taosd", "--help")
+        self.checkListNotEmpty(rets)
+
+        # invalid input
+        etool.exeBinFile("taosd", "-c")
+        etool.exeBinFile("taosd", "-e")
+
+        # stop taosd
+        sc.dnodeStop(idx)
+        # other
+        etool.exeBinFile("taosd", f"-dm -c {cfg}", False)
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taosd", "-a http://192.168.1.10")
+
+        # -E / -e
+        etool.exeBinFile("taosd", f"-E abc -c {cfg}", False)
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taosd", f"-e def -c {cfg}", False)
+
+        # stop taosd, test taos as server
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taos", f'-n server', wait=False)
+
+    # run
+    def run(self):
+        tdLog.debug(f"start to execute {__file__}")
+
+        # insert data
+        self.insertData()
+
+        # do taos
+        self.doTaos()
+
+        # do taosd
+        self.doTaosd()
+
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/army/enterprise/multi-level/mlevel_basic.json b/tests/army/enterprise/multi-level/mlevel_basic.json
new file mode 100644
index 0000000000..1c2b9274d2
--- /dev/null
+++ b/tests/army/enterprise/multi-level/mlevel_basic.json
@@ -0,0 +1,58 @@
+{
+    "filetype": "insert",
+    "cfgdir": "/etc/taos",
+    "host": "127.0.0.1",
+    "port": 6030,
+    "user": "root",
+    "password": "taosdata",
+    "connection_pool_size": 8,
+    "num_of_records_per_req": 2000,
+    "thread_count": 2,
+    "create_table_thread_count": 1,
+    "confirm_parameter_prompt": "no",
+    "databases": [
+        {
+            "dbinfo": {
+                "name": "db",
+                "drop": "yes",
+                "vgroups": 2,
+                "replica": 1,
+                "duration":"1d",
+                "keep": "3d,6d,30d"
+            },
+            "super_tables": [
+                {
+                    "name": "stb",
+                    "child_table_exists": "no",
+                    "childtable_count": 4,
+                    "insert_rows": 1000000,
+                    "childtable_prefix": "d",
+                    "insert_mode": "taosc",
+                    "timestamp_step": 1000,
+                    "start_timestamp":"now-12d",
+                    "columns": [
+                        { "type": "bool", "name": "bc"},
+                        { "type": "float", "name": "fc" },
+                        { "type": "double", "name": "dc"},
+                        { "type": "tinyint", "name": "ti", "values":["1"]},
+                        { "type": "smallint", "name": "si" },
+                        { "type": "int", "name": "ic" },
+                        { "type": "bigint", "name": "bi" },
+                        { "type": "utinyint", "name": "uti"},
+                        { "type": "usmallint", "name": "usi"},
+                        { "type": "uint", "name": "ui" },
+                        { "type": "ubigint", "name": "ubi"},
+                        { "type": "binary", "name": "bin", "len": 32},
+                        { "type": "nchar", "name": "nch", "len": 64}
+                    ],
+                    "tags": [
+                        {"type": "tinyint", "name": "groupid","max": 10,"min": 1},
+                        {"name": "location","type": "binary", "len": 16, "values":
+                            ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
diff --git a/tests/army/enterprise/multi-level/mlevel_basic.py b/tests/army/enterprise/multi-level/mlevel_basic.py
index e3a3f57c74..081a1bb9bd 100644
--- a/tests/army/enterprise/multi-level/mlevel_basic.py
+++ b/tests/army/enterprise/multi-level/mlevel_basic.py
@@ -15,26 +15,62 @@
 import sys
 import time
 
 import taos
+import frame
+import frame.etool
+
 from frame.log import *
 from frame.cases import *
 from frame.sql import *
+from frame.caseBase import *
+from frame import *
 
-class TDTestCase:
-    # init
-    def init(self, conn, logSql, replicaVar=1):
-        self.replicaVar = int(replicaVar)
-        tdLog.debug(f"start to excute {__file__}")
-        tdSql.init(conn.cursor(), True)
+
+class TDTestCase(TBase):
+
+    def insertData(self):
+        tdLog.info(f"insert data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "mlevel_basic.json")
+        etool.benchMark(json=json)
+
+        tdSql.execute(f"use {self.db}")
+        # set insert data information
+        self.childtable_count = 4
+        self.insert_rows = 1000000
+        self.timestamp_step = 1000
+
+    def doAction(self):
+        tdLog.info(f"do action.")
+        self.flushDb()
+        self.trimDb()
+        self.compactDb()
 
     # run
     def run(self):
-        # check two db query result same
-        tdLog.info(f"hello world.")
+        tdLog.debug(f"start to execute {__file__}")
+
+        # insert data
+        self.insertData()
+
+        # check insert data correct
+        self.checkInsertCorrect()
+
+        # save
+        self.snapshotAgg()
+
+        # do action
+        self.doAction()
+
+        # check saved agg result correct
+        self.checkAggCorrect()
+
+        # check insert correct again
+        self.checkInsertCorrect()
 
-    # stop
-    def stop(self):
-        tdSql.close()
         tdLog.success(f"{__file__} successfully executed")
+
+
 tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/army/enterprise/s3/s3_basic.json
b/tests/army/enterprise/s3/s3_basic.json new file mode 100644 index 0000000000..e56a18e757 --- /dev/null +++ b/tests/army/enterprise/s3/s3_basic.json @@ -0,0 +1,58 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 2000, + "thread_count": 2, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 2, + "replica": 1, + "duration":"1d", + "keep": "3d,6d,30d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 4, + "insert_rows": 1000000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 1000, + "start_timestamp":"now-13d", + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti", "values":["1"]}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" }, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 32}, + { "type": "nchar", "name": "nch", "len": 64} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/army/enterprise/s3/s3_basic.py b/tests/army/enterprise/s3/s3_basic.py new file mode 100644 index 0000000000..976ad85747 --- /dev/null +++ b/tests/army/enterprise/s3/s3_basic.py @@ -0,0 +1,144 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+
+import taos
+import frame
+import frame.etool
+import frame.eos
+
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame.srvCtl import *
+from frame import *
+from frame.eos import *
+
+#
+# 192.168.1.52 MINIO S3 API KEY: MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa
+#
+
+'''
+s3EndPoint       http://192.168.1.52:9000
+s3AccessKey      MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa
+s3BucketName     ci-bucket
+s3UploadDelaySec 60
+'''
+
+
+class TDTestCase(TBase):
+    updatecfgDict = {
+        's3EndPoint': 'http://192.168.1.52:9000',
+        's3AccessKey': 'MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa',
+        's3BucketName': 'ci-bucket',
+        's3BlockSize': '10240',
+        's3BlockCacheSize': '320',
+        's3PageCacheSize': '10240',
+        's3UploadDelaySec': '60'
+    }
+
+    def insertData(self):
+        tdLog.info(f"insert data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "s3_basic.json")
+        etool.benchMark(json=json)
+
+        tdSql.execute(f"use {self.db}")
+        # come from s3_basic.json
+        self.childtable_count = 4
+        self.insert_rows = 1000000
+        self.timestamp_step = 1000
+
+    def createStream(self, sname):
+        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
+        tdSql.execute(sql)
+
+    def doAction(self):
+        tdLog.info(f"do action.")
+
+        self.flushDb()
+        self.compactDb()
+
+        # sleep 65s
+        tdLog.info(f"wait 65s ...")
+        time.sleep(65)
+        self.trimDb(True)
+
+        rootPath = sc.clusterRootPath()
+        cmd = f"ls {rootPath}/dnode1/data20/vnode/vnode*/tsdb/*.data"
+        tdLog.info(cmd)
+        loop = 0
+        while len(eos.runRetList(cmd)) > 0 and loop < 40:
+            time.sleep(5)
+            self.trimDb(True)
+            loop += 1
+            tdLog.info(f"loop={loop} wait 5s...")
+
+    def checkStreamCorrect(self):
+        sql = f"select count(*) from {self.db}.stm1"
+        count = 0
+        for i in range(120):
+            tdSql.query(sql)
+            count = tdSql.getData(0, 0)
+            if count == 100000 or count == 100001:
+                return True
+            time.sleep(1)
+
+        tdLog.exit(f"stream count is not as expected. expect=100000 or 100001 real={count} sql={sql}")
+
+    # run
+    def run(self):
+        tdLog.debug(f"start to execute {__file__}")
+        self.sname = "stream1"
+        if eos.isArm64Cpu():
+            tdLog.success(f"{__file__} ignored on arm64")
+        else:
+            # insert data
+            self.insertData()
+
+            # create stream
+            self.createStream(self.sname)
+
+            # check insert data correct
+            self.checkInsertCorrect()
+
+            # save
+            self.snapshotAgg()
+
+            # do action
+            self.doAction()
+
+            # check saved agg result correct
+            self.checkAggCorrect()
+
+            # check insert correct again
+            self.checkInsertCorrect()
+
+            # check stream correct and drop stream
+            self.checkStreamCorrect()
+
+            # drop stream
+            self.dropStream(self.sname)
+
+            # drop database and free s3 file
+            self.dropDb()
+
+            tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/army/frame/caseBase.py b/tests/army/frame/caseBase.py
new file mode 100644
index 0000000000..ec6b36aa1b
--- /dev/null
+++ b/tests/army/frame/caseBase.py
@@ -0,0 +1,204 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import random
+
+from frame.log import *
+from frame.sql import *
+
+# test case base
+class TBase:
+
+#
+# frame call
+#
+
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        # save param
+        self.replicaVar = int(replicaVar)
+        tdSql.init(conn.cursor(), True)
+
+        # record server information
+        self.dnodeNum = 0
+        self.mnodeNum = 0
+        self.mLevel = 0
+        self.mLevelDisk = 0
+
+        # test case information
+        self.db = "db"
+        self.stb = "stb"
+
+        # sql
+        self.sqlSum = f"select sum(ic) from {self.stb}"
+        self.sqlMax = f"select max(ic) from {self.stb}"
+        self.sqlMin = f"select min(ic) from {self.stb}"
+        self.sqlAvg = f"select avg(ic) from {self.stb}"
+        self.sqlFirst = f"select first(ts) from {self.stb}"
+        self.sqlLast = f"select last(ts) from {self.stb}"
+
+    # stop
+    def stop(self):
+        tdSql.close()
+
+
+#
+# db action
+#
+
+    def trimDb(self, show = False):
+        tdSql.execute(f"trim database {self.db}", show = show)
+
+    def compactDb(self, show = False):
+        tdSql.execute(f"compact database {self.db}", show = show)
+
+    def flushDb(self, show = False):
+        tdSql.execute(f"flush database {self.db}", show = show)
+
+    def dropDb(self, show = False):
+        tdSql.execute(f"drop database {self.db}", show = show)
+
+    def dropStream(self, sname, show = False):
+        tdSql.execute(f"drop stream {sname}", show = show)
+
+    def splitVGroups(self):
+        vgids = self.getVGroup(self.db)
+        selid = random.choice(vgids)
+        sql = f"split vgroup {selid}"
+        tdSql.execute(sql, show=True)
+        if self.waitTransactionZero() is False:
+            tdLog.exit(f"{sql} transaction not finished")
+            return False
+        return True
+
+    def alterReplica(self, replica):
+        sql = f"alter database {self.db} replica {replica}"
+        tdSql.execute(sql, show=True)
+        if self.waitTransactionZero() is False:
+            tdLog.exit(f"{sql} transaction not finished")
+            return False
+        return True
+
+    def balanceVGroup(self):
+        sql = f"balance vgroup"
+        tdSql.execute(sql, show=True)
+        if self.waitTransactionZero() is False:
+            tdLog.exit(f"{sql} transaction not finished")
+            return False
+        return True
+
+    def balanceVGroupLeader(self):
+        sql = f"balance vgroup leader"
+        tdSql.execute(sql, show=True)
+        if self.waitTransactionZero() is False:
+            tdLog.exit(f"{sql} transaction not finished")
+            return False
+        return True
+
+    def balanceVGroupLeaderOn(self, vgId):
+        sql = f"balance vgroup leader on {vgId}"
+        tdSql.execute(sql, show=True)
+        if self.waitTransactionZero() is False:
+            tdLog.exit(f"{sql} transaction not finished")
+            return False
+        return True
+
+
+#
+# check db correct
+#
+
+    # basic
+    def checkInsertCorrect(self):
+        # check count
+        sql = f"select count(*) from {self.stb}"
+        tdSql.checkAgg(sql, self.childtable_count * self.insert_rows)
+
+        # check child table count
+        sql = f" select count(*) from (select count(*) as cnt , tbname from {self.stb} group by tbname) where cnt = {self.insert_rows} "
+        tdSql.checkAgg(sql, self.childtable_count)
+
+        # check step
+        sql = f"select count(*) from (select diff(ts) as dif from {self.stb} partition by tbname) where dif != {self.timestamp_step}"
+        tdSql.checkAgg(sql, 0)
+
+    # save agg result
+    def snapshotAgg(self):
+        self.sum = tdSql.getFirstValue(self.sqlSum)
+        self.avg = tdSql.getFirstValue(self.sqlAvg)
+        self.min = tdSql.getFirstValue(self.sqlMin)
+        self.max = tdSql.getFirstValue(self.sqlMax)
+        self.first = tdSql.getFirstValue(self.sqlFirst)
+        self.last = tdSql.getFirstValue(self.sqlLast)
+
+    # check agg
+    def checkAggCorrect(self):
+        tdSql.checkAgg(self.sqlSum, self.sum)
+        tdSql.checkAgg(self.sqlAvg, self.avg)
+        tdSql.checkAgg(self.sqlMin, self.min)
+        tdSql.checkAgg(self.sqlMax, self.max)
+        tdSql.checkAgg(self.sqlFirst, self.first)
+        tdSql.checkAgg(self.sqlLast, self.last)
+
+
+#
+# get db information
+#
+
+    # get vgroups
+    def getVGroup(self, db_name):
+        vgidList = []
+        sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{db_name}'"
+        res = tdSql.getResult(sql)
+        rows = len(res)
+        for i in range(rows):
+            vgidList.append(res[i][0])
+
+        return vgidList
+
+
+#
+# util
+#
+
+    # wait for the transaction count to reach zero; return False if transactions did not finish
+    def waitTransactionZero(self, seconds = 300, interval = 1):
+        # wait end
+        for i in range(seconds):
+            sql = "show transactions;"
+            rows = tdSql.query(sql)
+            if rows == 0:
+                tdLog.info("transaction count became zero.")
+                return True
+            #tdLog.info(f"i={i} wait ...")
+            time.sleep(interval)
+
+        return False
+
+    # check file exists
+    def checkFileExist(self, pathFile):
+        if os.path.exists(pathFile) == False:
+            tdLog.exit(f"file not exist {pathFile}")
+
+    # check list is not empty
+    def checkListNotEmpty(self, lists, tips=""):
+        if len(lists) == 0:
+            tdLog.exit(f"list is empty {tips}")
+
diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py
index 93059ff078..5cdb3f9f46 100644
--- a/tests/army/frame/common.py
+++ b/tests/army/frame/common.py
@@ -23,7 +23,7 @@ import taos
 from frame.log import *
 from frame.sql import *
 from frame.cases import *
-from frame.dnodes import *
+from frame.server.dnodes import *
 from frame.common import *
 from frame.constant import *
 from dataclasses import dataclass,field
diff --git a/tests/army/frame/dnodes-default.py b/tests/army/frame/dnodes-default.py
deleted file mode 100644
index a1d1698b00..0000000000
--- a/tests/army/frame/dnodes-default.py
+++ /dev/null
@@ -1,502 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -import os.path -import subprocess -from frame.log import * - - -class TDSimClient: - def __init__(self): - self.testCluster = False - - self.cfgDict = { - "numOfLogLines": "100000000", - "numOfThreadsPerCore": "2.0", - "locale": "en_US.UTF-8", - "charset": "UTF-8", - "asyncLog": "0", - "minTablesPerVnode": "4", - "maxTablesPerVnode": "1000", - "tableIncStepPerVnode": "10000", - "maxVgroupsPerDb": "1000", - "sdbDebugFlag": "143", - "rpcDebugFlag": "135", - "tmrDebugFlag": "131", - "cDebugFlag": "135", - "udebugFlag": "135", - "jnidebugFlag": "135", - "qdebugFlag": "135", - "telemetryReporting": "0", - } - def init(self, path): - self.__init__() - self.path = path - - def getLogDir(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - return self.logDir - - def getCfgDir(self): - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - return self.cfgDir - - def setTestCluster(self, value): - self.testCluster = value - - def addExtraCfg(self, option, value): - self.cfgDict.update({option: value}) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("logDir", self.logDir) - - for key, value in self.cfgDict.items(): - self.cfg(key, value) - - tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) - - -class TDDnode: - def __init__(self, index): - self.index = index - self.running = 0 - self.deployed = 0 - self.testCluster = False - self.valgrind = 0 - - def init(self, path): - self.path = path - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def getDataSize(self): - totalSize = 0 - - if (self.deployed == 1): - for dirpath, dirnames, filenames in os.walk(self.dataDir): - for f in filenames: - fp = os.path.join(dirpath, f) - - if not os.path.islink(fp): - totalSize = totalSize + os.path.getsize(fp) - - return totalSize - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") - self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") - self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") - self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") - - cmd = "rm -rf " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf 
" + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.startIP() - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("publicIp", "192.168.0.%d" % (self.index)) - self.cfg("internalIp", "192.168.0.%d" % (self.index)) - self.cfg("privateIp", "192.168.0.%d" % (self.index)) - self.cfg("dataDir", self.dataDir) - self.cfg("logDir", self.logDir) - self.cfg("numOfLogLines", "100000000") - self.cfg("mnodeEqualVnodeNum", "0") - self.cfg("walLevel", "2") - self.cfg("fsync", "1000") - self.cfg("statusInterval", "1") - self.cfg("numOfMnodes", "3") - self.cfg("numOfThreadsPerCore", "2.0") - self.cfg("monitor", "0") - self.cfg("maxVnodeConnections", "30000") - self.cfg("maxMgmtConnections", "30000") - self.cfg("maxMeterConnections", "30000") - self.cfg("maxShellConns", "30000") - self.cfg("locale", "en_US.UTF-8") - self.cfg("charset", "UTF-8") - self.cfg("asyncLog", "0") - self.cfg("anyIp", "0") - self.cfg("dDebugFlag", "135") - self.cfg("mDebugFlag", "135") - self.cfg("sdbDebugFlag", "135") - self.cfg("rpcDebugFlag", "135") - self.cfg("tmrDebugFlag", "131") - self.cfg("cDebugFlag", "135") - self.cfg("httpDebugFlag", "135") - self.cfg("monitorDebugFlag", "135") - self.cfg("udebugFlag", "135") - self.cfg("jnidebugFlag", "135") - self.cfg("qdebugFlag", "135") - self.deployed = 1 - tdLog.debug( - "dnode:%d is deployed and configured by %s" % - (self.index, self.cfgPath)) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def start(self): - buildPath = self.getBuildPath() - - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - - binPath = buildPath + "/build/bin/taosd" - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) - else: - valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" - - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) - - print(cmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - - tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) - time.sleep(5) - - def stop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -INT %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) - - def forcestop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) - - def startIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def stopIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( - self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def getDnodeRootDir(self, index): - dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) - return dnodeRootDir - - def getDnodesRootDir(self): - dnodesRootDir = os.path.join(self.path,"sim","psim") - return dnodesRootDir - - -class TDDnodes: - def __init__(self): - self.dnodes = [] - self.dnodes.append(TDDnode(1)) - self.dnodes.append(TDDnode(2)) - self.dnodes.append(TDDnode(3)) - self.dnodes.append(TDDnode(4)) - self.dnodes.append(TDDnode(5)) - self.dnodes.append(TDDnode(6)) - self.dnodes.append(TDDnode(7)) - self.dnodes.append(TDDnode(8)) - self.dnodes.append(TDDnode(9)) - self.dnodes.append(TDDnode(10)) - self.simDeployed = False - - def init(self, path): - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - binPath = os.path.dirname(os.path.realpath(__file__)) - binPath = binPath + "/../../../debug/" - tdLog.debug("binPath %s" % (binPath)) - binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) - - # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) - # tdLog.debug(cmd) - # os.system(cmd) - - # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - if path == "": - # self.path = os.path.expanduser('~') - self.path = os.path.abspath(binPath + "../../") - else: - self.path = os.path.realpath(path) - - for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) - - self.sim = TDSimClient() - self.sim.init(self.path) - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def deploy(self, index): - self.sim.setTestCluster(self.testCluster) - - if (self.simDeployed == False): - self.sim.deploy() - self.simDeployed = True - - self.check(index) - self.dnodes[index - 1].setTestCluster(self.testCluster) - self.dnodes[index - 1].setValgrind(self.valgrind) - self.dnodes[index - 1].deploy() - - def cfg(self, index, option, value): - self.check(index) - self.dnodes[index - 1].cfg(option, value) - - def start(self, index): - self.check(index) - self.dnodes[index - 1].start() - - def stop(self, index): - self.check(index) - self.dnodes[index - 1].stop() - - def getDataSize(self, index): - self.check(index) - return self.dnodes[index - 1].getDataSize() - - def forcestop(self, index): - self.check(index) - self.dnodes[index - 1].forcestop() - - def startIP(self, index): - self.check(index) - - if self.testCluster: - self.dnodes[index - 1].startIP() - - def stopIP(self, index): - self.check(index) - - if self.dnodes[index - 1].testCluster: - self.dnodes[index - 1].stopIP() - - def check(self, index): - if index < 1 or index > 10: - tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) - - def stopAll(self): - tdLog.info("stop all dnodes") - for i in range(len(self.dnodes)): - self.dnodes[i].stop() - - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - if processID: - cmd = "sudo systemctl stop taosd" - os.system(cmd) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - - def getDnodesRootDir(self): - dnodesRootDir = "%s/sim" % (self.path) - return dnodesRootDir - - def getSimCfgPath(self): - return self.sim.getCfgDir() - - def getSimLogPath(self): - return self.sim.getLogDir() - - def addSimExtraCfg(self, option, value): - self.sim.addExtraCfg(option, value) - - -tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-no-random-fail.py b/tests/army/frame/dnodes-no-random-fail.py deleted file mode 100644 index 3b3083396b..0000000000 --- a/tests/army/frame/dnodes-no-random-fail.py +++ /dev/null @@ -1,500 +0,0 @@ 
-################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -import os.path -import subprocess -from frame.log import * - - -class TDSimClient: - def __init__(self): - self.testCluster = False - - self.cfgDict = { - "numOfLogLines": "100000000", - "numOfThreadsPerCore": "2.0", - "locale": "en_US.UTF-8", - "charset": "UTF-8", - "asyncLog": "0", - "anyIp": "0", - "sdbDebugFlag": "135", - "rpcDebugFlag": "135", - "tmrDebugFlag": "131", - "cDebugFlag": "135", - "udebugFlag": "135", - "jnidebugFlag": "135", - "qdebugFlag": "135", - "telemetryReporting": "0", - } - - def init(self, path): - self.__init__() - self.path = path - - def getLogDir(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - return self.logDir - - def getCfgDir(self): - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - return self.cfgDir - - def setTestCluster(self, value): - self.testCluster = value - - def addExtraCfg(self, option, value): - self.cfgDict.update({option: value}) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("logDir", self.logDir) - - for key, value in self.cfgDict.items(): - self.cfg(key, value) - - tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) - - -class TDDnode: - def __init__(self, index): - self.index = index - self.running = 0 - self.deployed = 0 - self.testCluster = False - self.valgrind = 0 - - def init(self, path): - self.path = path - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def getDataSize(self): - totalSize = 0 - - if (self.deployed == 1): - for dirpath, dirnames, filenames in os.walk(self.dataDir): - for f in filenames: - fp = os.path.join(dirpath, f) - - if not os.path.islink(fp): - totalSize = totalSize + os.path.getsize(fp) - - return totalSize - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") - self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") - self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") - self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") - - cmd = "rm -rf " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: 
- tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.startIP() - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("publicIp", "192.168.0.%d" % (self.index)) - self.cfg("internalIp", "192.168.0.%d" % (self.index)) - self.cfg("privateIp", "192.168.0.%d" % (self.index)) - self.cfg("dataDir", self.dataDir) - self.cfg("logDir", self.logDir) - self.cfg("numOfLogLines", "100000000") - self.cfg("mnodeEqualVnodeNum", "0") - self.cfg("walLevel", "2") - self.cfg("fsync", "1000") - self.cfg("statusInterval", "1") - self.cfg("numOfMnodes", "3") - self.cfg("numOfThreadsPerCore", "2.0") - self.cfg("monitor", "0") - self.cfg("maxVnodeConnections", "30000") - self.cfg("maxMgmtConnections", "30000") - self.cfg("maxMeterConnections", "30000") - self.cfg("maxShellConns", "30000") - self.cfg("locale", "en_US.UTF-8") - self.cfg("charset", "UTF-8") - self.cfg("asyncLog", "0") - self.cfg("anyIp", "0") - self.cfg("dDebugFlag", "135") - self.cfg("mDebugFlag", "135") - self.cfg("sdbDebugFlag", "135") - self.cfg("rpcDebugFlag", "135") - self.cfg("tmrDebugFlag", "131") - self.cfg("cDebugFlag", "135") - self.cfg("httpDebugFlag", "135") - self.cfg("monitorDebugFlag", "135") - self.cfg("udebugFlag", "135") - self.cfg("jnidebugFlag", "135") - self.cfg("qdebugFlag", "135") - self.deployed = 1 - tdLog.debug( - "dnode:%d is deployed and configured by %s" % - (self.index, self.cfgPath)) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def start(self): - buildPath = self.getBuildPath() - - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - - binPath = buildPath + "/build/bin/taosd" - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - if self.valgrind == 0: - cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) - else: - valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" - - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) - - print(cmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - - tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) - time.sleep(5) - - def stop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -INT %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) - - def forcestop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) - - def startIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def stopIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( - self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def getDnodeRootDir(self, index): - dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) - return dnodeRootDir - - def getDnodesRootDir(self): - dnodesRootDir = os.path.join(self.path,"sim","psim") - return dnodesRootDir - - -class TDDnodes: - def __init__(self): - self.dnodes = [] - self.dnodes.append(TDDnode(1)) - self.dnodes.append(TDDnode(2)) - self.dnodes.append(TDDnode(3)) - self.dnodes.append(TDDnode(4)) - self.dnodes.append(TDDnode(5)) - self.dnodes.append(TDDnode(6)) - self.dnodes.append(TDDnode(7)) - self.dnodes.append(TDDnode(8)) - self.dnodes.append(TDDnode(9)) - self.dnodes.append(TDDnode(10)) - self.simDeployed = False - - def init(self, path): - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - binPath = os.path.dirname(os.path.realpath(__file__)) - binPath = binPath + "/../../../debug/" - tdLog.debug("binPath %s" % (binPath)) - binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) - - # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) - # tdLog.debug(cmd) - # os.system(cmd) - - # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - if path == "": - # self.path = os.path.expanduser('~') - self.path = os.path.abspath(binPath + "../../") - else: - self.path = os.path.realpath(path) - - for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) - - self.sim = TDSimClient() - self.sim.init(self.path) - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def deploy(self, index): - self.sim.setTestCluster(self.testCluster) - - if (self.simDeployed == False): - self.sim.deploy() - self.simDeployed = True - - self.check(index) - self.dnodes[index - 1].setTestCluster(self.testCluster) - self.dnodes[index - 1].setValgrind(self.valgrind) - self.dnodes[index - 1].deploy() - - def cfg(self, index, option, value): - self.check(index) - self.dnodes[index - 1].cfg(option, value) - - def start(self, index): - self.check(index) - self.dnodes[index - 1].start() - - def stop(self, index): - self.check(index) - self.dnodes[index - 1].stop() - - def getDataSize(self, index): - self.check(index) - return self.dnodes[index - 1].getDataSize() - - def forcestop(self, index): - self.check(index) - self.dnodes[index - 1].forcestop() - - def startIP(self, index): - self.check(index) - - if self.testCluster: - self.dnodes[index - 1].startIP() - - def stopIP(self, index): - self.check(index) - - if self.dnodes[index - 1].testCluster: - self.dnodes[index - 1].stopIP() - - def check(self, index): - if index < 1 or index > 10: - tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) - - def stopAll(self): - tdLog.info("stop all dnodes") - for i in range(len(self.dnodes)): - self.dnodes[i].stop() - - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - if processID: - cmd = "sudo systemctl stop taosd" - os.system(cmd) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - - def getDnodesRootDir(self): - dnodesRootDir = "%s/sim" % (self.path) - return dnodesRootDir - - def getSimCfgPath(self): - return self.sim.getCfgDir() - - def getSimLogPath(self): - return self.sim.getLogDir() - - def addSimExtraCfg(self, option, value): - self.sim.addExtraCfg(option, value) - - -tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-random-fail.py b/tests/army/frame/dnodes-random-fail.py deleted file mode 100644 index 794adfd7ed..0000000000 --- a/tests/army/frame/dnodes-random-fail.py +++ /dev/null @@ -1,497 +0,0 @@ 
-################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -import os.path -import subprocess -from frame.log import * - - -class TDSimClient: - def __init__(self): - self.testCluster = False - - self.cfgDict = { - "numOfLogLines": "100000000", - "locale": "en_US.UTF-8", - "charset": "UTF-8", - "asyncLog": "0", - "rpcDebugFlag": "135", - "tmrDebugFlag": "131", - "cDebugFlag": "135", - "udebugFlag": "135", - "jnidebugFlag": "135", - "qdebugFlag": "135", - "telemetryReporting": "0", - } - - def init(self, path): - self.__init__() - self.path = path - - def getLogDir(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - return self.logDir - - def getCfgDir(self): - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - return self.cfgDir - - def setTestCluster(self, value): - self.testCluster = value - - def addExtraCfg(self, option, value): - self.cfgDict.update({option: value}) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("logDir", self.logDir) - - for key, value in self.cfgDict.items(): - self.cfg(key, value) - - tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) - - -class TDDnode: - def __init__(self, index): - self.index = index - self.running = 0 - self.deployed = 0 - self.testCluster = False - self.valgrind = 0 - - def init(self, path): - self.path = path - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def getDataSize(self): - totalSize = 0 - - if (self.deployed == 1): - for dirpath, dirnames, filenames in os.walk(self.dataDir): - for f in filenames: - fp = os.path.join(dirpath, f) - - if not os.path.islink(fp): - totalSize = totalSize + os.path.getsize(fp) - - return totalSize - - def deploy(self): - self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") - self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") - self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") - self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") - - cmd = "rm -rf " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) 
!= 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.startIP() - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("publicIp", "192.168.0.%d" % (self.index)) - self.cfg("internalIp", "192.168.0.%d" % (self.index)) - self.cfg("privateIp", "192.168.0.%d" % (self.index)) - self.cfg("dataDir", self.dataDir) - self.cfg("logDir", self.logDir) - self.cfg("numOfLogLines", "100000000") - self.cfg("mnodeEqualVnodeNum", "0") - self.cfg("walLevel", "2") - self.cfg("fsync", "1000") - self.cfg("statusInterval", "1") - self.cfg("numOfMnodes", "3") - self.cfg("numOfThreadsPerCore", "2.0") - self.cfg("monitor", "0") - self.cfg("maxVnodeConnections", "30000") - self.cfg("maxMgmtConnections", "30000") - self.cfg("maxMeterConnections", "30000") - self.cfg("maxShellConns", "30000") - self.cfg("locale", "en_US.UTF-8") - self.cfg("charset", "UTF-8") - self.cfg("asyncLog", "0") - self.cfg("anyIp", "0") - self.cfg("dDebugFlag", "135") - self.cfg("mDebugFlag", "135") - self.cfg("sdbDebugFlag", "135") - self.cfg("rpcDebugFlag", "135") - self.cfg("tmrDebugFlag", "131") - self.cfg("cDebugFlag", "135") - self.cfg("httpDebugFlag", "135") - self.cfg("monitorDebugFlag", "135") - self.cfg("udebugFlag", "135") - self.cfg("jnidebugFlag", "135") - self.cfg("qdebugFlag", "135") - self.deployed = 1 - tdLog.debug( - "dnode:%d is deployed and configured by %s" % - (self.index, self.cfgPath)) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def start(self): - buildPath = self.getBuildPath() - - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - - binPath = buildPath + "/build/bin/taosd" - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - if self.valgrind == 0: - cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) - else: - valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" - - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) - - print(cmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - - tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) - time.sleep(5) - - def stop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -INT %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) - - def forcestop(self): - if self.valgrind == 0: - toBeKilled = "taosd" - else: - toBeKilled = "valgrind.bin" - - if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) - if self.valgrind: - time.sleep(2) - - self.running = 0 - tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) - - def startIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def stopIP(self): - cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( - self.index, self.index) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def getDnodeRootDir(self, index): - dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) - return dnodeRootDir - - def getDnodesRootDir(self): - dnodesRootDir = os.path.join(self.path,"sim","psim") - return dnodesRootDir - - -class TDDnodes: - def __init__(self): - self.dnodes = [] - self.dnodes.append(TDDnode(1)) - self.dnodes.append(TDDnode(2)) - self.dnodes.append(TDDnode(3)) - self.dnodes.append(TDDnode(4)) - self.dnodes.append(TDDnode(5)) - self.dnodes.append(TDDnode(6)) - self.dnodes.append(TDDnode(7)) - self.dnodes.append(TDDnode(8)) - self.dnodes.append(TDDnode(9)) - self.dnodes.append(TDDnode(10)) - self.simDeployed = False - - def init(self, path): - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - binPath = os.path.dirname(os.path.realpath(__file__)) - binPath = binPath + "/../../../debug/" - tdLog.debug("binPath %s" % (binPath)) - binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) - - # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) - # tdLog.debug(cmd) - # os.system(cmd) - - # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - # tdLog.debug("execute %s" % (cmd)) - - if path == "": - # self.path = os.path.expanduser('~') - self.path = os.path.abspath(binPath + "../../") - else: - self.path = os.path.realpath(path) - - for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) - - self.sim = TDSimClient() - self.sim.init(self.path) - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def deploy(self, index): - self.sim.setTestCluster(self.testCluster) - - if (self.simDeployed == False): - self.sim.deploy() - self.simDeployed = True - - self.check(index) - self.dnodes[index - 1].setTestCluster(self.testCluster) - self.dnodes[index - 1].setValgrind(self.valgrind) - self.dnodes[index - 1].deploy() - - def cfg(self, index, option, value): - self.check(index) - self.dnodes[index - 1].cfg(option, value) - - def start(self, index): - self.check(index) - self.dnodes[index - 1].start() - - def stop(self, index): - self.check(index) - self.dnodes[index - 1].stop() - - def getDataSize(self, index): - self.check(index) - return self.dnodes[index - 1].getDataSize() - - def forcestop(self, index): - self.check(index) - self.dnodes[index - 1].forcestop() - - def startIP(self, index): - self.check(index) - - if self.testCluster: - self.dnodes[index - 1].startIP() - - def stopIP(self, index): - self.check(index) - - if self.dnodes[index - 1].testCluster: - self.dnodes[index - 1].stopIP() - - def check(self, index): - if index < 1 or index > 10: - tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) - - def stopAll(self): - tdLog.info("stop all dnodes") - for i in range(len(self.dnodes)): - self.dnodes[i].stop() - - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - if processID: - cmd = "sudo systemctl stop taosd" - os.system(cmd) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - - def getDnodesRootDir(self): - dnodesRootDir = "%s/sim" % (self.path) - return dnodesRootDir - - def getSimCfgPath(self): - return self.sim.getCfgDir() - - def getSimLogPath(self): - return self.sim.getLogDir() - - def addSimExtraCfg(self, option, value): - self.sim.addExtraCfg(option, value) - - -tdDnodes = TDDnodes() diff --git a/tests/army/frame/eos.py b/tests/army/frame/eos.py new file mode 100644 index 0000000000..802b62e052 --- /dev/null +++ b/tests/army/frame/eos.py @@ -0,0 +1,87 @@ +################################################################### +# Copyright (c) 2023 by 
TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +# +# about system function extension +# + +import sys +import os +import time +import datetime +import platform +import subprocess + +# +# platform +# + +# return True if running on the Windows platform +def isWin(): + return platform.system().lower() == 'windows' + +def isArm64Cpu(): + system = platform.system() + + if system == 'Linux': + machine = platform.machine().lower() + + # Check for ARM64 architecture on Linux systems + return machine in ['aarch64', 'armv8l'] + elif system == 'Darwin' or system == 'Windows': + processor = platform.processor().lower() + + # Check for ARM64 architecture on macOS and Windows systems + return processor in ['arm64', 'aarch64'] + else: + print("Unsupported operating system") + return False + +# +# execute program +# + +# wait until the executed command finishes +def exe(file): + return os.system(file) + +# execute file and return immediately +def exeNoWait(file): + if isWin(): + cmd = f"mintty -h never {file}" + else: + cmd = f"nohup {file} > /dev/null 2>&1 & " + return exe(cmd) + +# run command and return its output and error +def run(command, timeout = 10): + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process.wait(timeout) + + output = process.stdout.read().decode(encoding="gbk") + error = process.stderr.read().decode(encoding="gbk") + + return output, error + + +# run command and return its output as a list of lines +def runRetList(command, timeout=10): + output, error = run(command, timeout) + return output.splitlines() + +# +# file +# + +def delFile(file): + return exe(f"rm -rf {file}") \ No newline at end of file diff --git a/tests/army/frame/epath.py b/tests/army/frame/epath.py new file mode 100644 index 0000000000..c7796befbc --- /dev/null +++ b/tests/army/frame/epath.py @@ -0,0 +1,56 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies.
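The eos helpers above are thin wrappers over `os.system` and `subprocess.Popen`. A minimal usage sketch follows; the shell commands are hypothetical, and it assumes the `frame` package is importable from the test runner's working directory, as the other army tests do:

```python
# Hypothetical usage of the frame.eos helpers defined above.
import frame.eos as eos

if not eos.isWin():
    # exe() blocks until the shell command finishes and returns its exit status
    status = eos.exe("ls /tmp > /dev/null")

    # runRetList() captures stdout and splits it into a list of lines
    lines = eos.runRetList("ls /tmp", timeout=5)
    print(f"status={status}, {len(lines)} entries")
```

Note that run() decodes both streams as gbk; since gbk is a superset of ASCII, plain English tool output decodes the same way on Linux.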
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +# +# about path function extension +# + +import os +from frame.log import * + +# build/bin path +binDir = "" + +def binPath(): + global binDir + + if binDir != "": + return binDir + + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community/tests" in selfPath): + projPath = selfPath[:selfPath.find("community/tests")] + else: + projPath = selfPath[:selfPath.find("TDengine/tests")] + + buildPath = "" # keep defined even when taosd is not found in the walk below + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + # check + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info(f"taosd found in {buildPath}") + # return + binDir = buildPath + "/build/bin/" + return binDir + +def binFile(filename): + return binPath() + filename + + + diff --git a/tests/army/frame/etime.py b/tests/army/frame/etime.py new file mode 100644 index 0000000000..2ee223e122 --- /dev/null +++ b/tests/army/frame/etime.py @@ -0,0 +1,23 @@ +################################################################### +# Copyright (c) 2023 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +# +# about tools function extension +# + +import sys +import os +import time +import datetime + + diff --git a/tests/army/frame/etool.py b/tests/army/frame/etool.py new file mode 100644 index 0000000000..0650d4a0e2 --- /dev/null +++ b/tests/army/frame/etool.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2023 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies.
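binPath() above memoizes the discovered build directory in the module-level binDir, so the os.walk over the source tree runs at most once per process; callers resolve tools through binFile(). A short usage sketch (the tool names are real binaries in this repo; the surrounding script is hypothetical):

```python
# Hypothetical caller: resolve build/bin tools once, then reuse the cache.
import frame.epath as epath

taosd_path = epath.binFile("taosd")          # first call walks the source tree
bench_path = epath.binFile("taosBenchmark")  # later calls hit the cached binDir
print(taosd_path, bench_path)
```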
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +# +# about system function extension +# + +import sys +import os +import time +import datetime +import frame.epath +import frame.eos +from frame.log import * + +# run taosBenchmark with command or json file mode +def benchMark(command = "", json = ""): + # get taosBenchmark path + bmFile = frame.epath.binFile("taosBenchmark") + if frame.eos.isWin(): + bmFile += ".exe" + + # run + if command != "": + frame.eos.exe(bmFile + " " + command) + if json != "": + cmd = f"{bmFile} -f {json}" + print(cmd) + status = frame.eos.exe(cmd) + if status != 0: + tdLog.exit(f"run failed {cmd} status={status}") + + +# build the path of a file located in the same directory as fullPath +def curFile(fullPath, filename): + return os.path.dirname(fullPath) + "/" + filename + + +# run build/bin file +def runBinFile(fname, command, show=True): + binFile = frame.epath.binFile(fname) + if frame.eos.isWin(): + binFile += ".exe" + + cmd = f"{binFile} {command}" + if show: + tdLog.info(cmd) + return frame.eos.runRetList(cmd) + +# exe build/bin file +def exeBinFile(fname, command, wait=True, show=True): + binFile = frame.epath.binFile(fname) + if frame.eos.isWin(): + binFile += ".exe" + + cmd = f"{binFile} {command}" + if wait: + if show: + tdLog.info("wait exe:" + cmd) + return frame.eos.exe(cmd) + else: + if show: + tdLog.info("no wait exe:" + cmd) + return frame.eos.exeNoWait(cmd) \ No newline at end of file diff --git a/tests/army/frame/pathFinding.py b/tests/army/frame/pathFinding.py deleted file mode 100644 index df03f0ed68..0000000000 --- a/tests/army/frame/pathFinding.py +++ /dev/null @@ -1,83 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - - -import os -from frame.log import * - - - -class TDFindPath: - """This class is for finding path within TDengine - """ - def __init__(self): - self.file = "" - - - def init(self, file): - """[summary] - - Args: - file (str): the file location you want to start the query.
Generally using __file__ - """ - self.file = file - - def getTaosdemoPath(self): - """for finding the path of directory containing taosdemo - - Returns: - str: the path to directory containing taosdemo - """ - selfPath = os.path.dirname(os.path.realpath(self.file)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info(f"taosd found in {buildPath}") - return buildPath + "/build/bin/" - - def getTDenginePath(self): - """for finding the root path of TDengine - - Returns: - str: the root path of TDengine - """ - selfPath = os.path.dirname(os.path.realpath(self.file)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - print(projPath) - for root, dirs, files in os.walk(projPath): - if ("sim" in dirs): - print(root) - rootRealPath = os.path.realpath(root) - if (rootRealPath == ""): - tdLog.exit("TDengine not found!") - else: - tdLog.info(f"TDengine found in {rootRealPath}") - return rootRealPath - -tdFindPath = TDFindPath() \ No newline at end of file diff --git a/tests/army/frame/cluster.py b/tests/army/frame/server/cluster.py similarity index 91% rename from tests/army/frame/cluster.py rename to tests/army/frame/server/cluster.py index 4da53840c0..ade8ac39a2 100644 --- a/tests/army/frame/cluster.py +++ b/tests/army/frame/server/cluster.py @@ -8,7 +8,7 @@ import socket from frame.log import * from frame.sql import * from frame.cases import * -from frame.dnodes import * +from frame.server.dnodes import * from frame.common import * class ClusterDnodes(TDDnodes): @@ -35,17 +35,21 @@ class ConfigureyCluster: self.startPort = 6030 self.portStep = 100 self.mnodeNums = 0 + self.level = 0 + self.disk = 0 - def configure_cluster(self ,dnodeNums=5,mnodeNums=0,independentMnode=True,startPort=6030,portStep=100,hostname="%s"%hostname): + def configure_cluster(self ,dnodeNums=5, mnodeNums=0, independentMnode=True, startPort=6030, portStep=100, hostname="%s"%hostname, level=1, disk=1): self.startPort=int(startPort) self.portStep=int(portStep) self.hostname=hostname self.dnodeNums = int(dnodeNums) self.mnodeNums = int(mnodeNums) + self.level = int(level) + self.disk = int(disk) self.dnodes = [] startPort_sec = int(startPort+portStep) for num in range(1, (self.dnodeNums+1)): - dnode = TDDnode(num) + dnode = TDDnode(num, self.level, self.disk) dnode.addExtraCfg("firstEp", f"{hostname}:{self.startPort}") dnode.addExtraCfg("fqdn", f"{hostname}") dnode.addExtraCfg("serverPort", f"{self.startPort + (num-1)*self.portStep}") diff --git a/tests/army/frame/dnodes.py b/tests/army/frame/server/dnode.py similarity index 65% rename from tests/army/frame/dnodes.py rename to tests/army/frame/server/dnode.py index 5581f29a57..6331939636 100644 --- a/tests/army/frame/dnodes.py +++ b/tests/army/frame/server/dnode.py @@ -22,101 +22,18 @@ import base64 import json import copy from fabric2 import Connection -from frame.log import * from shutil import which - -class TDSimClient: - def __init__(self, path): - self.testCluster = False - self.path = path - self.cfgDict = { - "fqdn": "localhost", - "numOfLogLines": "100000000", - "locale": "en_US.UTF-8", - 
"charset": "UTF-8", - "asyncLog": "0", - "rpcDebugFlag": "135", - "tmrDebugFlag": "131", - "cDebugFlag": "135", - "uDebugFlag": "135", - "jniDebugFlag": "135", - "qDebugFlag": "135", - "supportVnodes": "1024", - "enableQueryHb": "1", - "telemetryReporting": "0", - "tqDebugflag": "135", - "wDebugflag":"135", - } - - def getLogDir(self): - self.logDir = os.path.join(self.path,"sim","psim","log") - return self.logDir - - def getCfgDir(self): - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - return self.cfgDir - - def setTestCluster(self, value): - self.testCluster = value - - def addExtraCfg(self, option, value): - self.cfgDict.update({option: value}) - - def cfg(self, option, value): - cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) - if os.system(cmd) != 0: - tdLog.exit(cmd) - - def deploy(self, *updatecfgDict): - self.logDir = os.path.join(self.path,"sim","psim","log") - self.cfgDir = os.path.join(self.path,"sim","psim","cfg") - self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") - - cmd = "rm -rf " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) - os.makedirs(self.logDir) - - cmd = "rm -rf " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) - - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) - os.makedirs(self.cfgDir) - - cmd = "touch " + self.cfgPath - if os.system(cmd) != 0: - tdLog.exit(cmd) - - if self.testCluster: - self.cfg("masterIp", "192.168.0.1") - self.cfg("secondIp", "192.168.0.2") - self.cfg("logDir", self.logDir) - - for key, value in self.cfgDict.items(): - self.cfg(key, value) - - try: - if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]: - clientCfg = dict (updatecfgDict[0][0].get('clientCfg')) - for key, value in clientCfg.items(): - self.cfg(key, value) - except Exception: - pass - - tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) +# self +from frame.log import * class TDDnode: - def __init__(self, index): + def __init__(self, index=1, level=1, disk=1): self.index = index + self.level = level + self.disk = disk + self.dataDir = [] self.running = 0 self.deployed = 0 self.testCluster = False @@ -209,14 +126,30 @@ class TDDnode: self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) def deploy(self, *updatecfgDict): + # logDir self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") - self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") - self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") - self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + # dataDir + simPath = os.path.join(self.path, "sim", "dnode%d" % self.index) + primary = 1 + if self.level == 1 and self.disk == 1: + eDir = os.path.join(simPath, "data") + self.dataDir.append(eDir) + else: + for i in range(self.level): + for j in range(self.disk): + eDir = os.path.join(simPath, f"data{i}{j}") + self.dataDir.append(f"{eDir} {i} {primary}") + if primary == 1: + primary = 0 - cmd = "rm -rf " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # taos.cfg + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.cfgDir, "taos.cfg") + + for eDir in self.dataDir: + cmd = "rm -rf " + eDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "rm -rf " + self.logDir if os.system(cmd) != 0: @@ -229,7 +162,8 @@ class 
TDDnode: # cmd = "mkdir -p " + self.dataDir # if os.system(cmd) != 0: # tdLog.exit(cmd) - os.makedirs(self.dataDir) + for eDir in self.dataDir: + os.makedirs(eDir.split(' ')[0]) # cmd = "mkdir -p " + self.logDir # if os.system(cmd) != 0: @@ -275,7 +209,11 @@ class TDDnode: self.addExtraCfg(key, value) if (self.remoteIP == ""): for key, value in self.cfgDict.items(): - self.cfg(key, value) + if type(value) == list: + for v in value: + self.cfg(key, v) + else: + self.cfg(key, value) else: self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) @@ -650,241 +588,4 @@ class TDDnode: def getDnodesRootDir(self): dnodesRootDir = os.path.join(self.path,"sim","psim") - return dnodesRootDir - - -class TDDnodes: - def __init__(self): - self.dnodes = [] - self.dnodes.append(TDDnode(1)) - self.dnodes.append(TDDnode(2)) - self.dnodes.append(TDDnode(3)) - self.dnodes.append(TDDnode(4)) - self.dnodes.append(TDDnode(5)) - self.dnodes.append(TDDnode(6)) - self.dnodes.append(TDDnode(7)) - self.dnodes.append(TDDnode(8)) - self.dnodes.append(TDDnode(9)) - self.dnodes.append(TDDnode(10)) - self.simDeployed = False - self.testCluster = False - self.valgrind = 0 - self.asan = False - self.killValgrind = 0 - - def init(self, path, remoteIP = ""): - binPath = self.dnodes[0].getPath() + "/../../../" - # tdLog.debug("binPath %s" % (binPath)) - binPath = os.path.realpath(binPath) - # tdLog.debug("binPath real path %s" % (binPath)) - - if path == "": - self.path = os.path.abspath(binPath + "../../") - else: - self.path = os.path.realpath(path) - - for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path, remoteIP) - self.sim = TDSimClient(self.path) - - def setTestCluster(self, value): - self.testCluster = value - - def setValgrind(self, value): - self.valgrind = value - - def setAsan(self, value): - self.asan = value - if value: - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh") - self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh") - else: - self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh") - self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh") - tdLog.info("run in address sanitizer mode") - - def setKillValgrind(self, value): - self.killValgrind = value - - def deploy(self, index, *updatecfgDict): - self.sim.setTestCluster(self.testCluster) - - if (self.simDeployed == False): - self.sim.deploy(updatecfgDict) - self.simDeployed = True - - self.check(index) - self.dnodes[index - 1].setTestCluster(self.testCluster) - self.dnodes[index - 1].setValgrind(self.valgrind) - self.dnodes[index - 1].setAsan(self.asan) - self.dnodes[index - 1].deploy(updatecfgDict) - - def cfg(self, index, option, value): - self.check(index) - self.dnodes[index - 1].cfg(option, value) - - def starttaosd(self, index): - self.check(index) - self.dnodes[index - 1].starttaosd() - - def stoptaosd(self, index): - self.check(index) - self.dnodes[index - 1].stoptaosd() - - def start(self, index): - self.check(index) - self.dnodes[index - 1].start() - - def startWithoutSleep(self, index): - self.check(index) - self.dnodes[index - 1].startWithoutSleep() - - def stop(self, index): - self.check(index) - self.dnodes[index - 1].stop() - - def getDataSize(self, index): - self.check(index) - return self.dnodes[index - 1].getDataSize() - - def forcestop(self, index): 
- self.check(index) - self.dnodes[index - 1].forcestop() - - def startIP(self, index): - self.check(index) - - if self.testCluster: - self.dnodes[index - 1].startIP() - - def stopIP(self, index): - self.check(index) - - if self.dnodes[index - 1].testCluster: - self.dnodes[index - 1].stopIP() - - def check(self, index): - if index < 1 or index > 10: - tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) - - def StopAllSigint(self): - tdLog.info("stop all dnodes sigint, asan:%d" % self.asan) - if self.asan: - tdLog.info("execute script: %s" % self.stopDnodesSigintPath) - os.system(self.stopDnodesSigintPath) - tdLog.info("execute finished") - return - - def killProcesser(self, processerName): - if platform.system().lower() == 'windows': - killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName) - psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName)) - else: - killCmd = ( - "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" - % processerName - ) - psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName) - - processID = "" - - try: - processID = subprocess.check_output(psCmd, shell=True) - while processID: - os.system(killCmd) - time.sleep(1) - try: - processID = subprocess.check_output(psCmd, shell=True) - except Exception as err: - processID = "" - tdLog.debug('**** kill pid warn: {err}') - except Exception as err: - processID = "" - tdLog.debug(f'**** find pid warn: {err}') - - - - def stopAll(self): - tdLog.info("stop all dnodes, asan:%d" % self.asan) - if platform.system().lower() != 'windows': - distro_id = distro.id() - else: - distro_id = "not alpine" - if self.asan and distro_id != "alpine": - tdLog.info("execute script: %s" % self.stopDnodesPath) - os.system(self.stopDnodesPath) - tdLog.info("execute finished") - return - - if (not self.dnodes[0].remoteIP == ""): - self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()") - return - for i in range(len(self.dnodes)): - self.dnodes[i].stop() - - - if (distro_id == "alpine"): - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - print(processID) - killCmd = "kill -9 %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - elif platform.system().lower() == 'windows': - self.killProcesser("taosd") - self.killProcesser("tmq_sim") - self.killProcesser("taosBenchmark") - else: - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - if processID: - cmd = "sudo systemctl stop taosd" - os.system(cmd) - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - killCmd = "kill -9 %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - if self.killValgrind == 1: - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs" - processID = 
subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - if platform.system().lower() == 'windows': - killCmd = "kill -TERM %s > nul 2>&1" % processID - else: - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - - # if os.system(cmd) != 0 : - # tdLog.exit(cmd) - - def getDnodesRootDir(self): - dnodesRootDir = "%s/sim" % (self.path) - return dnodesRootDir - - def getSimCfgPath(self): - return self.sim.getCfgDir() - - def getSimLogPath(self): - return self.sim.getLogDir() - - def addSimExtraCfg(self, option, value): - self.sim.addExtraCfg(option, value) - - def getAsan(self): - return self.asan - -tdDnodes = TDDnodes() \ No newline at end of file + return dnodesRootDir \ No newline at end of file diff --git a/tests/army/frame/server/dnodes.py b/tests/army/frame/server/dnodes.py new file mode 100644 index 0000000000..a22e1cbff7 --- /dev/null +++ b/tests/army/frame/server/dnodes.py @@ -0,0 +1,272 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import platform +import distro +import subprocess +from time import sleep +import base64 +import json +import copy +from fabric2 import Connection +from shutil import which + +# self +from frame.log import * +from frame.server.dnode import * +from frame.server.simClient import * + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + self.testCluster = False + self.valgrind = 0 + self.asan = False + self.killValgrind = 0 + + def init(self, path, remoteIP = ""): + binPath = self.dnodes[0].getPath() + "/../../../" + # tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + # tdLog.debug("binPath real path %s" % (binPath)) + + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path, remoteIP) + self.sim = TDSimClient(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def setAsan(self, value): + self.asan = value + if value: + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh") + self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh") + else: + self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh") + self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh") + tdLog.info("run in 
address sanitizer mode") + + def setKillValgrind(self, value): + self.killValgrind = value + + def deploy(self, index, *updatecfgDict): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy(updatecfgDict) + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].setAsan(self.asan) + self.dnodes[index - 1].deploy(updatecfgDict) + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def starttaosd(self, index): + self.check(index) + self.dnodes[index - 1].starttaosd() + + def stoptaosd(self, index): + self.check(index) + self.dnodes[index - 1].stoptaosd() + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def startWithoutSleep(self, index): + self.check(index) + self.dnodes[index - 1].startWithoutSleep() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should be in range [1, 10]" % (index)) + + def StopAllSigint(self): + tdLog.info("stop all dnodes sigint, asan:%d" % self.asan) + if self.asan: + tdLog.info("execute script: %s" % self.stopDnodesSigintPath) + os.system(self.stopDnodesSigintPath) + tdLog.info("execute finished") + return + + def killProcesser(self, processerName): + if platform.system().lower() == 'windows': + killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName) + psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName)) + else: + killCmd = ( + "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" + % processerName + ) + psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName) + + processID = "" + + try: + processID = subprocess.check_output(psCmd, shell=True) + while processID: + os.system(killCmd) + time.sleep(1) + try: + processID = subprocess.check_output(psCmd, shell=True) + except Exception as err: + processID = "" + tdLog.debug(f'**** kill pid warn: {err}') + except Exception as err: + processID = "" + tdLog.debug(f'**** find pid warn: {err}') + + + + def stopAll(self): + tdLog.info("stop all dnodes, asan:%d" % self.asan) + if platform.system().lower() != 'windows': + distro_id = distro.id() + else: + distro_id = "not alpine" + if self.asan and distro_id != "alpine": + tdLog.info("execute script: %s" % self.stopDnodesPath) + os.system(self.stopDnodesPath) + tdLog.info("execute finished") + return + + if (not self.dnodes[0].remoteIP == ""): + self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()") + return + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + + if (distro_id == "alpine"): + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs" + processID = subprocess.check_output(psCmd,
shell=True).decode("utf-8").strip() + while(processID): + print(processID) + killCmd = "kill -9 %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + elif platform.system().lower() == 'windows': + self.killProcesser("taosd") + self.killProcesser("tmq_sim") + self.killProcesser("taosBenchmark") + else: + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + killCmd = "kill -9 %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + if self.killValgrind == 1: + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + if platform.system().lower() == 'windows': + killCmd = "kill -TERM %s > nul 2>&1" % processID + else: + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + def getAsan(self): + return self.asan + + def setLevelDisk(self, level, disk): + for i in range(len(self.dnodes)): + self.dnodes[i].level = int(level) + self.dnodes[i].disk = int(disk) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/server/simClient.py b/tests/army/frame/server/simClient.py new file mode 100644 index 0000000000..a4b719f798 --- /dev/null +++ b/tests/army/frame/server/simClient.py @@ -0,0 +1,116 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
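For context on the level/disk plumbing above: setLevelDisk() pushes the values into every TDDnode, and TDDnode.deploy() (earlier in this diff) emits one dataDir entry per level/disk pair into taos.cfg in the form `dataDir <path> <level> <primary>`, marking only the first directory as primary. A sketch of what deploy() would generate for setLevelDisk(2, 2); the sim path is illustrative:

```python
# Illustration only: taos.cfg dataDir lines produced for level=2, disk=2,
# following the loop in TDDnode.deploy(); the dnode path is hypothetical.
expected_cfg_lines = [
    "dataDir /root/TDengine/sim/dnode1/data00 0 1",  # level 0, primary
    "dataDir /root/TDengine/sim/dnode1/data01 0 0",
    "dataDir /root/TDengine/sim/dnode1/data10 1 0",
    "dataDir /root/TDengine/sim/dnode1/data11 1 0",
]
```

This is what the -L/-D switches added to test.py later in this diff ultimately control.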
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import platform +import distro +import subprocess +from time import sleep +import base64 +import json +import copy +from fabric2 import Connection +from shutil import which + +# self +from frame.log import * + + +class TDSimClient: + def __init__(self, path): + self.testCluster = False + self.path = path + self.cfgDict = { + "fqdn": "localhost", + "numOfLogLines": "100000000", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "uDebugFlag": "135", + "jniDebugFlag": "135", + "qDebugFlag": "135", + "supportVnodes": "1024", + "enableQueryHb": "1", + "telemetryReporting": "0", + "tqDebugflag": "135", + "wDebugflag":"135", + } + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self, *updatecfgDict): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.logDir) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + try: + if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]: + clientCfg = dict (updatecfgDict[0][0].get('clientCfg')) + for key, value in clientCfg.items(): + self.cfg(key, value) + except Exception: + pass + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) \ No newline at end of file diff --git a/tests/army/frame/sql.py b/tests/army/frame/sql.py index d7dce2bc3e..eafae9be2d 100644 --- a/tests/army/frame/sql.py +++ b/tests/army/frame/sql.py @@ -482,6 +482,11 @@ class TDSql: time.sleep(1) pass + # execute many sql + def executes(self, sqls, queryTimes=30, show=False): + for sql in sqls: + self.execute(sql, queryTimes, show) + def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: caller = inspect.getframeinfo(inspect.stack()[1][0]) @@ -535,6 +540,16 @@ class TDSql: tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) raise Exception + # check like select count(*) ... 
sql + def checkAgg(self, sql, expectCnt): + self.query(sql) + self.checkData(0, 0, expectCnt) + + # get first value + def getFirstValue(self, sql) : + self.query(sql) + return self.getData(0, 0) + def get_times(self, time_str, precision="ms"): caller = inspect.getframeinfo(inspect.stack()[1][0]) if time_str[-1] not in TAOS_TIME_INIT: @@ -602,6 +617,7 @@ class TDSql: if self.cursor.istype(col, "BIGINT UNSIGNED"): return "BIGINT UNSIGNED" + ''' def taosdStatus(self, state): tdLog.sleep(5) pstate = 0 @@ -630,6 +646,7 @@ class TDSql: tdLog.exit("taosd state is %d != expect:%d" %args) pass + def haveFile(self, dir, state): if os.path.exists(dir) and os.path.isdir(dir): if not os.listdir(dir): @@ -644,6 +661,7 @@ class TDSql: tdLog.exit("dir: %s is not empty, expect: empty" %dir) else: tdLog.exit("dir: %s doesn't exist" %dir) + def createDir(self, dir): if os.path.exists(dir): shrmtree(dir) @@ -651,5 +669,6 @@ class TDSql: os.makedirs( dir, 755 ) tdLog.info("dir: %s is created" %dir) pass +''' tdSql = TDSql() diff --git a/tests/army/frame/srvCtl.py b/tests/army/frame/srvCtl.py new file mode 100644 index 0000000000..c01ea33578 --- /dev/null +++ b/tests/army/frame/srvCtl.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime + +from frame.server.dnode import * +from frame.server.dnodes import * + + +class srvCtl: + def __init__(self): + # record server information + self.dnodeNum = 0 + self.mnodeNum = 0 + self.mLevel = 0 + self.mLevelDisk = 0 + + # + # control server + # + + # start + def dnodeStart(self, idx): + return tdDnodes.starttaosd(idx) + + # stop + def dnodeStop(self, idx): + return tdDnodes.stoptaosd(idx) + + + # + # about path + # + + # get cluster root path like /root/TDinternal/sim/ + def clusterRootPath(self): + return tdDnodes.getDnodesRootDir() + + # return dnode data files list + def dnodeDataFiles(self, idx): + files = [] + return files + + # + # get dnodes information + # + + # taos.cfg position + def dnodeCfgPath(self, idx): + return tdDnodes.dnodes[idx-1].cfgPath + + +sc = srvCtl() \ No newline at end of file diff --git a/tests/army/test.py b/tests/army/test.py index cc114e0d16..a3e28b772d 100644 --- a/tests/army/test.py +++ b/tests/army/test.py @@ -28,9 +28,9 @@ import importlib import toml from frame.log import * -from frame.dnodes import * +from frame.server.dnodes import * +from frame.server.cluster import * from frame.cases import * -from frame.cluster import * from frame.taosadapter import * import taos @@ -111,8 +111,12 @@ if __name__ == "__main__": asan = False independentMnode = False previousCluster = False - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous']) + level = 1 + disk = 1 + + opts, args = getopt.gnu_getopt(sys.argv[1:], 
'f:p:m:l:scghrd:k:e:N:M:Q:C:RWU:n:i:aPL:D:', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums', + 'queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode',"asan",'previous','level','disk']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -134,11 +138,13 @@ if __name__ == "__main__": tdLog.printNoPrefix('-C create Dnode Numbers in one cluster') tdLog.printNoPrefix('-R restful realization form') tdLog.printNoPrefix('-W websocket connection') - tdLog.printNoPrefix('-D taosadapter update cfg dict ') + tdLog.printNoPrefix('-U taosadapter update cfg dict ') tdLog.printNoPrefix('-n the number of replicas') tdLog.printNoPrefix('-i independentMnode Mnode') tdLog.printNoPrefix('-a address sanitizer mode') + tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.') + tdLog.printNoPrefix('-L set multi-level storage level number, range 1 ~ 3') + tdLog.printNoPrefix('-D set disk number on each level, range 1 ~ 10') sys.exit(0) @@ -213,7 +219,7 @@ if __name__ == "__main__": if key in ['-a', '--asan']: asan = True - if key in ['-D', '--adaptercfgupdate']: + if key in ['-U', '--adaptercfgupdate']: try: adaptercfgupdate = eval(base64.b64decode(value.encode()).decode()) except: @@ -226,6 +232,12 @@ if __name__ == "__main__": if key in ['-P', '--previous']: previousCluster = True + if key in ['-L', '--level']: + level = value + + if key in ['-D', '--disk']: + disk = value + # # do exeCmd command # @@ -361,6 +373,7 @@ if __name__ == "__main__": tAdapter.stop(force_kill=True) if dnodeNums == 1 : + tdDnodes.setLevelDisk(level, disk) tdDnodes.deploy(1,updateCfgDict) tdDnodes.start(1) tdCases.logSql(logSql) @@ -391,7 +404,7 @@ if __name__ == "__main__": tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") else : tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) - dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode) + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode, level=level, disk=disk) tdDnodes = ClusterDnodes(dnodeslist) tdDnodes.init(deployPath, masterIp) tdDnodes.setTestCluster(testCluster) @@ -498,6 +511,7 @@ if __name__ == "__main__": else: tdLog.info("not need to query") else: + # except windows tdDnodes.setKillValgrind(killValgrind) tdDnodes.init(deployPath, masterIp) tdDnodes.setTestCluster(testCluster) @@ -529,6 +543,7 @@ if __name__ == "__main__": if dnodeNums == 1 : # dnode is one + tdDnodes.setLevelDisk(level, disk) tdDnodes.deploy(1,updateCfgDict) tdDnodes.start(1) tdCases.logSql(logSql) @@ -574,7 +589,8 @@ if __name__ == "__main__": # dnode > 1 cluster tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) print(independentMnode,"independentMnode valuse") - dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode) + # create dnode list + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode, level=level, disk=disk) tdDnodes = ClusterDnodes(dnodeslist) tdDnodes.init(deployPath, masterIp) tdDnodes.setTestCluster(testCluster) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 0c66d0d1ed..d52059bc08 100644 ---
a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -5,10 +5,17 @@ #unit-test ,,y,unit-test,bash test.sh -#army-test -,,y,army,./pytest.sh python3 ./test.py -f empty.py +# +# army-test +# +,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2 +,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1 +,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2 +,,n,army,python3 ./test.py -f community/cmdline/fullopt.py -#system test +# +# system test +# ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/stream_multi_agg.py ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/stream_basic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py @@ -27,9 +34,11 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py #,,n,system-test,python3 ./test.py -f 8-stream/vnode_restart.py -N 4 #,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4 -,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 +#,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3 @@ -102,6 +111,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_stb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py +,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/stt_blocks_check.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py @@ -132,6 +142,10 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4233.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4233.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4233.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False @@ -249,8 +263,10 @@ e ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py -,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupRep1.py -N 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupRep3.py -N 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 1 +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 1 +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 
0-others/splitVGroupWal.py -N 3 -n 3 ,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/delete_check.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py @@ -348,6 +364,11 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py @@ -515,6 +536,8 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQueryInterval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/systable_func.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/test_ts4382.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/test_ts4403.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity_1.py @@ -524,6 +547,8 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tagFilter.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/projectionDesc.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3405_3398_3423.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts-4348-td-27939.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/backslash_g.py ,,n,system-test,python3 ./test.py -f 2-query/queryQnode.py ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode1mnode.py @@ -1070,6 +1095,7 @@ e ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim +,,y,script,./test.sh -f tsim/query/sort-pre-cols.sim ,,y,script,./test.sh -f tsim/query/groupby.sim ,,y,script,./test.sh -f tsim/query/groupby_distinct.sim ,,y,script,./test.sh -f tsim/query/event.sim diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh index f05e0dfc83..ef9413a9d5 100755 --- a/tests/parallel_test/run.sh +++ b/tests/parallel_test/run.sh @@ -15,61 +15,61 @@ function usage() { ent=0 while getopts "m:t:b:l:o:w:eh" opt; do case $opt in - m) - config_file=$OPTARG - ;; - t) - t_file=$OPTARG - ;; - b) - branch=$OPTARG - ;; - l) - log_dir=$OPTARG - ;; - e) - ent=1 - ;; - o) - timeout_param="-o $OPTARG" - ;; - w) - web_server=$OPTARG - ;; - h) - usage - exit 0 - ;; - \?) - echo "Invalid option: -$OPTARG" - usage - exit 0 - ;; + m) + config_file=$OPTARG + ;; + t) + t_file=$OPTARG + ;; + b) + branch=$OPTARG + ;; + l) + log_dir=$OPTARG + ;; + e) + ent=1 + ;; + o) + timeout_param="-o $OPTARG" + ;; + w) + web_server=$OPTARG + ;; + h) + usage + exit 0 + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; esac done #config_file=$1 -if [ -z $config_file ]; then +if [ -z "$config_file" ]; then usage exit 1 fi -if [ ! 
-f $config_file ]; then
+if [ ! -f "$config_file" ]; then
     echo "$config_file not found"
     usage
     exit 1
 fi
 #t_file=$2
-if [ -z $t_file ]; then
+if [ -z "$t_file" ]; then
     usage
     exit 1
 fi
-if [ ! -f $t_file ]; then
+if [ ! -f "$t_file" ]; then
     echo "$t_file not found"
     usage
     exit 1
 fi
-date_tag=`date +%Y%m%d-%H%M%S`
+date_tag=$(date +%Y%m%d-%H%M%S)
 test_log_dir=${branch}_${date_tag}
-if [ -z $log_dir ]; then
+if [ -z "$log_dir" ]; then
     log_dir="log/${test_log_dir}"
 else
     log_dir="$log_dir/${test_log_dir}"
@@ -82,42 +82,41 @@ workdirs=()
 threads=()
 i=0
-while [ 1 ]; do
-    host=`jq .[$i].host $config_file`
+while true; do
+    host=$(jq .[$i].host "$config_file")
     if [ "$host" = "null" ]; then
         break
     fi
-    username=`jq .[$i].username $config_file`
+    username=$(jq .[$i].username "$config_file")
     if [ "$username" = "null" ]; then
         break
     fi
-    password=`jq .[$i].password $config_file`
+    password=$(jq .[$i].password "$config_file")
     if [ "$password" = "null" ]; then
         password=""
     fi
-    workdir=`jq .[$i].workdir $config_file`
+    workdir=$(jq .[$i].workdir "$config_file")
     if [ "$workdir" = "null" ]; then
         break
     fi
-    thread=`jq .[$i].thread $config_file`
+    thread=$(jq .[$i].thread "$config_file")
     if [ "$thread" = "null" ]; then
         break
     fi
-    hosts[i]=`echo $host|sed 's/\"$//'|sed 's/^\"//'`
-    usernames[i]=`echo $username|sed 's/\"$//'|sed 's/^\"//'`
-    passwords[i]=`echo $password|sed 's/\"$//'|sed 's/^\"//'`
-    workdirs[i]=`echo $workdir|sed 's/\"$//'|sed 's/^\"//'`
+    hosts[i]=$(echo "$host" | sed 's/\"$//' | sed 's/^\"//')
+    usernames[i]=$(echo "$username" | sed 's/\"$//' | sed 's/^\"//')
+    passwords[i]=$(echo "$password" | sed 's/\"$//' | sed 's/^\"//')
+    workdirs[i]=$(echo "$workdir" | sed 's/\"$//' | sed 's/^\"//')
     threads[i]=$thread
-    i=$(( i + 1 ))
+    i=$((i + 1))
 done
-
 function prepare_cases() {
-    cat $t_file >>$task_file
+    cat "$t_file" >>"$task_file"
     local i=0
-    while [ $i -lt $1 ]; do
-        echo "%%FINISHED%%" >>$task_file
-        i=$(( i + 1 ))
+    while [ $i -lt "$1" ]; do
+        echo "%%FINISHED%%" >>"$task_file"
+        i=$((i + 1))
     done
 }
@@ -125,7 +124,7 @@ function clean_tmp() {
     # clean tmp dir
     local index=$1
     local ssh_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
-    if [ -z ${passwords[index]} ]; then
+    if [ -z "${passwords[index]}" ]; then
         ssh_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
     fi
     local cmd="${ssh_script} rm -rf ${workdirs[index]}/tmp"
@@ -136,7 +135,7 @@ function run_thread() {
     local index=$1
     local thread_no=$2
     local runcase_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
-    if [ -z ${passwords[index]} ]; then
+    if [ -z "${passwords[index]}" ]; then
         runcase_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
     fi
     local count=0
@@ -147,8 +146,9 @@ local cmd="${runcase_script} ${script}"
     # script="echo"
-    while [ 1 ]; do
-        local line=`flock -x $lock_file -c "head -n1 $task_file;sed -i \"1d\" $task_file"`
+    while true; do
+        local line
+        line=$(flock -x "$lock_file" -c "head -n1 $task_file;sed -i \"1d\" $task_file")
         if [ "x$line" = "x%%FINISHED%%" ]; then
             # echo "$index . $thread_no EXIT"
             break
@@ -156,15 +156,17 @@ if [ -z "$line" ]; then
             continue
         fi
-        echo "$line"|grep -q "^#"
-        if [ $? -eq 0 ]; then
+
+        if echo "$line" | grep -q "^#"; then
            continue
         fi
-        local case_redo_time=`echo "$line"|cut -d, -f2`
+        local case_redo_time
+        case_redo_time=$(echo "$line" | cut -d, -f2)
         if [ -z "$case_redo_time" ]; then
            case_redo_time=${DEFAULT_RETRY_TIME:-2}
         fi
-        local case_build_san=`echo "$line"|cut -d, -f3`
+        local case_build_san
+        case_build_san=$(echo "$line" | cut -d, -f3)
         if [ "${case_build_san}" == "y" ]; then
            case_build_san="y"
            DEBUGPATH="debugSan"
@@ -175,139 +177,150 @@ usage
             exit 1
         fi
-        local exec_dir=`echo "$line"|cut -d, -f4`
-        local case_cmd=`echo "$line"|cut -d, -f5`
+        local exec_dir
+        exec_dir=$(echo "$line" | cut -d, -f4)
+        local case_cmd
+        case_cmd=$(echo "$line" | cut -d, -f5)
         local case_file=""
-        echo "$case_cmd"|grep -q "\.sh"
-        if [ $? -eq 0 ]; then
-            case_file=`echo "$case_cmd"|grep -o ".*\.sh"|awk '{print $NF}'`
+
+        if echo "$case_cmd" | grep -q "\.sh"; then
+            case_file=$(echo "$case_cmd" | grep -o ".*\.sh" | awk '{print $NF}')
         fi
-        echo "$case_cmd"|grep -q "^python3"
-        if [ $? -eq 0 ]; then
-            case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'`
+
+        if echo "$case_cmd" | grep -q "^python3"; then
+            case_file=$(echo "$case_cmd" | grep -o ".*\.py" | awk '{print $NF}')
         fi
-        echo "$case_cmd"|grep -q "^./pytest.sh"
-        if [ $? -eq 0 ]; then
-            case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'`
+
+        if echo "$case_cmd" | grep -q "^./pytest.sh"; then
+            case_file=$(echo "$case_cmd" | grep -o ".*\.py" | awk '{print $NF}')
         fi
-        echo "$case_cmd"|grep -q "\.sim"
-        if [ $? -eq 0 ]; then
-            case_file=`echo "$case_cmd"|grep -o ".*\.sim"|awk '{print $NF}'`
+
+        if echo "$case_cmd" | grep -q "\.sim"; then
+            case_file=$(echo "$case_cmd" | grep -o ".*\.sim" | awk '{print $NF}')
         fi
         if [ -z "$case_file" ]; then
-            case_file=`echo "$case_cmd"|awk '{print $NF}'`
+            case_file=$(echo "$case_cmd" | awk '{print $NF}')
         fi
         if [ -z "$case_file" ]; then
            continue
         fi
+        case_sql_file="$exec_dir/${case_file}.sql"
         case_file="$exec_dir/${case_file}.${index}.${thread_no}.${count}"
-        count=$(( count + 1 ))
-        local case_path=`dirname "$case_file"`
-        if [ ! -z "$case_path" ]; then
-            mkdir -p $log_dir/$case_path
+        count=$((count + 1))
+        local case_path
+        case_path=$(dirname "$case_file")
+        if [ -n "$case_path" ]; then
+            mkdir -p "$log_dir"/"$case_path"
         fi
         cmd="${runcase_script} ${script} -w ${workdirs[index]} -c \"${case_cmd}\" -t ${thread_no} -d ${exec_dir} -s ${case_build_san} ${timeout_param}"
         # echo "$thread_no $count $cmd"
         local ret=0
         local redo_count=1
         local case_log_file=$log_dir/${case_file}.txt
-        start_time=`date +%s`
-        local case_index=`flock -x $lock_file -c "sh -c \"echo \\\$(( \\\$( cat $index_file ) + 1 )) | tee $index_file\""`
-        case_index=`printf "%5d" $case_index`
-        local case_info=`echo "$line"|cut -d, -f 3,4,5`
+        start_time=$(date +%s)
+        local case_index
+        case_index=$(flock -x "$lock_file" -c "sh -c \"echo \$(( \$( cat $index_file ) + 1 )) | tee $index_file\"")
+        case_index=$(printf "%5d" "$case_index")
+        local case_info
+        case_info=$(echo "$line" | cut -d, -f 3,4,5)
         while [ ${redo_count} -lt 6 ]; do
-            if [ -f $case_log_file ]; then
-                cp $case_log_file $log_dir/$case_file.${redo_count}.redotxt
+            if [ -f "$case_log_file" ]; then
+                cp "$case_log_file" "$log_dir"/"$case_file".${redo_count}.redotxt
             fi
-            echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$case_log_file
-            local current_time=`date "+%Y-%m-%d %H:%M:%S"`
+            echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >"$case_log_file"
+            local current_time
+            current_time=$(date "+%Y-%m-%d %H:%M:%S")
             echo -e "$case_index \e[33m START >>>>> \e[0m ${case_info} \e[33m[$current_time]\e[0m"
-            echo "$current_time" >>$case_log_file
-            local real_start_time=`date +%s`
+            echo "$current_time" >>"$case_log_file"
+            local real_start_time
+            real_start_time=$(date +%s)
             # $cmd 2>&1 | tee -a $case_log_file
             # ret=${PIPESTATUS[0]}
-            $cmd >>$case_log_file 2>&1
+            $cmd >>"$case_log_file" 2>&1
             ret=$?
-            local real_end_time=`date +%s`
-            local time_elapsed=$(( real_end_time - real_start_time ))
-            echo "execute time: ${time_elapsed}s" >>$case_log_file
-            current_time=`date "+%Y-%m-%d %H:%M:%S"`
-            echo "${hosts[index]} $current_time exit code:${ret}" >>$case_log_file
+            local real_end_time
+            real_end_time=$(date +%s)
+            local time_elapsed
+            time_elapsed=$((real_end_time - real_start_time))
+            echo "execute time: ${time_elapsed}s" >>"$case_log_file"
+            current_time=$(date "+%Y-%m-%d %H:%M:%S")
+            echo "${hosts[index]} $current_time exit code:${ret}" >>"$case_log_file"
             if [ $ret -eq 0 ]; then
                 break
             fi
             redo=0
-            grep -q "wait too long for taosd start" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "wait too long for taosd start" "$case_log_file"; then
                 redo=1
             fi
-            grep -q "kex_exchange_identification: Connection closed by remote host" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "kex_exchange_identification: Connection closed by remote host" "$case_log_file"; then
                 redo=1
             fi
-            grep -q "ssh_exchange_identification: Connection closed by remote host" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "ssh_exchange_identification: Connection closed by remote host" "$case_log_file"; then
                 redo=1
             fi
-            grep -q "kex_exchange_identification: read: Connection reset by peer" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "kex_exchange_identification: read: Connection reset by peer" "$case_log_file"; then
                 redo=1
             fi
-            grep -q "Database not ready" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "Database not ready" "$case_log_file"; then
                 redo=1
             fi
-            grep -q "Unable to establish connection" $case_log_file
-            if [ $? -eq 0 ]; then
+
+            if grep -q "Unable to establish connection" "$case_log_file"; then
                 redo=1
             fi
-            if [ $redo_count -lt $case_redo_time ]; then
+            if [ $redo_count -lt "$case_redo_time" ]; then
                 redo=1
             fi
             if [ $redo -eq 0 ]; then
                 break
             fi
-            redo_count=$(( redo_count + 1 ))
+            redo_count=$((redo_count + 1))
         done
-        end_time=`date +%s`
-        echo >>$case_log_file
-        total_time=$(( end_time - start_time ))
-        echo "${hosts[index]} total time: ${total_time}s" >>$case_log_file
+        end_time=$(date +%s)
+        echo >>"$case_log_file"
+        total_time=$((end_time - start_time))
+        echo "${hosts[index]} total time: ${total_time}s" >>"$case_log_file"
         # echo "$thread_no ${line} DONE"
         if [ $ret -eq 0 ]; then
             echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[32m success\e[0m"
-            flock -x $lock_file -c "echo \"${case_info}|success|${total_time}\" >>${success_case_file}"
+            flock -x "$lock_file" -c "echo \"${case_info}|success|${total_time}\" >>${success_case_file}"
         else
-            if [ ! -z ${web_server} ]; then
-                flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}"
+            if [ -n "${web_server}" ]; then
+                flock -x "$lock_file" -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}"
             else
-                flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}"
+                flock -x "$lock_file" -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}"
             fi
-            mkdir -p $log_dir/${case_file}.coredump
+            mkdir -p "${log_dir}"/"${case_file}".coredump
             local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump"
             local scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
-            if [ -z ${passwords[index]} ]; then
+            if [ -z "${passwords[index]}" ]; then
                 scpcmd="scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
             fi
             cmd="$scpcmd:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
             $cmd # 2>/dev/null
-            local corefile=`ls $log_dir/${case_file}.coredump/`
+            local corefile
+            corefile=$(ls "$log_dir/${case_file}.coredump/")
             echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[31m failed\e[0m"
             echo "=========================log============================"
-            cat $case_log_file
+            cat "$case_log_file"
             echo "====================================================="
             echo -e "\e[34m log file: $case_log_file \e[0m"
-            if [ ! -z "${web_server}" ]; then
+            if [ -n "${web_server}" ]; then
                 echo "${web_server}/$test_log_dir/${case_file}.txt"
             fi
-            if [ ! -z "$corefile" ]; then
+            if [ -n "$corefile" ]; then
                 echo -e "\e[34m corefiles: $corefile \e[0m"
                 local build_dir=$log_dir/build_${hosts[index]}
                 local remote_build_dir="${workdirs[index]}/${DEBUGPATH}/build"
                 # if [ $ent -ne 0 ]; then
                 #     remote_build_dir="${workdirs[index]}/{DEBUGPATH}/build"
                 # fi
-                mkdir $build_dir 2>/dev/null
+                mkdir "$build_dir" 2>/dev/null
                 if [ $? -eq 0 ]; then
                     # scp build binary
                     cmd="$scpcmd:${remote_build_dir}/* ${build_dir}/"
@@ -318,18 +331,21 @@
                     # get remote sim dir
                     local remote_sim_dir="${workdirs[index]}/tmp/thread_volume/$thread_no"
                     local tarcmd="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
-                    if [ -z ${passwords[index]} ]; then
+                    if [ -z "${passwords[index]}" ]; then
                         tarcmd="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
                     fi
                     cmd="$tarcmd sh -c \"cd $remote_sim_dir; tar -czf sim.tar.gz sim\""
                     $cmd
                     local remote_sim_tar="${workdirs[index]}/tmp/thread_volume/$thread_no/sim.tar.gz"
+                    local remote_case_sql_file="${workdirs[index]}/tmp/thread_volume/$thread_no/${case_sql_file}"
                     scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
-                    if [ -z ${passwords[index]} ]; then
+                    if [ -z "${passwords[index]}" ]; then
                         scpcmd="scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
                     fi
                     cmd="$scpcmd:${remote_sim_tar} $log_dir/${case_file}.sim.tar.gz"
                     $cmd
+                    cmd="$scpcmd:${remote_case_sql_file} $log_dir/${case_file}.sql"
+                    $cmd
                     # backup source code (disabled)
                     source_tar_dir=$log_dir/TDengine_${hosts[index]}
                     source_tar_file=TDengine.tar.gz
@@ -337,11 +353,9 @@
                         source_tar_dir=$log_dir/TDinternal_${hosts[index]}
                         source_tar_file=TDinternal.tar.gz
                     fi
-                    mkdir $source_tar_dir 2>/dev/null
+                    mkdir "$source_tar_dir" 2>/dev/null
                     if [ $? -eq 0 ]; then
                         cmd="$scpcmd:${workdirs[index]}/$source_tar_file $source_tar_dir"
-                        # echo "$cmd"
-                        # $cmd
                     fi
                 fi
             done
@@ -357,12 +371,12 @@
 i=0
 while [ $i -lt ${#hosts[*]} ]; do
     clean_tmp $i &
-    i=$(( i + 1 ))
+    i=$((i + 1))
 done
 wait

-mkdir -p $log_dir
-rm -rf $log_dir/*
+mkdir -p "$log_dir"
+rm -rf "${log_dir:?}"/*
 task_file=$log_dir/$$.task
 lock_file=$log_dir/$$.lock
 index_file=$log_dir/case_index.txt
@@ -370,71 +384,71 @@ stat_file=$log_dir/stat.txt
 failed_case_file=$log_dir/failed.txt
 success_case_file=$log_dir/success.txt

-echo "0" >$index_file
+echo "0" >"$index_file"
 i=0
 j=0
 while [ $i -lt ${#hosts[*]} ]; do
-    j=$(( j + threads[i] ))
-    i=$(( i + 1 ))
+    j=$((j + threads[i]))
+    i=$((i + 1))
 done
 prepare_cases $j

 i=0
 while [ $i -lt ${#hosts[*]} ]; do
     j=0
-    while [ $j -lt ${threads[i]} ]; do
+    while [ $j -lt "${threads[i]}" ]; do
         run_thread $i $j &
-        j=$(( j + 1 ))
+        j=$((j + 1))
     done
-    i=$(( i + 1 ))
+    i=$((i + 1))
 done

 wait

-rm -f $lock_file
-rm -f $task_file
+rm -f "$lock_file"
+rm -f "$task_file"

 # docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f
 echo "====================================================================="
 echo "log dir: $log_dir"
-total_cases=`cat $index_file`
+total_cases=$(cat "$index_file")
 failed_cases=0
-if [ -f $failed_case_file ]; then
-    if [ ! -z "$web_server" ]; then
-        failed_cases=`grep -v "$web_server" $failed_case_file|wc -l`
+if [ -f "$failed_case_file" ]; then
+    if [ -n "$web_server" ]; then
+        failed_cases=$(grep -c -v "$web_server" "$failed_case_file")
     else
-        failed_cases=`grep -v "log file:" $failed_case_file|wc -l`
+        failed_cases=$(grep -c -v "log file:" "$failed_case_file")
     fi
 fi
-success_cases=$(( total_cases - failed_cases ))
-echo "Total Cases: $total_cases" >$stat_file
-echo "Successful: $success_cases" >>$stat_file
-echo "Failed: $failed_cases" >>$stat_file
-cat $stat_file
+success_cases=$((total_cases - failed_cases))
+echo "Total Cases: $total_cases" >"$stat_file"
+echo "Successful: $success_cases" >>"$stat_file"
+echo "Failed: $failed_cases" >>"$stat_file"
+cat "$stat_file"
 RET=0
 i=1
 if [ -f "${failed_case_file}" ]; then
     echo "====================================================="
-    while read line; do
-        if [ ! -z "${web_server}" ]; then
-            echo "$line"|grep -q "${web_server}"
-            if [ $? -eq 0 ]; then
+    while read -r line; do
+        if [ -n "${web_server}" ]; then
+
+            if echo "$line" | grep -q "${web_server}"; then
                 echo " $line"
                 continue
             fi
         else
-            echo "$line"|grep -q "log file:"
-            if [ $? -eq 0 ]; then
+
+            if echo "$line" | grep -q "log file:"; then
                 echo " $line"
                 continue
             fi
         fi
-        line=`echo "$line"|cut -d, -f 3,4,5`
+        line=$(echo "$line" | cut -d, -f 3,4,5)
         echo -e "$i. $line \e[31m failed\e[0m" >&2
-        i=$(( i + 1 ))
-    done <${failed_case_file}
+        i=$((i + 1))
+    done <"${failed_case_file}"
     RET=1
 fi
diff --git a/tests/pytest/client/twoClients.py b/tests/pytest/client/twoClients.py
index 1a1b36c554..cbafeccf74 100644
--- a/tests/pytest/client/twoClients.py
+++ b/tests/pytest/client/twoClients.py
@@ -64,7 +64,7 @@ class TwoClients:
         cursor2.execute("drop table t0")
         cursor2.execute("create table t0 using tb tags('beijing')")
-        tdSql.execute("insert into t0 values(now, 2, 'test')")
+        tdSql.execute("insert into t0 values(now+1s, 2, 'test')")
         tdSql.query("select * from tb")
         tdSql.checkRows(1)
diff --git a/tests/pytest/util/cluster.py b/tests/pytest/util/cluster.py
index 30b70b01fc..2bf2c99604 100644
--- a/tests/pytest/util/cluster.py
+++ b/tests/pytest/util/cluster.py
@@ -52,10 +52,9 @@ class ConfigureyCluster:
             dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")

             # configure dnoe of independent mnodes
-            if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
+            if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == "True" :
                 tdLog.info(f"set mnode:{num} supportVnodes 0")
                 dnode.addExtraCfg("supportVnodes", 0)
-            # print(dnode)
             self.dnodes.append(dnode)
         return self.dnodes
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 9c45c09715..010f45a573 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -506,7 +506,22 @@ class TDCom:
                 if ("packaging" not in rootRealPath):
                     buildPath = root[:len(root) - len("/build/bin")]
                     break
+        if platform.system().lower() == 'windows':
+            win_sep = "\\"
+            buildPath = buildPath.replace(win_sep,'/')
+
         return buildPath
+
+    def getTaosdPath(self, dnodeID="dnode1"):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        taosdPath = buildPath + "/../sim/" + dnodeID
+        tdLog.info("taosdPath: %s" % taosdPath)
+        return taosdPath

     def getClientCfgPath(self):
         buildPath = self.getBuildPath()
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 67c3d37960..789e5866ee 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -130,6 +130,7 @@ class TDDnode:
         "locale": "en_US.UTF-8",
         "charset": "UTF-8",
         "asyncLog": "0",
+        "DebugFlag": "131",
         "mDebugFlag": "143",
         "dDebugFlag": "143",
         "vDebugFlag": "143",
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 99d166ee33..000040d958 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -48,9 +48,9 @@ class TDSql:
         self.queryCols = 0
         self.affectedRows = 0

-    def init(self, cursor, log=False):
+    def init(self, cursor, log=True):
         self.cursor = cursor
-
+        print(f"sqllog is :{log}")
         if (log):
             caller = inspect.getframeinfo(inspect.stack()[1][0])
             self.cursor.log(caller.filename + ".sql")
@@ -78,6 +78,26 @@ class TDSql:
             self.cursor.execute(s)
             time.sleep(2)

+    def execute(self, sql, queryTimes=30, show=False):
+        self.sql = sql
+        if show:
+            tdLog.info(sql)
+        i=1
+        while i <= queryTimes:
+            try:
+                self.affectedRows = self.cursor.execute(sql)
+                return self.affectedRows
+            except Exception as e:
+                tdLog.notice("Try to execute sql again, query times: %d "%i)
+                if i == queryTimes:
+                    caller = inspect.getframeinfo(inspect.stack()[1][0])
+                    args = (caller.filename, caller.lineno, sql, repr(e))
+                    tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+                    raise Exception(repr(e))
+                i+=1
+                time.sleep(1)
+        pass
+
     def error(self, sql, expectedErrno = None, expectErrInfo = None, fullMatched = True):
         caller = inspect.getframeinfo(inspect.stack()[1][0])
         expectErrNotOccured = True
@@ -108,7 +128,7 @@ class TDSql:
                 if expectErrInfo == self.error_info:
                     tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo))
                 else:
-                    tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occured, but not expected errno '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
+                    tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
             else:
                 if expectedErrno != None:
                     if expectedErrno in self.errno:
@@ -120,7 +140,7 @@ class TDSql:
                 if expectErrInfo in self.error_info:
                     tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo))
                 else:
-                    tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
+                    tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))

         return self.error_info

@@ -158,6 +178,63 @@ class TDSql:
                 time.sleep(1)
         pass

+    def query_success_failed(self, sql, row_tag=None, queryTimes=10, count_expected_res=None, expectErrInfo = None, fullMatched = True):
+        self.sql = sql
+        i=1
+        while i <= queryTimes:
+            try:
+                self.cursor.execute(sql)
+                self.queryResult = self.cursor.fetchall()
+                self.queryRows = len(self.queryResult)
+                self.queryCols = len(self.cursor.description)
+
+                if count_expected_res is not None:
+                    counter = 0
+                    while count_expected_res != self.queryResult[0][0]:
+                        self.cursor.execute(sql)
+                        self.queryResult = self.cursor.fetchall()
+                        if counter < queryTimes:
+                            counter += 0.5
+                            time.sleep(0.5)
+                        else:
+                            return False
+
+                tdLog.info("query is success")
+                time.sleep(1)
+                continue
+            except Exception as e:
+                tdLog.notice("Try to query again, query times: %d "%i)
+                caller = inspect.getframeinfo(inspect.stack()[1][0])
+                if i < queryTimes:
+                    error_info = repr(e)
+                    print(error_info)
+                    self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'","")
+                    self.queryRows = 0
+                    self.queryCols = 0
+                    self.queryResult = None
+
+                    if fullMatched:
+                        if expectErrInfo != None:
+                            if expectErrInfo == self.error_info:
+                                tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo))
+                            else:
+                                tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
+                    else:
+                        if expectErrInfo != None:
+                            if expectErrInfo in self.error_info:
+                                tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo))
+                            else:
+                                tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
+
+                    return self.error_info
+                elif i == queryTimes:
+                    caller = inspect.getframeinfo(inspect.stack()[1][0])
+                    args = (caller.filename, caller.lineno, sql, repr(e))
+                    tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+                    raise Exception(repr(e))
+                i+=1
+                time.sleep(1)
+        pass

     def is_err_sql(self, sql):
         err_flag = True
@@ -471,25 +548,7 @@ class TDSql:
                     time.sleep(1)
                 continue

-    def execute(self, sql, queryTimes=30, show=False):
-        self.sql = sql
-        if show:
-            tdLog.info(sql)
-        i=1
-        while i <= queryTimes:
-            try:
-                self.affectedRows = self.cursor.execute(sql)
-                return self.affectedRows
-            except Exception as e:
-                tdLog.notice("Try to execute sql again, query times: %d "%i)
-                if i == queryTimes:
-                    caller = inspect.getframeinfo(inspect.stack()[1][0])
-                    args = (caller.filename, caller.lineno, sql, repr(e))
-                    tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
-                    raise Exception(repr(e))
-                i+=1
-                time.sleep(1)
-        pass
+

     def checkAffectedRows(self, expectAffectedRows):
         if self.affectedRows != expectAffectedRows:
diff --git a/tests/script/coverage_test.sh b/tests/script/coverage_test.sh
old mode 100755
new mode 100644
index 535b6802f2..c5e0c31f83
--- a/tests/script/coverage_test.sh
+++ b/tests/script/coverage_test.sh
@@ -9,10 +9,15 @@ else
     exit 1
 fi

+# work main path
+TDENGINE_DIR=/root/TDinternal/community
+if [ x$2 != x ];then
+    TDENGINE_DIR=$2
+fi
+
+echo "TDENGINE_DIR=$TDENGINE_DIR"
 today=`date +"%Y%m%d"`
-TDENGINE_DIR=/root/TDengine
 JDBC_DIR=/root/taos-connector-jdbc
-TAOSKEEPER_DIR=/root/taoskeeper
 TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log

 # Color setting
@@ -23,287 +28,249 @@ GREEN_UNDERLINE='\033[4;32m'
 NC='\033[0m'

 function buildTDengine() {
-    echo "check if TDengine need build"
-    cd $TDENGINE_DIR
-    git remote prune origin > /dev/null
-    git remote update > /dev/null
-    REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch`
-    LOCAL_COMMIT=`git rev-parse --short @`
-    echo " LOCAL: $LOCAL_COMMIT"
-    echo "REMOTE: $REMOTE_COMMIT"
+    echo "check if TDengine need build"
+
+    # pull parent code
+    cd "$TDENGINE_DIR/../"
+    echo "git pull parent code..."
+    git remote prune origin > /dev/null
+    git remote update > /dev/null

-    # reset counter
-    lcov -d . --zerocounters
+    # pull tdengine code
+    cd $TDENGINE_DIR
+    echo "git pull tdengine code..."
+    git remote prune origin > /dev/null
+    git remote update > /dev/null
+    REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch`
+    LOCAL_COMMIT=`git rev-parse --short @`
+    echo " LOCAL: $LOCAL_COMMIT"
+    echo "REMOTE: $REMOTE_COMMIT"

-    if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
-        echo "repo up-to-date"
-    else
-        echo "repo need to pull"
-    fi
+    # reset counter
+    lcov -d . --zerocounters

-    git reset --hard
-    git checkout -- .
-    git checkout $branch
-    git checkout -- .
-    git clean -dfx
-    git pull
-
-    [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
-    cd $TDENGINE_DIR/debug
+    if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
+        echo "repo up-to-date"
+    else
+        echo "repo need to pull"
+    fi
+
+    git reset --hard
+    git checkout -- .
+    git checkout $branch
+    git checkout -- .
+    git clean -dfx
+    git pull
+
+    [ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
+    cd $TDENGINE_DIR/debug
     echo "rebuild.."
-    LOCAL_COMMIT=`git rev-parse --short @`
+    LOCAL_COMMIT=`git rev-parse --short @`

-    rm -rf *
-    if [ "$branch" == "3.0" ]; then
-        echo "3.0 ============="
-        cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true .. > /dev/null
-    else
-        cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_TOOLS=true -DBUILD_HTTP=false .. > /dev/null
-    fi
-    make -j
-    make install
+    rm -rf *
+    makecmd="cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_GEOS=true -DBUILD_CONTRIB=true ../../"
+    echo "$makecmd"
+    $makecmd
+
+    make -j 8 install
 }

 function runCasesOneByOne () {
-    while read -r line; do
-        if [[ "$line" != "#"* ]]; then
-            cmd=`echo $line | cut -d',' -f 5`
+    while read -r line; do
+        if [[ "$line" != "#"* ]]; then
+            cmd=`echo $line | cut -d',' -f 5`
             if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then
                 case=`echo $cmd | cut -d' ' -f 3`
                 start_time=`date +%s`
-                date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+                date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                 echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
                 || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
                 end_time=`date +%s`
                 echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
-            elif [[ "$2" == "system-test" ]] && [[ $line == *"system-test"* ]]; then
+            elif [[ "$line" == *"$2"* ]]; then
                 if [[ "$cmd" == *"pytest.sh"* ]]; then
                     cmd=`echo $cmd | cut -d' ' -f 2-20`
                 fi
                 case=`echo $cmd | cut -d' ' -f 4-20`
                 start_time=`date +%s`
-                date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
-                echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
-                echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
-                end_time=`date +%s`
-                echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
-            elif [[ "$2" == "develop-test" ]] && [[ $line == *"develop-test"* ]]; then
-                if [[ "$cmd" == *"pytest.sh"* ]]; then
-                    cmd=`echo $cmd | cut -d' ' -f 2-20`
-                fi
-                case=`echo $cmd | cut -d' ' -f 4-20`
-                start_time=`date +%s`
-                date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+                date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                 echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
                 echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
                 end_time=`date +%s`
                 echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
             fi
         fi
-    done < $1
+    done < $1
 }

 function runUnitTest() {
-    echo "=== Run unit test case ==="
-    echo " $TDENGINE_DIR/debug"
-    cd $TDENGINE_DIR/debug
-    ctest -j12
-
-    echo " $TDENGINE_DIR/tests/script/api"
-    cd $TDENGINE_DIR/tests/script/api
-    make clean && make
-
-    stopTaosd
-    stopTaosadapter
-
-    nohup taosd -c /etc/taos >> /dev/null 2>&1 &
-    ./batchprepare 127.0.0.1
-    ./dbTableRoute 127.0.0.1
-    ./stopquery 127.0.0.1 demo t1
-
-    echo "3.0 unit test done"
+    echo "=== Run unit test case ==="
+    echo " $TDENGINE_DIR/debug"
+    cd $TDENGINE_DIR/debug
+    ctest -j12
+    echo "3.0 unit test done"
 }

 function runSimCases() {
-    echo "=== Run sim cases ==="
-
-    cd $TDENGINE_DIR/tests/script
-    runCasesOneByOne ../parallel_test/cases.task sim
-
-    totalSuccess=`grep 'sim success' $TDENGINE_COVERAGE_REPORT | wc -l`
-    if [ "$totalSuccess" -gt "0" ]; then
-        echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
-    fi
+    echo "=== Run sim cases ==="

-    totalFailed=`grep 'sim failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
-    if [ "$totalFailed" -ne "0" ]; then
-        echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
-    fi
+    cd $TDENGINE_DIR/tests/script
+    runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases.task sim
+
+    totalSuccess=`grep 'sim success' $TDENGINE_COVERAGE_REPORT | wc -l`
+    if [ "$totalSuccess" -gt "0" ]; then
+        echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
+    fi
+
+    totalFailed=`grep 'sim failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
+    if [ "$totalFailed" -ne "0" ]; then
+        echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
+    fi
 }

 function runPythonCases() {
-    echo "=== Run python cases ==="
+    echo "=== Run python cases ==="

-    cd $TDENGINE_DIR/tests/parallel_test
-    sed -i '/compatibility.py/d' cases.task
-
-    cd $TDENGINE_DIR/tests/system-test
-    runCasesOneByOne ../parallel_test/cases.task system-test
+    cd $TDENGINE_DIR/tests/parallel_test
+    sed -i '/compatibility.py/d' cases.task

-    cd $TDENGINE_DIR/tests/develop-test
-    runCasesOneByOne ../parallel_test/cases.task develop-test
+    # army
+    cd $TDENGINE_DIR/tests/army
+    runCasesOneByOne ../parallel_test/cases.task army

-    totalSuccess=`grep 'py success' $TDENGINE_COVERAGE_REPORT | wc -l`
-    if [ "$totalSuccess" -gt "0" ]; then
-        echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
-    fi
+    # system-test
+    cd $TDENGINE_DIR/tests/system-test
+    runCasesOneByOne ../parallel_test/cases.task system-test

-    totalFailed=`grep 'py failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
-    if [ "$totalFailed" -ne "0" ]; then
-        echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
-    fi
+    # develop-test
+    cd $TDENGINE_DIR/tests/develop-test
+    runCasesOneByOne ../parallel_test/cases.task develop-test
+
+    totalSuccess=`grep 'py success' $TDENGINE_COVERAGE_REPORT | wc -l`
+    if [ "$totalSuccess" -gt "0" ]; then
+        echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
+    fi
+
+    totalFailed=`grep 'py failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
+    if [ "$totalFailed" -ne "0" ]; then
+        echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
+    fi
 }

 function runJDBCCases() {
-    echo "=== Run JDBC cases ==="
+    echo "=== Run JDBC cases ==="

-    cd $JDBC_DIR
-    git checkout -- .
-    git reset --hard HEAD
-    git checkout main
-    git pull
+    cd $JDBC_DIR
+    git checkout -- .
+    git reset --hard HEAD
+    git checkout main
+    git pull

-    stopTaosd
-    stopTaosadapter
+    stopTaosd
+    stopTaosadapter

-    nohup taosd -c /etc/taos >> /dev/null 2>&1 &
-    nohup taosadapter >> /dev/null 2>&1 &
+    nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos >> /dev/null 2>&1 &
+    nohup taosadapter >> /dev/null 2>&1 &

-    mvn clean test > result.txt 2>&1
-    summary=`grep "Tests run:" result.txt | tail -n 1`
-    echo -e "### JDBC test result: $summary ###" | tee -a $TDENGINE_COVERAGE_REPORT
-}
-
-function runTaosKeeperCases() {
-    echo "=== Run taoskeeper cases ==="
-
-    cd $TAOSKEEPER_DIR
-    git checkout -- .
-    git reset --hard HEAD
-    git checkout master
-    git pull
-
-    stopTaosd
-    stopTaosadapter
-
-    taosd -c /etc/taos >> /dev/null 2>&1 &
-    taosadapter >> /dev/null 2>&1 &
-
-    go mod tidy && go test -v ./...
+    mvn clean test > result.txt 2>&1
+    summary=`grep "Tests run:" result.txt | tail -n 1`
+    echo -e "### JDBC test result: $summary ###" | tee -a $TDENGINE_COVERAGE_REPORT
 }

 function runTest() {
-    echo "run Test"
-
-    cd $TDENGINE_DIR
-    [ -d sim ] && rm -rf sim
+    echo "run Test"
+
+    cd $TDENGINE_DIR
+    [ -d sim ] && rm -rf sim
     [ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT
-    runUnitTest
-    runSimCases
-    runPythonCases
-    runJDBCCases
-    runTaosKeeperCases
-
-    stopTaosd
-    cd $TDENGINE_DIR/tests/script
-    find . -name '*.sql' | xargs rm -f
+    runUnitTest
+    runSimCases
+    runPythonCases
+    runJDBCCases

-    cd $TDENGINE_DIR/tests/pytest
-    find . -name '*.sql' | xargs rm -f
+    stopTaosd
+    cd $TDENGINE_DIR/tests/script
+    find . -name '*.sql' | xargs rm -f
+
+    cd $TDENGINE_DIR/tests/pytest
+    find . -name '*.sql' | xargs rm -f
 }

 function lcovFunc {
-    echo "collect data by lcov"
-    cd $TDENGINE_DIR
+    echo "collect data by lcov"
+    cd $TDENGINE_DIR

-    # collect data
-    lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info
+    # collect data
+    lcov --capture -d $TDENGINE_DIR -b $TDENGINE_DIR -o coverage.info --ignore-errors negative,mismatch,inconsistent,source --branch-coverage --function-coverage --no-external

-    # remove exclude paths
-    if [ "$branch" == "main" ] ; then
-        lcov --remove coverage.info \
-            '*/contrib/*' '*/tests/*' '*/test/*' '*/tools/*' '*/libs/sync/*'\
-            '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
-            '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
-            '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
-            '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c' '*/shellAuto.c' '*/shellTire.c'\
-            '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' '*/catalog.c'\
-            '*/tqSnapshot.c' '*/tsdbSnapshot.c''*/metaSnapshot.c' '*/smaSnapshot.c' '*/tqOffsetSnapshot.c'\
-            '*/vnodeSnapshot.c' '*/metaSnapshot.c' '*/tsdbSnapshot.c' '*/mndGrant.c' '*/mndSnode.c' '*/streamRecover.c'\
-            '*/osAtomic.c' '*/osDir.c' '*/osFile.c' '*/osMath.c' '*/osSignal.c' '*/osSleep.c' '*/osString.c' '*/osSystem.c'\
-            '*/osThread.c' '*/osTime.c' '*/osTimezone.c' \
-            --rc lcov_branch_coverage=1 -o coverage.info
-    else
-        lcov --remove coverage.info \
-            '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c' '*/clientJniConnector.c'\
-            --rc lcov_branch_coverage=1 -o coverage.info
-    fi
+    # remove exclude paths
+    lcov --remove coverage.info \
+        '*/contrib/*' '*/tests/*' '*/test/*' '*/packaging/*' '*/taos-tools/*' '*/taosadapter/*' '*/TSZ/*' \
+        '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
+        '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
+        '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/examples/*' '*/tidpool.c' '*/tmempool.c'\
+        '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c'\
+        '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' \
+        '*/shellAuto.c' '*/shellTire.c' '*/shellCommand.c'\
+        '*/sql.c' '*/sql.y'\
+        --branch-coverage --function-coverage -o coverage.info
+    # generate result
+    echo "generate result"
+    lcov -l --branch-coverage --function-coverage coverage.info | tee -a $TDENGINE_COVERAGE_REPORT

-    # generate result
-    echo "generate result"
-    lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
+    sed -i 's/\/root\/TDengine\/sql.c/\/root\/TDengine\/source\/libs\/parser\/inc\/sql.c/g' coverage.info
+    sed -i 's/\/root\/TDengine\/sql.y/\/root\/TDengine\/source\/libs\/parser\/inc\/sql.y/g' coverage.info

-    # push result to coveralls.io
-    echo "push result to coveralls.io"
-    /usr/local/bin/coveralls-lcov coverage.info -t o7uY02qEAgKyJHrkxLGiCOTfL3IGQR2zm | tee -a $TDENGINE_COVERAGE_REPORT
+    # push result to coveralls.io
+    echo "push result to coveralls.io"
+    /usr/local/bin/coveralls-lcov coverage.info -b $branch -t o7uY02qEAgKyJHrkxLGiCOTfL3IGQR2zm | tee -a $TDENGINE_COVERAGE_REPORT

-    #/root/pxiao/checkCoverageFile.sh -s $TDENGINE_DIR/source -f $TDENGINE_COVERAGE_REPORT
-    #cat /root/pxiao/fileListNoCoverage.log | tee -a $TDENGINE_COVERAGE_REPORT
-    cat $TDENGINE_COVERAGE_REPORT | grep "| 0.0%" | awk -F "%" '{print $1}' | awk -F "|" '{if($2==0.0)print $1}' | tee -a $TDENGINE_COVERAGE_REPORT
-
+    #/root/pxiao/checkCoverageFile.sh -s $TDENGINE_DIR/source -f $TDENGINE_COVERAGE_REPORT
+    #cat /root/pxiao/fileListNoCoverage.log | tee -a $TDENGINE_COVERAGE_REPORT
+    cat $TDENGINE_COVERAGE_REPORT | grep "| 0.0%" | awk -F "%" '{print $1}' | awk -F "|" '{if($2==0.0)print $1}' | tee -a $TDENGINE_COVERAGE_REPORT
 }

 function sendReport {
-    echo "send report"
-    receiver="develop@taosdata.com"
-    mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
+    echo "send report"
+    receiver="develop@taosdata.com"
+    mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

-    cd $TDENGINE_DIR
+    cd $TDENGINE_DIR

-    sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_COVERAGE_REPORT
-    BODY_CONTENT=`cat $TDENGINE_COVERAGE_REPORT`
-    echo -e "from: \nto: ${receiver}\nsubject: Coverage test report ${branch} ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
-    (cat - && uuencode $TDENGINE_COVERAGE_REPORT coverage-report-$today.log) | \
-    /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!"
+    sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_COVERAGE_REPORT
+    BODY_CONTENT=`cat $TDENGINE_COVERAGE_REPORT`
+    echo -e "from: \nto: ${receiver}\nsubject: Coverage test report ${branch} ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
+    (cat - && uuencode $TDENGINE_COVERAGE_REPORT coverage-report-$today.log) | \
+    /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!"
 }

 function stopTaosd {
-    echo "Stop taosd start"
+    echo "Stop taosd start"
     systemctl stop taosd
-    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
-    while [ -n "$PID" ]
-    do
+    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+    while [ -n "$PID" ]
+    do
         pkill -TERM -x taosd
         sleep 1
-        PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
-    done
-    echo "Stop tasod end"
+        PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+    done
+    echo "Stop tasod end"
 }

 function stopTaosadapter {
-    echo "Stop taosadapter"
-    systemctl stop taosadapter.service
-    PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
-    while [ -n "$PID" ]
-    do
-        pkill -TERM -x taosadapter
-        sleep 1
-        PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
-    done
-    echo "Stop tasoadapter end"
+    echo "Stop taosadapter"
+    systemctl stop taosadapter.service
+    PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
+    while [ -n "$PID" ]
+    do
+        pkill -TERM -x taosadapter
+        sleep 1
+        PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+    done
+    echo "Stop tasoadapter end"
 }

@@ -318,7 +285,7 @@ buildTDengine
 runTest

 lcovFunc
-sendReport
+#sendReport
 stopTaosd

 date >> $WORK_DIR/cron.log
diff --git a/tests/script/tsim/field/3.sim b/tests/script/tsim/field/3.sim
index 8b428febcd..4d4801c54a 100644
--- a/tests/script/tsim/field/3.sim
+++ b/tests/script/tsim/field/3.sim
@@ -447,44 +447,44 @@ if $data00 != 100 then
 endi

 print =============== step17
-sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1
+sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc
 print $data00 $data01 $data02 $data03 $data04 $data05 $data06
 if $data00 != 100 then
   return -1
 endi
sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step18 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/4.sim b/tests/script/tsim/field/4.sim index 361ca4c326..d83de81c88 100644 --- a/tests/script/tsim/field/4.sim +++ b/tests/script/tsim/field/4.sim @@ -619,56 +619,56 @@ if $data00 != 100 then endi print =============== step22 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by 
tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step23 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), 
min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/6.sim b/tests/script/tsim/field/6.sim index 52fd0b3780..2d2b5623c7 100644 --- a/tests/script/tsim/field/6.sim +++ b/tests/script/tsim/field/6.sim @@ -824,117 +824,117 @@ if $data00 != 25 then endi print =============== step28 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol3 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol3 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol4 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol4 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol5 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol5 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol6 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol6 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step29 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 +sql select count(tbcol1), 
avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step30 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), 
avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/bigint.sim b/tests/script/tsim/field/bigint.sim index ce35cacd84..a791d6a1cb 100644 --- a/tests/script/tsim/field/bigint.sim +++ b/tests/script/tsim/field/bigint.sim @@ -127,7 +127,7 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 print $data10 $data11 $data12 $data13 $data14 $data15 $data16 if $data00 != 100 then @@ -136,7 +136,7 @@ if $data00 != 100 then endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by 
tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/double.sim b/tests/script/tsim/field/double.sim index 1f0cea4be8..2daa78eb7a 100644 --- a/tests/script/tsim/field/double.sim +++ b/tests/script/tsim/field/double.sim @@ -127,14 +127,14 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/smallint.sim b/tests/script/tsim/field/smallint.sim index 66bfee5838..980d69297a 100644 --- a/tests/script/tsim/field/smallint.sim +++ b/tests/script/tsim/field/smallint.sim @@ -128,14 +128,14 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/unsigined_bigint.sim b/tests/script/tsim/field/unsigined_bigint.sim index baa57ce1f6..de8098c297 100644 --- a/tests/script/tsim/field/unsigined_bigint.sim +++ b/tests/script/tsim/field/unsigined_bigint.sim @@ -131,7 +131,7 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 print $data10 $data11 $data12 $data13 $data14 $data15 $data16 if $data00 != 100 then @@ -140,7 +140,7 @@ if $data00 != 100 then endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), 
first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/parser/mixed_blocks.sim b/tests/script/tsim/parser/mixed_blocks.sim index 04e9df3ff4..8716192858 100644 --- a/tests/script/tsim/parser/mixed_blocks.sim +++ b/tests/script/tsim/parser/mixed_blocks.sim @@ -100,7 +100,8 @@ if $data05 != 1 then endi sql select max(c1), min(c1), sum(c1), avg(c1), count(c1), t1 from $stb where c1 > 0 group by t1 -if $rows != 1 then +if $rows != 2 then + print === rows $rows return -1 endi if $data00 != 319 then diff --git a/tests/script/tsim/query/sort-pre-cols.sim b/tests/script/tsim/query/sort-pre-cols.sim new file mode 100644 index 0000000000..ef69725d87 --- /dev/null +++ b/tests/script/tsim/query/sort-pre-cols.sim @@ -0,0 +1,17 @@ + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database d +sql use d +sql create table st(ts timestamp, v int) tags(lj json) +sql insert into ct1 using st tags('{"instance":"200"}') values(now, 1)(now+1s, 2); +sql insert into ct2 using st tags('{"instance":"200"}') values(now+2s, 3)(now+3s, 4); +sql select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(v) from st group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), lj->'instance' order by time; +print $data01 +if $data01 != 0.000000000 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim index 917a0a73ab..a525594861 100644 --- a/tests/script/tsim/stream/basic0.sim +++ b/tests/script/tsim/stream/basic0.sim @@ -33,6 +33,8 @@ if $rows != 3 then return -1 endi +sleep 1000 + sql create stream s1 trigger at_once into outstb as select _wstart, min(k), max(k), sum(k) as sum_alias from ct1 interval(10m) sql show stables diff --git a/tests/script/tsim/stream/basic4.sim b/tests/script/tsim/stream/basic4.sim index b4e3d62545..d2bf321ad5 100644 --- a/tests/script/tsim/stream/basic4.sim +++ b/tests/script/tsim/stream/basic4.sim @@ -80,6 +80,7 @@ sql use test2; sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create stream streams2 trigger at_once ignore expired 0 ignore update 0 waterMark 200s into streamt2 as select _wstart, count(*) c1 from t1 interval(1s); +sleep 1000 sql insert into t1 values(1648791211000,1,2,3,1.0); sql insert into t1 values(1648791212001,2,2,3,1.1); diff --git a/tests/script/tsim/stream/event2.sim b/tests/script/tsim/stream/event2.sim index eb9fca46e6..9fc7615fb8 100644 --- a/tests/script/tsim/stream/event2.sim +++ b/tests/script/tsim/stream/event2.sim @@ -31,6 +31,8 @@ sql insert into t4 values(1648791223000,1,1,10,3.0); sql insert into t4 values(1648791233000,0,2,11,1.0); sql insert into t4 values(1648791243000,1,9,12,2.0); +sleep 1000 + sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 fill_history 1 into streamt0 as select _wstart as s, count(*) c1, sum(b), max(c), _wend as e from st partition by tbname event_window start with a = 0 end with b = 9; sleep 1000 @@ -79,6 +81,37 @@ if $data21 != 2 then goto loop0 endi +sql insert into t3 values(1648791222000,0,1,7,3.0); + 
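+# the new out-of-order row for t3 starts another event window (start condition a = 0); poll streamt0 below until the recomputed result (4 rows, first window count 5) shows up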
+$loop_count = 0 +loop1: + +sleep 300 +print 2 sql select * from streamt0 order by 1, 2, 3, 4; +sql select * from streamt0 order by 1, 2, 3, 4; + +print +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != 5 then + print ======data01=$data01 + goto loop1 +endi + print event1 end system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim index cf42159d84..aae17053b2 100644 --- a/tests/script/tsim/stream/session1.sim +++ b/tests/script/tsim/stream/session1.sim @@ -327,4 +327,40 @@ if $rows != 1 then goto loop17 endi +sql create database test2 vgroups 4; +sql use test2; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams4 trigger at_once ignore update 0 ignore expired 0 into streamt4 as select _wstart, count(*) c1, count(a) c2 from st session(ts, 2s) ; + +sql insert into t1 values(1648791255100,1,2,3); +sql insert into t1 values(1648791255300,1,2,3); + +sleep 1000 + +sql insert into t1 values(1648791253000,1,2,3) (1648791254000,1,2,3); + +$loop_count = 0 +loop18: +sleep 1000 +sql select * from streamt4; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 1 then + print =====rows=$rows + goto loop18 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop18 +endi + +print =====over + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/state1.sim b/tests/script/tsim/stream/state1.sim index 775528bd4e..e6f7a81851 100644 --- a/tests/script/tsim/stream/state1.sim +++ b/tests/script/tsim/stream/state1.sim @@ -152,6 +152,49 @@ endi print step 2 over +print step 3 + +sql create database test3 vgroups 1; +sql use test3; +sql create table t1(ts timestamp, a int, b int , c int, d double); + +sql insert into t1 values(1648791213000,1,2,3,1.0); +sql insert into t1 values(1648791213001,1,2,3,1.0); + +print create stream streams3 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into streamt3 as select _wstart, max(a), count(*) c1 from t1 state_window(a); +sql create stream streams3 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into streamt3 as select _wstart, max(a), count(*) c1 from t1 state_window(a); + +sleep 1000 + +sql insert into t1 values(1648791203000,2,2,3,1.0); + +sleep 500 + +sql insert into t1 values(1648791214000,1,2,3,1.0); + +$loop_count = 0 +loop6: + +sleep 1000 +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print select * from streamt3 +sql select * from streamt3; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 +print $data30 $data31 $data32 + +if $rows != 2 then + print =====rows=$rows + goto loop6 +endi + +print step 3 over + print state1 end system sh/stop_dnodes.sh diff --git a/tests/script/win-test-file b/tests/script/win-test-file index d9ff09f468..744415c89f 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -15,7 +15,6 @@ ./test.sh -f tsim/dnode/balance2.sim ./test.sh -f tsim/vnode/replica3_repeat.sim ./test.sh -f
tsim/parser/col_arithmetic_operation.sim -./test.sh -f tsim/trans/create_db.sim ./test.sh -f tsim/dnode/balance3.sim ./test.sh -f tsim/vnode/replica3_many.sim ./test.sh -f tsim/stable/metrics_idx.sim @@ -190,6 +189,7 @@ ./test.sh -f tsim/query/unionall_as_table.sim ./test.sh -f tsim/query/multi_order_by.sim ./test.sh -f tsim/query/sys_tbname.sim +./test.sh -f tsim/query/sort-pre-cols.sim ./test.sh -f tsim/query/groupby.sim ./test.sh -f tsim/query/groupby_distinct.sim ./test.sh -f tsim/query/event.sim @@ -201,6 +201,7 @@ ./test.sh -f tsim/query/show_db_table_kind.sim ./test.sh -f tsim/query/bi_star_table.sim ./test.sh -f tsim/query/bi_tag_scan.sim +./test.sh -f tsim/query/bi_tbname_col.sim ./test.sh -f tsim/query/tag_scan.sim ./test.sh -f tsim/query/nullColSma.sim ./test.sh -f tsim/query/bug3398.sim diff --git a/tests/system-test/0-others/splitVGroup.py b/tests/system-test/0-others/splitVGroup.py new file mode 100644 index 0000000000..c49713fc6d --- /dev/null +++ b/tests/system-test/0-others/splitVGroup.py @@ -0,0 +1,464 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +import time +import copy +import string + +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + + # random string + def random_string(self, count): + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(count)) + + # get col value and total max min ... 
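+    # build the value tuple for one row: c1/c2 are randomly set to NULL, and the running count/max/min/sum of c2 are tracked so the totals can be verified after the split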
+ def getColsValue(self, i, j): + # c1 value + if random.randint(1, 10) == 5: + c1 = None + else: + c1 = 1 + + # c2 value + if j % 3200 == 0: + c2 = 8764231 + elif random.randint(1, 10) == 5: + c2 = None + else: + c2 = random.randint(-87654297, 98765321) + + + value = f"({self.ts}, " + + # c1 + if c1 is None: + value += "null," + else: + self.c1Cnt += 1 + value += f"{c1}," + # c2 + if c2 is None: + value += "null," + else: + value += f"{c2}," + # total count + self.c2Cnt += 1 + # max + if self.c2Max is None: + self.c2Max = c2 + else: + if c2 > self.c2Max: + self.c2Max = c2 + # min + if self.c2Min is None: + self.c2Min = c2 + else: + if c2 < self.c2Min: + self.c2Min = c2 + # sum + if self.c2Sum is None: + self.c2Sum = c2 + else: + self.c2Sum += c2 + + # c3 same as ts + value += f"{self.ts})" + + # move next + self.ts += 1 + + return value + + # insert data + def insertData(self): + tdLog.info("insert data ....") + sqls = "" + for i in range(self.childCnt): + # insert child table + values = "" + pre_insert = f"insert into @db_name.t{i} values " + for j in range(self.childRow): + if values == "": + values = self.getColsValue(i, j) + else: + values += "," + self.getColsValue(i, j) + + # batch insert + if j % self.batchSize == 0 and values != "": + sql = pre_insert + values + self.exeDouble(sql) + values = "" + # append last + if values != "": + sql = pre_insert + values + self.exeDouble(sql) + values = "" + + # insert normal table + for i in range(20): + self.ts += 1000 + name = self.random_string(20) + sql = f"insert into @db_name.ta values({self.ts}, {i}, {self.ts%100000}, '{name}', false)" + self.exeDouble(sql) + + # insert finished + tdLog.info(f"insert data successfully.\n" + f" inserted child table = {self.childCnt}\n" + f" inserted child rows = {self.childRow}\n" + f" total inserted rows = {self.childCnt*self.childRow}\n") + return + + def exeDouble(self, sql): + # dbname replace + sql1 = sql.replace("@db_name", self.db1) + + if len(sql1) > 100: + tdLog.info(sql1[:100]) + else: + tdLog.info(sql1) + tdSql.execute(sql1) + + sql2 = sql.replace("@db_name", self.db2) + if len(sql2) > 100: + tdLog.info(sql2[:100]) + else: + tdLog.info(sql2) + tdSql.execute(sql2) + + + # prepareEnv + def prepareEnv(self): + # init + self.ts = 1680000000000 + self.childCnt = 4 + self.childRow = 10000 + self.batchSize = 50000 + self.vgroups1 = 1 + self.vgroups2 = 1 + self.db1 = "db1" + self.db2 = "db2" + + # total + self.c1Cnt = 0 + self.c2Cnt = 0 + self.c2Max = None + self.c2Min = None + self.c2Sum = None + + # create database db wal_retention_period 0 + sql = f"create database @db_name vgroups {self.vgroups1} replica {self.replicaVar} wal_retention_period 0 wal_retention_size 1" + self.exeDouble(sql) + + # create super table st + sql = f"create table @db_name.st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)" + self.exeDouble(sql) + + # create child table + for i in range(self.childCnt): + sql = f"create table @db_name.t{i} using @db_name.st tags({i}) " + self.exeDouble(sql) + + # create normal table + sql = f"create table @db_name.ta(ts timestamp, c1 int, c2 bigint, c3 binary(32), c4 bool)" + self.exeDouble(sql) + + # insert data + self.insertData() + + # update + self.ts = 1680000000000 + 20000 + self.childRow = 1000 + + + # delete data + sql = "delete from @db_name.st where ts > 1680000019000 and ts < 1680000062000" + self.exeDouble(sql) + sql = "delete from @db_name.st where ts > 1680000099000 and ts < 1680000170000" + self.exeDouble(sql) + + # check data correct + def checkExpect(self,
sql, expectVal): + tdSql.query(sql) + rowCnt = tdSql.getRows() + for i in range(rowCnt): + val = tdSql.getData(i,0) + if val != expectVal: + tdLog.exit(f"Unexpected value. query={val} expect={expectVal} i={i} sql={sql}") + return False + + tdLog.info(f"check expect ok. sql={sql} expect={expectVal} rowCnt={rowCnt}") + return True + + # init + def init(self, conn, logSql, replicaVar=1): + seed = time.time() % 10000 + random.seed(seed) + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + + # check query result same + def queryDouble(self, sql): + # sql + sql1 = sql.replace('@db_name', self.db1) + tdLog.info(sql1) + start1 = time.time() + rows1 = tdSql.query(sql1,queryTimes=2) + spend1 = time.time() - start1 + res1 = copy.deepcopy(tdSql.queryResult) + + sql2 = sql.replace('@db_name', self.db2) + tdLog.info(sql2) + start2 = time.time() + tdSql.query(sql2,queryTimes=2) + spend2 = time.time() - start2 + res2 = tdSql.queryResult + + rowlen1 = len(res1) + rowlen2 = len(res2) + errCnt = 0 + + if rowlen1 != rowlen2: + tdLog.exit(f"row counts are not equal. rowlen1={rowlen1} rowlen2={rowlen2} ") + return False + + for i in range(rowlen1): + row1 = res1[i] + row2 = res2[i] + collen1 = len(row1) + collen2 = len(row2) + if collen1 != collen2: + tdLog.exit(f"column counts are not equal. collen1={collen1} collen2={collen2}") + return False + for j in range(collen1): + if row1[j] != row2[j]: + tdLog.info(f"error: column values not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .") + errCnt += 1 + + if errCnt > 0: + tdLog.exit(f"db2 column values differ from db1. different count={errCnt} ") + + # warning performance + diff = (spend2 - spend1)*100/spend1 + tdLog.info("spend1=%.6fs spend2=%.6fs diff=%.1f%%"%(spend1, spend2, diff)) + if spend2 > spend1 and diff > 20: + tdLog.info("warning: the performance diff after splitting is over 20%") + + return True + + + # check result + def checkResult(self): + # check vgroupid + sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{self.db2}'" + tdSql.query(sql,queryTimes=2) + tdSql.checkRows(self.vgroups2) + + # check child table count same + sql = "select table_name from information_schema.ins_tables where db_name='@db_name' order by table_name" + self.queryDouble(sql) + + # check row value is ok + sql = "select * from @db_name.st order by ts, tbname" + self.queryDouble(sql) + + # where + sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts, tbname" + self.queryDouble(sql) + + # max + sql = "select max(c1) from @db_name.st" + self.queryDouble(sql) + + # min + sql = "select min(c2) from @db_name.st" + self.queryDouble(sql) + + # sum + sql = "select sum(c1) from @db_name.st" + self.queryDouble(sql) + + # normal table + + # count + sql = "select count(*) from @db_name.ta" + self.queryDouble(sql) + + # all rows + sql = "select * from @db_name.ta" + self.queryDouble(sql) + + # sum + sql = "select sum(c1) from @db_name.ta" + self.queryDouble(sql) + + + # get vgroup list + def getVGroup(self, db_name): + vgidList = [] + sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{db_name}'" + res = tdSql.getResult(sql) + rows = len(res) + for i in range(rows): + vgidList.append(res[i][0]) + + return vgidList + + # split vgroup on db2 + def splitVGroup(self, db_name): + vgids = self.getVGroup(db_name) + selid = random.choice(vgids) + sql = f"split vgroup {selid}" + tdLog.info(sql) + tdSql.execute(sql) + + # wait end + seconds = 300 + for i in
range(seconds): + sql = "show transactions;" + rows = tdSql.query(sql) + if rows == 0: + tdLog.info("split vgroup finished.") + return True + #tdLog.info(f"i={i} wait split vgroup ...") + time.sleep(1) + + tdLog.exit(f"split vgroup transaction did not finish within {seconds}s") + return False + + # split error + def expectSplitError(self, dbName): + vgids = self.getVGroup(dbName) + selid = random.choice(vgids) + sql = f"split vgroup {selid}" + tdLog.info(sql) + tdSql.error(sql) + + # expect split ok + def expectSplitOk(self, dbName): + # split vgroup + vgList1 = self.getVGroup(dbName) + self.splitVGroup(dbName) + vgList2 = self.getVGroup(dbName) + vgNum1 = len(vgList1) + 1 + vgNum2 = len(vgList2) + if vgNum1 != vgNum2: + tdLog.exit(f"vgroup count {vgNum2} does not match the expected {vgNum1}") + return + + # split empty database + def splitEmptyDB(self): + dbName = "emptydb" + vgNum = 2 + # create database + sql = f"create database {dbName} vgroups {vgNum} replica {self.replicaVar}" + tdLog.info(sql) + tdSql.execute(sql) + + # split vgroup + self.expectSplitOk(dbName) + + + # forbid split cases + def checkForbid(self): + # stream + tdLog.info("check split is forbidden while a stream exists ...") + tdSql.execute("create database streamdb;") + tdSql.execute("use streamdb;") + tdSql.execute("create table ta(ts timestamp, age int);") + tdSql.execute("create stream ma into sta as select count(*) from ta interval(1s);") + self.expectSplitError("streamdb") + tdSql.execute("drop stream ma;") + self.expectSplitOk("streamdb") + + # topic + tdLog.info("check split is forbidden while a topic exists ...") + tdSql.execute("create database topicdb wal_retention_period 10;") + tdSql.execute("use topicdb;") + tdSql.execute("create table ta(ts timestamp, age int);") + tdSql.execute("create topic toa as select * from ta;") + + #self.expectSplitError("topicdb") + tdSql.execute("drop topic toa;") + self.expectSplitOk("topicdb") + + # compact and check db2 + def compactAndCheck(self): + tdLog.info("compact db2 and check result ...") + # compact + tdSql.execute(f"compact database {self.db2};") + # check result + self.checkResult() + + # run + def run(self): + # prepare env + self.prepareEnv() + tdLog.info("generate at least two stt files of the same fileset (e.g.
v4f1944) for db2 and db1 ") + for dbname in [self.db2, self.db1]: + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.010",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdLog.info("check db1 and db2 are the same after creating ...") + + self.checkResult() + + for i in range(3): + # split vgroup on db2 + start = time.time() + self.splitVGroup(self.db2) + end = time.time() + self.vgroups2 += 1 + + # insert the same data per table into the split vgroups + tdLog.info("insert the same data per table into the split vgroups (3,4)") + for dbname in [self.db2, self.db1]: + for tableid in range(self.childCnt): + tdSql.execute(f'insert into {dbname}.t{tableid} values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.ta values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999",0);') + tdSql.execute(f'flush database {dbname}') + + # check the two dbs return the same query results + self.checkResult() + spend = "%.3f"%(end-start) + tdLog.info(f"split vgroup i={i} passed. spend = {spend}s") + + # split empty db + self.splitEmptyDB() + + # check that topic and stream forbid split + self.checkForbid() + + # compact database + self.compactAndCheck() + + # stop + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/splitVGroupRep1.py b/tests/system-test/0-others/splitVGroupWal.py similarity index 88% rename from tests/system-test/0-others/splitVGroupRep1.py rename to tests/system-test/0-others/splitVGroupWal.py index 0b75a3e6e1..d11d63b5a9 100644 --- a/tests/system-test/0-others/splitVGroupRep1.py +++ b/tests/system-test/0-others/splitVGroupWal.py @@ -164,7 +164,7 @@ class TDTestCase: self.c2Sum = None # create database db - sql = f"create database @db_name vgroups {self.vgroups1} replica 1" + sql = f"create database @db_name vgroups {self.vgroups1} replica {self.replicaVar}" self.exeDouble(sql) # create super talbe st @@ -221,14 +221,14 @@ class TDTestCase: sql1 = sql.replace('@db_name', self.db1) tdLog.info(sql1) start1 = time.time() - rows1 = tdSql.query(sql1) + rows1 = tdSql.query(sql1,queryTimes=2) spend1 = time.time() - start1 res1 = copy.deepcopy(tdSql.queryResult) sql2 = sql.replace('@db_name', self.db2) tdLog.info(sql2) start2 = time.time() - tdSql.query(sql2) + tdSql.query(sql2,queryTimes=2) spend2 = time.time() - start2 res2 = tdSql.queryResult @@ -269,7 +269,7 @@ class TDTestCase: def checkResult(self): # check vgroupid sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{self.db2}'" - tdSql.query(sql) + tdSql.query(sql,queryTimes=2) tdSql.checkRows(self.vgroups2) # check child table count same @@ -277,11 +277,11 @@ self.queryDouble(sql) # check row value is ok - sql = "select * from @db_name.st order by ts" + sql = "select * from @db_name.st order by ts, tbname" self.queryDouble(sql) # where - sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts" + sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts, tbname" self.queryDouble(sql) #
max @@ -369,7 +369,7 @@ class TDTestCase: dbName = "emptydb" vgNum = 2 # create database - sql = f"create database {dbName} vgroups {vgNum} replica 1" + sql = f"create database {dbName} vgroups {vgNum} replica {self.replicaVar}" tdLog.info(sql) tdSql.execute(sql) @@ -412,8 +412,16 @@ class TDTestCase: def run(self): # prepare env self.prepareEnv() - + tdLog.info("generate at least two stt files of the same fileset (e.g. v4f1944) for db2 and db1 ") + for dbname in [self.db2, self.db1]: + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.010",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') tdLog.info("check db1 and db2 same after creating ...") + self.checkResult() for i in range(3): @@ -423,6 +431,15 @@ class TDTestCase: end = time.time() self.vgroups2 += 1 + # insert the same data per table into the split vgroups + tdLog.info("insert the same data per table into the split vgroups (3,4)") + for dbname in [self.db2, self.db1]: + for tableid in range(self.childCnt): + tdSql.execute(f'insert into {dbname}.t{tableid} values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;') + tdSql.execute(f'flush database {dbname}') + tdSql.execute(f'insert into {dbname}.ta values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999",0);') + tdSql.execute(f'flush database {dbname}') + # check two db query result same self.checkResult() spend = "%.3f"%(end-start) diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py index c1e1d521d2..a55508e229 100644 --- a/tests/system-test/0-others/tag_index_basic.py +++ b/tests/system-test/0-others/tag_index_basic.py @@ -88,6 +88,13 @@ class TDTestCase: tdLog.info(f" create {count} child tables ok.") + def create_tagidx_check(self, stbname): + err_dict = {"NULL","",",","\"","\"\"","undef","t1,t2","t12,t12"} + for errs in err_dict: + sql = (f'create index idx_err_check on {stbname} (%s)'% (errs)) + tdLog.info(f' sql={sql}') + tdSql.error(f'{sql}') + tdLog.info(f' create tagidx check ok.') # create stable and child tables def create_tagidx(self, stbname): @@ -231,6 +238,7 @@ class TDTestCase: count = 1000 # do self.create_table(stable, tbname, count) + self.create_tagidx_check(stable) self.create_tagidx(stable) self.insert_data(tbname) self.show_tagidx(stable) diff --git a/tests/system-test/0-others/test_hot_refresh_configurations.py b/tests/system-test/0-others/test_hot_refresh_configurations.py index cbde8c060e..71f6290469 100644 --- a/tests/system-test/0-others/test_hot_refresh_configurations.py +++ b/tests/system-test/0-others/test_hot_refresh_configurations.py @@ -159,20 +159,21 @@ class TDTestCase: ] } - def get_param_value_with_gdb(self, config_name, process_name): - res = subprocess.Popen("gdb -q -nx -p `pidof {}` --batch -ex 'set height 0' -ex 'p {}'".format(process_name, config_name), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - r_lines = res.stdout.read() - if r_lines: - for line in r_lines.decode().split("\n"): - if "$1 = " in line: - tdLog.debug("gdb result: {}".format(line)) - return line.split(" = ")[1] + def get_param_value(self, config_name): + tdSql.query("show local variables;") + for row in tdSql.queryResult:
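+            # each result row of `show local variables` holds the variable name in row[0] and its current value in row[1]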
+ if config_name == row[0]: + tdLog.debug("Found variable '{}'".format(row[0])) + return row[1] def cli_check(self, name, values, except_values=False): if not except_values: for v in values: tdLog.debug("Set {} to {}".format(name, v)) tdSql.execute(f'alter local "{name} {v}";') + value = self.get_param_value(name) + tdLog.debug("Get {} value: {}".format(name, value)) + assert(v == int(value)) else: for v in values: tdLog.debug("Set {} to {}".format(name, v)) @@ -190,9 +191,7 @@ for v in values: dnode = random.choice(p_list) tdSql.execute(f'alter {dnode} "{name} {v}";') - if platform.system() == "Linux" and platform.machine() == "aarch64": - continue - value = self.get_param_value_with_gdb(alias, "taosd") + value = self.get_param_value(alias) if value: tdLog.debug(f"value: {value}") assert(value == str(bool(v)).lower() if is_bool else str(v)) @@ -202,6 +201,14 @@ tdSql.error(f'alter {dnode} "{name} {v}";') def run(self): + + # reset log + taosdLogAbsoluteFilename = tdCom.getTaosdPath() + "/log/" + "taosdlog*" + tdSql.execute("alter all dnodes 'resetlog';") + r = subprocess.Popen("cat {} | grep 'reset log file'".format(taosdLogAbsoluteFilename), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = r.communicate() + assert('reset log file' in stdout.decode()) + for key in self.configration_dic: if "cli" == key: for item in self.configration_dic[key]: @@ -216,6 +223,7 @@ else: raise Exception(f"unknown key: {key}") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/1-insert/alter_database.py b/tests/system-test/1-insert/alter_database.py index 8de43b8ee8..6a831b88ff 100644 --- a/tests/system-test/1-insert/alter_database.py +++ b/tests/system-test/1-insert/alter_database.py @@ -4,7 +4,8 @@ import time import socket import os import threading - +import psutil +import platform from util.log import * from util.sql import * from util.cases import * @@ -17,6 +18,14 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self.buffer_boundary = [3, 4097, 8193, 12289, 16384] + # drop buffer sizes larger than 70% of free memory (MB); iterate over a copy since the list is mutated + if platform.system() == "Linux" and platform.machine() == "aarch64": + mem = psutil.virtual_memory() + free_memory = mem.free * 0.7 / 1024 / 1024 + for item in list(self.buffer_boundary): + if item > free_memory: + self.buffer_boundary.remove(item) + self.buffer_error = [self.buffer_boundary[0] - 1, self.buffer_boundary[-1]+1] # pages_boundary >= 64 diff --git a/tests/system-test/1-insert/db_tb_name_check.py b/tests/system-test/1-insert/db_tb_name_check.py index fa43603e25..e217bbe183 100644 --- a/tests/system-test/1-insert/db_tb_name_check.py +++ b/tests/system-test/1-insert/db_tb_name_check.py @@ -76,7 +76,8 @@ class TDTestCase: new_tbname = ''.join(tbname1) for sql in [f'`{dbname}`.`{new_tbname}`',f'`{new_tbname}`']: tdSql.error(f'create table {sql} (ts timestamp,c0 int)') - tdSql.execute(f'drop database `{dbname}`') + tdSql.execute(f'trim database `{dbname}`') + tdSql.execute(f'drop database `{dbname}`') def run(self): self.db_name_check() self.tb_name_check() diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 3e9bad4efd..85a0afed48 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -32,7 +32,7 @@
class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self._conn = conn def createDb(self, name="test", db_update_tag=0): diff --git a/tests/system-test/1-insert/insert_double.py b/tests/system-test/1-insert/insert_double.py index b47b22ab44..b7af7237db 100644 --- a/tests/system-test/1-insert/insert_double.py +++ b/tests/system-test/1-insert/insert_double.py @@ -15,7 +15,7 @@ class TDTestCase: self.replicaVar = int(replicaVar) self.database = "db1" tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepare_db(self): tdSql.execute(f"drop database if exists {self.database}") diff --git a/tests/system-test/1-insert/insert_perf.py b/tests/system-test/1-insert/insert_perf.py index d96a031458..dac2afb3b2 100644 --- a/tests/system-test/1-insert/insert_perf.py +++ b/tests/system-test/1-insert/insert_perf.py @@ -30,7 +30,7 @@ class TDTestCase: self.tb3 = "t3" self.once = 1000 tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepare_db(self): tdSql.execute(f"drop database if exists {self.database}") diff --git a/tests/system-test/1-insert/insert_timestamp.py b/tests/system-test/1-insert/insert_timestamp.py index bc3b1c0275..310a27dae0 100644 --- a/tests/system-test/1-insert/insert_timestamp.py +++ b/tests/system-test/1-insert/insert_timestamp.py @@ -13,25 +13,13 @@ class TDTestCase: tdSql.init(conn.cursor(), True) def run(self): - """ - timestamp输入插入规则: - 对于插入的字段类型为timestamp类型的字段,只允许这么几种情况: - timestamp - timestamp +/- interval - interval + timestamp - timestamp可以是字符串譬如:"2023-12-05 00:00:00.000", 也可以是int型, 譬如:1701619200000 - interval支持:b, u, a, s, m, h, d, w 不支持n, y,譬如:1h, 2d - - 仅支持2元表达式,譬如:timestamp + 2h, 不支持2元以上表达,譬如timestamp + 2h + 1d - """ - tdSql.execute("create database test_insert_timestamp PRECISION 'ns';") tdSql.execute("use test_insert_timestamp;") tdSql.execute("create stable st(ts timestamp, c1 int) tags(id int);") tdSql.execute("create table test_t using st tags(1);") expectErrInfo = "syntax error" - # 异常场景:timestamp + timestamp + # abnormal scenario: timestamp + timestamp tdSql.error("insert into test_t values(now + today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(now - today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(today() + now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) @@ -40,24 +28,24 @@ class TDTestCase: tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(1701111500000 + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:timestamp + interval + interval + # abnormal scenario: timestamp + interval + interval tdSql.error("insert into test_t values(today() + 1d + 1s, 1);", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:interval - timestamp + # abnormal scenario: interval - timestamp tdSql.error("insert into test_t values(2h - now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(2h - today(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:interval + interval + # abnormal scenario: interval + interval tdSql.error("insert into test_t values(2h - 1h, 1 ); 
", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(2h + 1h, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:非法interval类型n + # abnormal scenario: non-support datatype - n tdSql.error("insert into test_t values(today() + 2n, 7); ", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:非法interval类型y + # abnormal scenario: non-support datatype - y tdSql.error("insert into test_t values(today() - 2y, 8);", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:数据类型不对 + # abnormal scenario: non-support datatype tdSql.error("insert into test_t values('a1701619200000', 8);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values('ss2023-12-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(123456, 1);", expectErrInfo="Timestamp data out of range") @@ -66,31 +54,31 @@ class TDTestCase: tdSql.error("insert into test_t values(None, 1);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(null, 1);", expectErrInfo=expectErrInfo, fullMatched=False) - # 异常场景:格式不对 + # abnormal scenario: incorrect format tdSql.error("insert into test_t values('2023-122-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values('2023-12--05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values('12/12/2023' + 10a, 1);", expectErrInfo=expectErrInfo, fullMatched=False) tdSql.error("insert into test_t values(1701619200000111, 1);", expectErrInfo="Timestamp data out of range", fullMatched=False) - # 正常场景:timestamp + interval + # normal scenario:timestamp + interval tdSql.execute("insert into test_t values(today() + 2b, 1);") tdSql.execute("insert into test_t values(1701619200000000000 + 2u, 2);") tdSql.execute("insert into test_t values(today + 2a, 3);") tdSql.execute("insert into test_t values('2023-12-05 23:59:59.999' + 2a, 4);") tdSql.execute("insert into test_t values(1701921599000000000 + 3a, 5);") - # 正常场景:timestamp - interval + # normal scenario:timestamp - interval tdSql.execute("insert into test_t values(today() - 2s, 6);") tdSql.execute("insert into test_t values(now() - 2m, 7);") tdSql.execute("insert into test_t values(today - 2h, 8);") tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000000000' - 2a, 9);") tdSql.execute("insert into test_t values(1701669000000000000 - 2a, 10);") - # 正常场景:interval + timestamp + # normal scenario:interval + timestamp tdSql.execute("insert into test_t values(2d + now, 11);") tdSql.execute("insert into test_t values(2w + today, 12);") - # 正常场景:timestamp + # normal scenario:timestamp tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000', 13);") tdSql.execute("insert into test_t values(1701629100000000000, 14);") tdSql.execute("insert into test_t values(now() + 2s, 15);") @@ -102,7 +90,7 @@ class TDTestCase: tdSql.execute("insert into test_t values(1701619200000000000, -5);") tdSql.execute("insert into test_t values('2023-12-05 12:12:12' + 10a, 19);") - # 验证数据 + # data verification tdSql.query(f'select ts,c1 from test_t order by c1;') tdSql.checkRows(22) tdSql.checkEqual(tdSql.queryResult[0][0], 1699977600000000000) # c1=-15 @@ -133,12 +121,10 @@ class TDTestCase: tdSql.execute("drop database if exists test_insert_timestamp;") def __convert_ts_to_date(self, ts: int) -> str: - # 创建datetime对象并进行转换 dt_object = 
datetime.datetime.fromtimestamp(ts / 1e9) - # 格式化日期字符串 formatted_date = dt_object.strftime('%Y-%m-%d') - # print("转换后的日期为:", formatted_date) + return formatted_date def __get_today_ts(self) -> int: diff --git a/tests/system-test/1-insert/mutil_stage.py b/tests/system-test/1-insert/mutil_stage.py index 970554c6c1..ebaa1e7dc3 100644 --- a/tests/system-test/1-insert/mutil_stage.py +++ b/tests/system-test/1-insert/mutil_stage.py @@ -60,7 +60,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.taos_cfg_path = tdDnodes.dnodes[0].cfgPath self.taos_data_dir = tdDnodes.dnodes[0].dataDir diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py index d419aee12c..2d9e34c287 100644 --- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -33,7 +33,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self._conn = conn self.smlChildTableName_value = "id" diff --git a/tests/system-test/1-insert/stt_blocks_check.py b/tests/system-test/1-insert/stt_blocks_check.py new file mode 100644 index 0000000000..5de6414453 --- /dev/null +++ b/tests/system-test/1-insert/stt_blocks_check.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from random import randint +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +from util.boundary import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def stt_block_check(self): + tdSql.prepare() + tdSql.execute('use db') + + tdSql.execute('create table meters (ts timestamp, c1 int, c2 float) tags(t1 int)') + tdSql.execute("create table d0 using meters tags(1)") + + ts = 1704261670000 + + sql = "insert into d0 values " + for i in range(100): + sql = sql + f"({ts + i}, 1, 0.1)" + tdSql.execute(sql) + tdSql.execute("flush database db") + + ts = 1704261670099 + + sql = "insert into d0 values " + for i in range(100): + sql = sql + f"({ts + i}, 1, 0.1)" + tdSql.execute(sql) + tdSql.execute("flush database db") + + tdSql.execute(f"insert into d0 values({ts + 100}, 2, 1.0)") + tdSql.execute("flush database db") + + time.sleep(2) + + tdSql.query("show table distributed db.meters") + tdSql.query("select count(*) from db.meters") + tdSql.checkData(0, 0, 200) + + def run(self): + self.stt_block_check() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py index 14b717d1da..a3df0eb6b0 100644 --- a/tests/system-test/1-insert/time_range_wise.py +++ b/tests/system-test/1-insert/time_range_wise.py @@ -137,7 +137,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.precision = "ms" self.sma_count = 0 self.sma_created_index = [] diff --git a/tests/system-test/1-insert/ts-4272.py b/tests/system-test/1-insert/ts-4272.py index bb81305eb3..f2bacd0c2b 100644 --- a/tests/system-test/1-insert/ts-4272.py +++ b/tests/system-test/1-insert/ts-4272.py @@ -26,12 +26,10 @@ class TDTestCase: self.file1 = f"{self.testcasePath}/b.csv" self.file2 = f"{self.testcasePath}/c.csv" - #os.system("rm -rf %s/b.csv" %self.testcasePath) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), logSql) def check_count(self, rows, records): - tdSql.execute(f"use {self.db};") tdSql.query(f"select tbname,count(*) from {self.stable0} group by tbname order by tbname;") tdSql.checkRows(rows) for i in range(rows): @@ -39,13 +37,6 @@ class TDTestCase: def reset_tb(self): # create database and tables - # os.system("taos -s 'drop database if exists d1;'") - # os.system("taos -s 'create database d1;use d1;create stable meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);'") - # os.system(f"taos -s 'use d1;create table d2001 using meters(groupId) tags(5);'") - # res = os.system(f"taos -s 'use d1;create table d2002 using meters(groupId) tags(6);'") - # if (0 != res): - # tdLog.exit(f"create tb error") - tdSql.execute(f"drop database if exists {self.db};") 
tdSql.execute(f"create database {self.db};") tdSql.execute(f"use {self.db};") @@ -54,13 +45,14 @@ class TDTestCase: tdSql.execute(f"create table {self.tb2} {self.tag2};") tdSql.execute(f"create stable {self.stable1} (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);") tdSql.execute(f"create stable {self.stable2} (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);") + tdSql.execute(f"create table {self.stable1}_1 using {self.stable1}(t_int) tags(1);") + tdSql.execute(f"create table {self.stable2}_1 using {self.stable2}(t_int) tags(2);") def test(self, sql): - sql = f"use {self.db};" + sql - res = os.system(f'taos -s "{sql}"') - # if (0 != res): - # tdLog.exit(f"taos sql error") - + # sql = f"use {self.db};" + sql + # os.system(f'taos -s "{sql}"') + print(f'{sql}\n') + tdSql.execute(sql, 1) def check(self): # same table, auto create + create @@ -95,13 +87,8 @@ class TDTestCase: sql = f"insert into {self.tb1} file '{self.file1}' {self.tb2} file '{self.file2}';" self.test(sql) - # bigNum = 1010000 - # self.check_count(5, [2100, 2100, bigNum, bigNum, bigNum]) + self.check_count(2, [2010000, 1000000]) - result = os.popen("taos -s 'select count(*) from %s.%s'" %(self.db, self.tb1)) - res = result.read() - if (f"OK" in res): - tdLog.info(f"check count success") def make_csv(self, filepath, once, qtime, startts): f = open(filepath, 'w') @@ -118,10 +105,8 @@ class TDTestCase: def test_mix(self): #forbid use both value and file in one insert - result = os.popen(f"insert into {self.tb1} file '{self.file2}' {self.tb2} values('2021-07-13 14:06:34.630', 10.2, 219, 0.32);") - res = result.read() - if (f"error" in res): - tdLog.info(f"forbid success") + self.make_csv(self.file2, 10, 10, self.ts) + tdSql.error(f"insert into {self.tb1} file '{self.file2}' {self.tb2} values('2021-07-13 14:06:34.630', 10.2, 219, 0.32);") def test_bigcsv(self): # prepare csv @@ -144,7 +129,6 @@ class TDTestCase: self.test(sql) print("end insert to table") - #tdSql.execute(f"use d1;") tdSql.query(f"select tbname,count(*) from {self.stable0} group by tbname order by tbname;") tdSql.checkRows(2) tdSql.checkData(0, 1, rowNum1) @@ -160,7 +144,7 @@ class TDTestCase: ts = startts + offset rows = [] for i in range(once): - rows.append([table_name, ts + i, offset + i, 'NULL']) + rows.append([f"\'{table_name}\'", ts + i, offset + i, 'NULL']) writer.writerows(rows) f.close() print(datetime.now(), filepath, " 
ready!") @@ -171,22 +155,22 @@ class TDTestCase: once = 10000 qtime1 = 101 qtime2 = 100 - # rowNum1 = qtime1 * once - # rowNum2 = qtime2 * once child_1 = f"{self.stable1}_1" child_2 = f"{self.stable2}_1" self.make_stable_csv(self.file1, once, qtime1, self.ts - 86400000, child_1) self.make_stable_csv(self.file2, once, qtime2, self.ts, child_2) print("end stable_csv data prepare") - - # insert create child table of stable + sql = f"insert into {self.db}.{self.stable1}(tbname,ts,q_int,q_binary) file '{self.file1}' {self.db}.{self.stable2}(tbname,ts,q_int,q_binary) file '{self.file2}';" self.test(sql) print("end insert to stable") - #tdSql.execute(f"insert into {self.db}.{child_1}(ts, q_int) values(now, 1);") - tdSql.query(f"select tbname,count(*) from {self.stable1} group by tbname order by tbname;") - tdSql.checkRows(0) + tdSql.query(f"select tbname,count(*) from {self.stable1} group by tbname;") + tdSql.checkRows(1) + tdSql.checkData(0, 1, qtime1 * once) + tdSql.query(f"select tbname,count(*) from {self.stable2} group by tbname;") + tdSql.checkRows(1) + tdSql.checkData(0, 1, qtime2 * once) print("check stable success") def run(self): @@ -194,8 +178,10 @@ class TDTestCase: self.reset_tb() self.test_stable_csv() self.test_bigcsv() - self.test_mix() self.check() + self.test_mix() + os.system(f"rm -rf {self.file1}") + os.system(f"rm -rf {self.file2}") tdSql.close() def stop(self): diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index d64d550bc4..0eabd91535 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -17,7 +17,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.tb_nums = 10 self.row_nums = 20 self.ts = 1434938400000 diff --git a/tests/system-test/2-query/and_or_for_byte.py b/tests/system-test/2-query/and_or_for_byte.py index 15e9110b3b..5b2fb51998 100644 --- a/tests/system-test/2-query/and_or_for_byte.py +++ b/tests/system-test/2-query/and_or_for_byte.py @@ -17,7 +17,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.tb_nums = 10 self.row_nums = 20 self.ts = 1434938400000 diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py index e190bf9374..ee6835bd10 100644 --- a/tests/system-test/2-query/apercentile.py +++ b/tests/system-test/2-query/apercentile.py @@ -21,7 +21,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(),False) + tdSql.init(conn.cursor(),True) self.rowNum = 10 self.ts = 1537146000000 self.setsql = TDSetSql() diff --git a/tests/system-test/2-query/backslash_g.py b/tests/system-test/2-query/backslash_g.py new file mode 100644 index 0000000000..69b06289c3 --- /dev/null +++ b/tests/system-test/2-query/backslash_g.py @@ -0,0 +1,72 @@ + +import taos + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def checksql(self, sql): + result = os.popen(f"taos -s \"{sql}\" ") 
+ res = result.read() + print(res) + if ("Query OK" in res): + tdLog.info("checksql success") + else: + tdLog.exit("checksql error") + + def td_28164(self): + tdSql.execute("drop database if exists td_28164;") + tdSql.execute("create database td_28164;") + tdSql.execute("create table td_28164.test (ts timestamp, name varchar(10));") + tdSql.execute("insert into td_28164.test values (now(), 'ac\\\\G') (now() + 1s, 'ac\\\\G') (now()+2s, 'ac\\G') (now()+3s, 'acG') (now()+4s, 'acK') ;") + + tdSql.query(f"select * from td_28164.test;") + tdSql.checkRows(5) + + tdSql.query(f"select * from td_28164.test where name like 'ac\\\\\\G';") + tdSql.checkRows(2) + + tdSql.query(f"select * from td_28164.test where name like 'ac\\\\G';") + tdSql.checkRows(2) + + tdSql.query(f"select * from td_28164.test where name like 'ac\\G';") + tdSql.checkRows(2) + + # tdSql.query(f"select * from td_28164.test where name like 'ac\\\g';") + # tdSql.checkRows(0) + # + # tdSql.query(f"select * from td_28164.test where name like 'ac\\g';") + # tdSql.checkRows(0) + + self.checksql(f'select * from td_28164.test where name like \'ac\\G\'\G;') + # tdSql.checkRows(2) + + self.checksql(f"select * from td_28164.test where name like \'ac\\G\' \G;") + # tdSql.checkRows(2) + + tdSql.query(f"select * from td_28164.test where name like 'ac/\\G';") + tdSql.checkRows(0) + + tdSql.query(f"select * from td_28164.test where name like 'ac/G';") + tdSql.checkRows(0) + + def run(self): + # tdSql.prepare() + self.td_28164() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/blockSMA.py b/tests/system-test/2-query/blockSMA.py index 85c0189e27..605542cb3c 100644 --- a/tests/system-test/2-query/blockSMA.py +++ b/tests/system-test/2-query/blockSMA.py @@ -16,7 +16,7 @@ class TDTestCase: def run(self): dbname = "db" - tdSql.prepare() + tdSql.prepare(dbname=dbname, drop=True, stt_trigger=1) tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') diff --git a/tests/system-test/2-query/case_when.py b/tests/system-test/2-query/case_when.py index eebbcc3fa3..1ccd2b5076 100755 --- a/tests/system-test/2-query/case_when.py +++ b/tests/system-test/2-query/case_when.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor()) self.testcasePath = os.path.split(__file__)[0] self.testcaseFilename = os.path.split(__file__)[-1] diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py index b3ff259cc5..c49c4f1481 100644 --- a/tests/system-test/2-query/check_tsdb.py +++ b/tests/system-test/2-query/check_tsdb.py @@ -15,7 +15,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepare_datas(self, dbname="db"): tdSql.execute( diff --git a/tests/system-test/2-query/compact-col.py b/tests/system-test/2-query/compact-col.py new file mode 100644 index 0000000000..0a4614d710 --- /dev/null +++ b/tests/system-test/2-query/compact-col.py @@ -0,0 +1,71 @@ +import
sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-] + ''' + return + + def init(self, conn, logSql, replicaVer=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + self.conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use tbname_vgroup") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists tbname_vgroup") + tdSql.execute("create database if not exists tbname_vgroup") + tdSql.execute('use tbname_vgroup') + tdSql.execute('drop database if exists dbvg') + tdSql.execute('create database dbvg vgroups 8;') + + tdSql.execute('use dbvg;') + + tdSql.execute('create table st(ts timestamp, f int) tags (t int);') + + tdSql.execute("insert into ct1 using st tags(1) values('2021-04-19 00:00:01', 1)") + + tdSql.execute("insert into ct2 using st tags(2) values('2021-04-19 00:00:02', 2)") + + tdSql.execute("insert into ct3 using st tags(3) values('2021-04-19 00:00:03', 3)") + + tdSql.execute("insert into ct4 using st tags(4) values('2021-04-19 00:00:04', 4)") + + + tdSql.execute('create table st2(ts timestamp, f int) tags (t int);') + + tdSql.execute("insert into ct21 using st2 tags(1) values('2021-04-19 00:00:01', 1)") + + tdSql.execute("insert into ct22 using st2 tags(2) values('2021-04-19 00:00:02', 2)") + + tdSql.execute("insert into ct23 using st2 tags(3) values('2021-04-19 00:00:03', 3)") + + tdSql.execute("insert into ct24 using st2 tags(4) values('2021-04-19 00:00:04', 4)") + + + col_names = tdSql.getColNameList("compact database tbname_vgroup") + if col_names[0] != "result": + raise Exception("first column name of compact result shall be result") + + col_names = tdSql.getColNameList("show variables") + if col_names[0] != "name": + raise Exception("first column name of show variables result shall be name") + + tdSql.execute('drop database tbname_vgroup') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 40d9b3ff8b..c06ee28d02 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -6,7 +6,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(),False) + tdSql.init(conn.cursor(),True) self.setsql = TDSetSql() self.rowNum = 10 self.ts = 1537146000000 @@ -57,8 +57,11 @@ class TDTestCase: tdSql.query(f'select count(*) from {self.stbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) + rows = [2, 0] function_names = ['count', 'hyperloglog'] - for function_name in function_names: + for i in range(2): + function_name = function_names[i] + row = rows[i] tdSql.query(f'select {function_name}(tbname) from {self.stbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -93,17 +96,17 @@ tdSql.query(f'select sum(1),max(c2),min(1),leastsquares(c1,1,1) from {self.stbname}') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by c1') tdSql.checkRows(0)
tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by t0') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by c1') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by t0') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(1) from (select {function_name}(c1),sum(c1) from {self.stbname} group by c1)') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -113,17 +116,24 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by c1 interval(1s)') tdSql.checkRows(0) - tdSql.query(f'select {function_name}(1),sum(1) from (select {function_name}(1) from {self.stbname} group by tbname)') + tdSql.query(f'select {function_name}(1),sum(1) from (select {function_name}(1) from {self.stbname} group by tbname order by tbname)') tdSql.checkRows(1) - tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 1, None) + if 'count' == function_name: + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, 2) + elif 'hyperloglog' == function_name: + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) def query_empty_ntb(self): tdSql.query(f'select count(*) from {self.ntbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) + rows = [1, 0] function_names = ['count', 'hyperloglog'] - for function_name in function_names: + for i in range(2): + function_name = function_names[i] + row = rows[i] tdSql.query(f'select {function_name}(tbname) from {self.ntbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -158,7 +168,7 @@ class TDTestCase: tdSql.query(f'select sum(1),max(c2),min(1),leastsquares(c1,1,1) from {self.ntbname}') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} group by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} group by c1') tdSql.checkRows(0) tdSql.query(f'select {function_name}(1) from (select {function_name}(c1),sum(c1) from {self.ntbname} group by c1)') @@ -170,10 +180,11 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} partition by c1 interval(1s)') tdSql.checkRows(0) - tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname)') + tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname order by tbname)') tdSql.checkRows(1) - tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 1, None) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 1) + def count_query_stb(self,column_dict,tag_dict,stbname,tbnum,rownum): tdSql.query(f'select count(tbname) from {stbname}') tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum) diff --git a/tests/system-test/2-query/countAlwaysReturnValue.py b/tests/system-test/2-query/countAlwaysReturnValue.py index 244964e486..f22fa3e01e 100644 --- a/tests/system-test/2-query/countAlwaysReturnValue.py +++ b/tests/system-test/2-query/countAlwaysReturnValue.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepare_data(self, dbname="db"): tdSql.execute( diff --git a/tests/system-test/2-query/count_interval.py 
b/tests/system-test/2-query/count_interval.py
new file mode 100644
index 0000000000..b37cc1db22
--- /dev/null
+++ b/tests/system-test/2-query/count_interval.py
@@ -0,0 +1,60 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################

+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+
+class TDTestCase:
+    def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def restartTaosd(self, index=1, dbname="db"):
+        tdDnodes.stop(index)
+        tdDnodes.startWithoutSleep(index)
+        tdSql.execute("use d")
+
+    def run(self):
+        tdSql.execute("drop database if exists d")
+        tdSql.execute("create database d")
+        tdSql.execute("use d")
+        tdSql.execute("create table st(ts timestamp, f int) tags (t int)")
+
+        for i in range(-2048, 2047):
+            ts = 1626624000000 + i
+            tdSql.execute(f"insert into ct1 using st tags(1) values({ts}, {i})")
+
+        tdSql.execute("flush database d")
+        for i in range(1638):
+            ts = 1648758398208 + i
+            tdSql.execute(f"insert into ct1 using st tags(1) values({ts}, {i})")
+        tdSql.execute("insert into ct1 using st tags(1) values(1648970742528, 1638)")
+        tdSql.execute("flush database d")
+
+        # interval(17n, 5n): 17-month windows shifted by a 5-month offset,
+        # queried before and after a restart so both in-memory and flushed data are read
+        tdSql.query("select count(ts) from ct1 interval(17n, 5n)")
+        self.restartTaosd()
+        tdSql.query("select count(ts) from ct1 interval(17n, 5n)")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/count_partition.py b/tests/system-test/2-query/count_partition.py
index f59376a979..e970b00cec 100644
--- a/tests/system-test/2-query/count_partition.py
+++ b/tests/system-test/2-query/count_partition.py
@@ -66,9 +66,9 @@ class TDTestCase:
         tdSql.checkRows(self.row_nums+1)
         tdSql.query(f"select count(c1) , count(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname")
-        tdSql.checkRows(2)
+        tdSql.checkRows(10)
         tdSql.query(f"select count(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname")
-        tdSql.checkRows(2)
+        tdSql.checkRows(10)
         tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname")
         tdSql.checkRows(self.tb_nums)
diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py
index 0246626e40..07514a0950 100644
--- a/tests/system-test/2-query/db.py
+++ b/tests/system-test/2-query/db.py
@@ -14,7 +14,7 @@ class TDTestCase:
     def init(self, conn, logSql, replicaVar=1):
         self.replicaVar = int(replicaVar)
         tdLog.debug(f"start to excute {__file__}")
-        tdSql.init(conn.cursor(), False)
+        tdSql.init(conn.cursor(), True)
     def case1(self):
         tdSql.execute("create database if not exists dbms precision 'ms'")
@@ -52,7 +52,7 @@ class TDTestCase:
             tdSql.checkRows(1)
             tdSql.checkData(0, 0, i + 1)
             tdSql.checkData(0, 1, 'debugFlag')
-            tdSql.checkData(0, 2, 0)
+            tdSql.checkData(0, 2, 131)
         tdSql.query("show dnode 1 variables like '%debugFlag'")
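+        # '%debugFlag' should match one debugFlag variable per module, 23 on dnode 1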
tdSql.checkRows(23) diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index 10e16a690f..15f73344d3 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -88,12 +88,12 @@ class TDTestCase: tdSql.execute( f"create table {dbname}.ntb_null(ts timestamp,c1 int,c2 double,c3 float,c4 bool)") tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, 1.0, NULL, NULL)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 2.0, 2.0, NULL)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, 2, NULL, NULL, false)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 1.0, 1.0, NULL)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 3.0, NULL, true)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, 3, NULL, 3.0, NULL)") - tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, NULL, NULL, true)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+1s, NULL, 2.0, 2.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+2s, 2, NULL, NULL, false)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+3s, NULL, 1.0, 1.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+4s, NULL, 3.0, NULL, true)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+5s, 3, NULL, 3.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now+6s, 1, NULL, NULL, true)") tdSql.query(f"select diff(c1) from {dbname}.ntb_null") tdSql.checkRows(6) diff --git a/tests/system-test/2-query/fill_with_group.py b/tests/system-test/2-query/fill_with_group.py index b442647ff4..05be2ec9d2 100644 --- a/tests/system-test/2-query/fill_with_group.py +++ b/tests/system-test/2-query/fill_with_group.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: @@ -144,10 +144,61 @@ class TDTestCase: tdSql.query(sql) tdSql.checkRows(6) + def test_fill_with_order_by2(self): + ## window size: 5 minutes, with 6 rows in meters every 10 minutes + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) fill(prev) order by _wstart asc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, 10) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) fill(prev) order by _wstart desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) 
fill(linear) order by _wstart desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + def run(self): self.prepareTestEnv() self.test_partition_by_with_interval_fill_prev_new_group_fill_error() self.test_fill_with_order_by() + self.test_fill_with_order_by2() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/func_to_char_timestamp.py b/tests/system-test/2-query/func_to_char_timestamp.py index d955e00a82..034d025fcd 100644 --- a/tests/system-test/2-query/func_to_char_timestamp.py +++ b/tests/system-test/2-query/func_to_char_timestamp.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py index 44b86ee543..e5056b7c56 100644 --- a/tests/system-test/2-query/function_null.py +++ b/tests/system-test/2-query/function_null.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.tb_nums = 10 self.row_nums = 20 self.ts = 1434938400000 diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py new file mode 100644 index 0000000000..e228351f0e --- /dev/null +++ b/tests/system-test/2-query/group_partition.py @@ -0,0 +1,215 @@ +# author : bobliu +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.row_nums = 10 + self.tb_nums = 10 + self.ts = 1537146000000 + self.dbname = "db" + self.stable = "stb" + + def prepare_db(self): + tdSql.execute(f" use {self.dbname} ") + tdSql.execute(f" create stable {self.dbname}.{self.stable} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ + uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ + , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") + + for i in range(self.tb_nums): + tbname = f"{self.dbname}.sub_{self.stable}_{i}" + ts = self.ts + i*10000 + tdSql.execute(f"create table {tbname} using {self.dbname}.{self.stable} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") + + def insert_db(self, tb_nums, row_nums): + for i in range(tb_nums): + tbname = f"{self.dbname}.sub_{self.stable}_{i}" + ts_base = self.ts + i*10000 + for row in range(row_nums): + ts = ts_base + row*1000 + tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 
'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )") + + + def test_groupby(self, keyword, check_num, nonempty_tb_num): + ####### by tbname + tdSql.query(f"select count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} {keyword} by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname from {self.dbname}.{self.stable} {keyword} by tbname order by count(*)") + tdSql.checkRows(check_num) + + # last + tdSql.query(f"select last(ts), count(*) from {self.dbname}.{self.stable} {keyword} by tbname order by last(ts)") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname from {self.dbname}.{self.stable} {keyword} by tbname having count(*)>=0") + tdSql.checkRows(check_num) + + # having filter out empty + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} {keyword} by tbname having count(*) <= 0") + tdSql.checkRows(check_num - nonempty_tb_num) + + ####### by tag + tdSql.query(f"select t2, count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by t2 ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} {keyword} by t2 having count(*) <= 0") + tdSql.checkRows(check_num - nonempty_tb_num) + + # where + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now {keyword} by t2 ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 {keyword} by t2 ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 {keyword} by t2 ") + tdSql.checkRows(check_num) + + ####### by col + tdSql.query(f"select c1, count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by c1 ") + num = 0 + if nonempty_tb_num > 0: + num = self.row_nums + tdSql.checkRows(num) + + tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} {keyword} by ts ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + # col + tag + tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} {keyword} by t2, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} {keyword} by t2, t3, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + + def test_multi_group_key(self, check_num, nonempty_tb_num): + # multi tag/tbname + tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} group by t2, t3, tbname") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} partition by t2, t3, tbname") + tdSql.checkRows(check_num) + + # multi tag + col + tdSql.query(f"select t1, t2, c1, count(*) from {self.dbname}.{self.stable} partition by t1, t2, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + # tag + multi col + tdSql.query(f"select t2, c1, c2, count(*) from {self.dbname}.{self.stable} partition by t2, c1, c2 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + + def test_multi_agg(self, all_tb_num, nonempty_tb_num): + tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(all_tb_num) + + tdSql.query(f"select count(1), sum(1), avg(c1), apercentile(c1, 50), spread(ts) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(all_tb_num) + + tdSql.query(f"select count(c1), sum(c1), min(c1), 
mode(c1), stddev(c1), spread(c1) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(all_tb_num) + + # elapsed: continuous duration in a statistical period, table merge scan + tdSql.query(f" select count(c1), max(c5), last_row(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select count(c1), min(c1), avg(c1), elapsed(ts), mode(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select count(c1), elapsed(ts), twa(c1), irate(c1), leastsquares(c1,1,1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select avg(c1), elapsed(ts), twa(c1), irate(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(nonempty_tb_num) + + # if nonempty_tb_num > 0: + # tdSql.query(f" select avg(c1), percentile(c1, 50) from {self.dbname}.sub_{self.stable}_1") + # tdSql.checkRows(1) + + def test_innerSelect(self, check_num): + tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") + tdSql.checkRows(check_num) + + tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} partition by tbname) ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t1, c from (select t1, sum(c1) as s, count(*) as c from {self.dbname}.{self.stable} partition by t1)") + tdSql.checkRows(check_num) + + + def test_window(self, nonempty_tb_num): + # empty group optimization condition is not met + # time window + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname interval(1d)") + tdSql.checkRows(nonempty_tb_num) + + tdSql.query(f"select _wstart, _wend, count(c1), max(c1), apercentile(c1, 50) from {self.dbname}.{self.stable} partition by tbname interval(1d)") + tdSql.checkRows(nonempty_tb_num) + + # state window + tdSql.query(f"select tbname, count(*), c1 from {self.dbname}.{self.stable} partition by tbname state_window(c1)") + tdSql.checkRows(nonempty_tb_num * self.row_nums) + + # session window + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname session(ts, 5s)") + tdSql.checkRows(nonempty_tb_num) + + # event window + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname event_window start with c1 >= 0 end with c2 = 9;") + tdSql.checkRows(nonempty_tb_num) + + + + def test_error(self): + tdSql.error(f"select * from {self.dbname}.{self.stable} group by t2") + tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") + tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 interval(1d)") + + + def run(self): + tdSql.prepare() + self.prepare_db() + # empty table only + self.test_groupby('group', self.tb_nums, 0) + self.test_groupby('partition', self.tb_nums, 0) + self.test_innerSelect(self.tb_nums) + self.test_multi_group_key(self.tb_nums, 0) + self.test_multi_agg(self.tb_nums, 0) + self.test_window(0) + + # insert data to 5 tables + nonempty_tb_num = 5 + self.insert_db(nonempty_tb_num, self.row_nums) + + self.test_groupby('group', self.tb_nums, nonempty_tb_num) + self.test_groupby('partition', self.tb_nums, nonempty_tb_num) + self.test_innerSelect(self.tb_nums) + self.test_multi_group_key(self.tb_nums, nonempty_tb_num) + self.test_multi_agg(self.tb_nums, nonempty_tb_num) + self.test_window(nonempty_tb_num) + + ## test old version before changed + # self.test_groupby('group', 0, 0) + # 
self.insert_db(5, self.row_nums) + # self.test_groupby('group', 5, 5) + + self.test_error() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py index cc6e8fe7e9..a69fb505d1 100644 --- a/tests/system-test/2-query/histogram.py +++ b/tests/system-test/2-query/histogram.py @@ -147,7 +147,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def __create_hsg(self, sma:Hsgschema): return f"""{sma.histogram_flag}({sma.col}, '{sma.bin_type}', '{sma.bin_desc}', {sma.normalized})""" diff --git a/tests/system-test/2-query/interval_limit_opt.py b/tests/system-test/2-query/interval_limit_opt.py index 492f453de5..aa1702fe3c 100644 --- a/tests/system-test/2-query/interval_limit_opt.py +++ b/tests/system-test/2-query/interval_limit_opt.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: diff --git a/tests/system-test/2-query/interval_limit_opt_2.py b/tests/system-test/2-query/interval_limit_opt_2.py index cadb32b388..cfabe46dca 100644 --- a/tests/system-test/2-query/interval_limit_opt_2.py +++ b/tests/system-test/2-query/interval_limit_opt_2.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: diff --git a/tests/system-test/2-query/interval_unit.py b/tests/system-test/2-query/interval_unit.py index 9430c74cff..03917bfaf7 100644 --- a/tests/system-test/2-query/interval_unit.py +++ b/tests/system-test/2-query/interval_unit.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d', precision: str='ms'): if dropFlag == 1: diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index d976edb49c..82841541f0 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.tb_nums = 10 self.row_nums = 20 self.ts = 1434938400000 @@ -209,7 +209,7 @@ class TDTestCase: tdSql.error(f"select irate(c1), abs(c1) from {dbname}.ct4 ") # agg functions mix with agg functions - tdSql.query(f"select irate(c1), count(c5) from {dbname}.stb1 partition by tbname order by tbname") + tdSql.query(f"select irate(c1), count(c5) from {dbname}.stb1 partition by tbname having count(c5)>0 order by tbname") tdSql.checkData(0, 0, 0.000000000) tdSql.checkData(1, 0, 
0.000000000) tdSql.checkData(0, 1, 13) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index 120cc2eb30..c5c8f6c730 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -66,7 +66,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def __query_condition(self,tbname): query_condition = [] diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py index 7ddc5e57ba..1006033eae 100644 --- a/tests/system-test/2-query/join2.py +++ b/tests/system-test/2-query/join2.py @@ -29,7 +29,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def __query_condition(self,tbname): query_condition = [] diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index 8db92f38a2..33209620e1 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -16,7 +16,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def run(self): # tdSql.prepare() diff --git a/tests/system-test/2-query/json_tag_large_tables.py b/tests/system-test/2-query/json_tag_large_tables.py index ea3963640a..86bd6c2d82 100644 --- a/tests/system-test/2-query/json_tag_large_tables.py +++ b/tests/system-test/2-query/json_tag_large_tables.py @@ -36,7 +36,7 @@ class TDTestCase: self.testcaseFilename = os.path.split(__file__)[-1] # os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def run(self): # tdSql.prepare() diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py index f4a1ab1790..a160aeab11 100644 --- a/tests/system-test/2-query/last.py +++ b/tests/system-test/2-query/last.py @@ -289,6 +289,82 @@ class TDTestCase: tdSql.query(f"select * from {stbname}") tdSql.checkRows(tables * rows) + def check_explain_res_has_row(self, plan_str_expect: str, rows, sql): + plan_found = False + for row in rows: + if str(row).find(plan_str_expect) >= 0: + tdLog.debug("plan: [%s] found in: [%s]" % (plan_str_expect, str(row))) + plan_found = True + break + if not plan_found: + tdLog.exit("plan: %s not found in res: [%s] in sql: %s" % (plan_str_expect, str(rows), sql)) + + def check_explain_res_no_row(self, plan_str_not_expect: str, res, sql): + for row in res: + if str(row).find(plan_str_not_expect) >= 0: + tdLog.exit('plan: [%s] found in: [%s] for sql: %s' % (plan_str_not_expect, str(row), sql)) + + def explain_sql(self, sql: str): + sql = "explain " + sql + tdSql.query(sql, queryTimes=1) + return tdSql.queryResult + + def last_check_scan_type(self, cacheModel): + tdSql.execute("create database test_last_tbname cachemodel '%s';" % cacheModel) + tdSql.execute("use test_last_tbname;") + tdSql.execute("create stable test_last_tbname.st(ts timestamp, id int) tags(tid int);") + tdSql.execute("create table test_last_tbname.test_t1 using test_last_tbname.st tags(1);") + + maxRange = 100 + # 2023-11-13 00:00:00.000 + startTs = 1699804800000 + for i in range(maxRange): + 
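# insert one row per millisecond starting at startTs so the newest
+            # timestamp is known when checking last(ts) below
+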
insertSqlString = "insert into test_last_tbname.test_t1 values(%d, %d);" % (startTs + i, i) + tdSql.execute(insertSqlString) + + last_ts = startTs + maxRange + tdSql.execute("insert into test_last_tbname.test_t1 (ts) values(%d)" % (last_ts)) + sql = f'select tbname, last(ts) from test_last_tbname.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, "test_t1") + tdSql.checkData(0, 1, last_ts) + + explain_res = self.explain_sql(sql) + if cacheModel == "both" or cacheModel == "last_value": + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + else: + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + + sql = f'select last(ts), tbname from test_last_tbname.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, "test_t1") + + explain_res = self.explain_sql(sql) + if cacheModel == "both" or cacheModel == "last_value": + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + else: + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select tbname, last(ts), tbname from test_last_tbname.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, "test_t1") + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, "test_t1") + + explain_res = self.explain_sql(sql) + if cacheModel == "both" or cacheModel == "last_value": + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + else: + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + tdSql.execute("drop table if exists test_last_tbname.test_t1 ;") + tdSql.execute("drop stable if exists test_last_tbname.st;") + tdSql.execute("drop database if exists test_last_tbname;") def run(self): self.last_check_stb_tb_base() @@ -296,6 +372,11 @@ class TDTestCase: self.last_check_stb_distribute() self.last_file_check() + self.last_check_scan_type("none") + self.last_check_scan_type("last_row") + self.last_check_scan_type("last_value") + self.last_check_scan_type("both") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/last_cache_scan.py b/tests/system-test/2-query/last_cache_scan.py index 01795f6eef..3881607437 100644 --- a/tests/system-test/2-query/last_cache_scan.py +++ b/tests/system-test/2-query/last_cache_scan.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: @@ -181,7 +181,7 @@ class TDTestCase: select_items = [ "last(ts), ts", "last(ts), c1", "last(ts), c2", "last(ts), c3",\ "last(ts), c4", "last(ts), tbname", "last(ts), t1", "last(ts), ts, ts"] - has_last_row_scan_res = [1, 0, 0, 0, 0, 0, 0, 1] + has_last_row_scan_res = [1, 0, 0, 0, 0, 1, 1, 1] res_expect = [ ["2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000"], None, None, None, None, None, None, @@ -193,7 +193,7 @@ class TDTestCase: select_items = ["last(c1),ts", "last(c1), c1", "last(c1), c2", "last(c1), c3",\ "last(c1), c4", "last(c1), tbname", "last(c1), t1", "last(c1), ts, ts", "last(c1), c1, c1"] - has_last_row_scan_res = [1, 1, 0, 0, 0, 0, 0, 1, 1] + has_last_row_scan_res = [1, 1, 0, 0, 0, 1, 1, 1, 1] res_expect = [ [999, "2018-11-25 19:30:00.000"], [999, 999], None, None, None, None, None, @@ -207,7 +207,7 @@ class TDTestCase: sql_template = 'select %s from 
t1' select_items = ["last(c4),ts", "last(c4), c1", "last(c4), c2", "last(c4), c3",\ "last(c4), c4", "last(c4), tbname", "last(c4), t1"] - has_last_row_scan_res = [1, 0, 0, 0, 1, 0, 0] + has_last_row_scan_res = [1, 0, 0, 0, 1, 1, 1] res_expect = [ [4999.000000000000000, "2018-11-25 19:30:00.000"], None,None,None, @@ -220,7 +220,7 @@ class TDTestCase: sql_template = 'select %s from meters' select_items = ["last(c8), ts", "last(c8), c1", "last(c8), c8", "last(c8), tbname", \ "last(c8), t1", "last(c8), c8, c8", "last(c8), ts, ts"] - has_last_row_scan_res = [1, 0, 1, 0, 0, 1, 1] + has_last_row_scan_res = [1, 0, 1, 1, 1, 1, 1] res_expect = [ ["binary9999", "2018-11-25 19:30:00.000"], None, @@ -284,6 +284,8 @@ class TDTestCase: tdSql.checkData(0, 3, 1001) tdSql.checkData(0, 4, "2018-11-25 19:30:00.000") + tdSql.query("select last(ts) from meters partition by tbname") + tdSql.query("select last(ts) from meters partition by t1") sql_template = 'select %s from meters partition by tbname' select_items = ["ts, last(c10), c10, ts", "ts, ts, last(c10), c10, tbname", "last(c10), c10, ts"] has_last_row_scan_res = [1,1,1] @@ -339,44 +341,12 @@ class TDTestCase: tdSql.checkData(0, 0, '999') p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c12 int"]) p.check_returncode() - tdSql.query("select last(c1) from meters", queryTimes=1) - tdSql.checkData(0, 0, None) - tdSql.query('select last(*) from meters', queryTimes=1) - print(str(tdSql.queryResult)) - tdSql.checkData(0, 1, None) - tdSql.query('select last(c1), c1, ts from meters', queryTimes=1) - tdSql.checkRows(1) - tdSql.checkData(0, 0, None) - tdSql.checkData(0, 1, None) - tdSql.checkData(0, 2, None) - - try: - tdSql.query('select ts, last(c1), c1, ts, c1 from meters', queryTimes=1) - except Exception as e: - if str(e).count('Invalid column name') == 1: - print('column has been dropped, the cache has been updated: %s' % (str(e))) - return - else: - raise - tdSql.checkRows(1) - tdSql.checkCols(5) - tdSql.checkData(0, 0, None) - tdSql.checkData(0, 1, None) - tdSql.checkData(0, 2, None) - tdSql.checkData(0, 3, None) - tdSql.checkData(0, 4, None) - - try: - tdSql.query('select last(c1), last(c2), last(c3) from meters', queryTimes=1) - except Exception as e: - if str(e).count('Invalid column name') == 1: - print('column has been dropped, the cache has been updated: %s' % (str(e))) - return - else: - raise + tdSql.query_success_failed("select ts, last(c1), c1, ts, c1 from meters", queryTimes=10, expectErrInfo="Invalid column name: c1") + tdSql.query('select last(c12), c12, ts from meters', queryTimes=1) tdSql.checkRows(1) tdSql.checkCols(3) tdSql.checkData(0, 0, None) + tdSql.checkData(0, 1, None) def test_cache_scan_with_drop_column(self): tdSql.query('select last(*) from meters') @@ -401,49 +371,48 @@ class TDTestCase: tdSql.checkData(0, 10, None) def test_cache_scan_last_row_with_drop_column2(self): - tdSql.query('select last_row(c1) from meters') + tdSql.query('select last_row(c2) from meters') print(str(tdSql.queryResult)) tdSql.checkCols(1) - p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c11 int"]) + p = subprocess.run(["taos", '-s', "alter table test.meters drop column c2; alter table test.meters add column c1 int"]) p.check_returncode() - tdSql.query('select last_row(c1) from meters', queryTimes=1) - print(str(tdSql.queryResult)) - tdSql.checkCols(1) + tdSql.query_success_failed("select ts, last(c2), c12, ts, c12 from meters", 
queryTimes=10, expectErrInfo="Invalid column name: c2") + tdSql.query('select last(c1), c1, ts from meters', queryTimes=1) + tdSql.checkRows(1) + tdSql.checkCols(3) tdSql.checkData(0, 0, None) + tdSql.checkData(0, 1, None) def test_cache_scan_last_row_with_partition_by(self): tdSql.query('select last(c1) from meters partition by t1') print(str(tdSql.queryResult)) tdSql.checkCols(1) - tdSql.checkRows(5) - p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c11 int"]) + tdSql.checkRows(2) + p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c2 int"]) p.check_returncode() - tdSql.query('select last_row(c1) from meters partition by t1', queryTimes=1) + tdSql.query_success_failed('select last(c1) from meters partition by t1', queryTimes=10, expectErrInfo="Invalid column name: c1") + tdSql.query('select last(c2), c2, ts from meters', queryTimes=1) print(str(tdSql.queryResult)) - tdSql.checkCols(1) - tdSql.checkRows(5) + tdSql.checkRows(1) + tdSql.checkCols(3) tdSql.checkData(0, 0, None) - tdSql.checkData(1, 0, None) - tdSql.checkData(2, 0, None) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) + tdSql.checkData(0, 1, None) + def test_cache_scan_last_row_with_partition_by_tbname(self): - tdSql.query('select last(c1) from meters partition by tbname', queryTimes=1) + tdSql.query('select last(c2) from meters partition by tbname') print(str(tdSql.queryResult)) tdSql.checkCols(1) tdSql.checkRows(10) - p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c11 int"]) + p = subprocess.run(["taos", '-s', "alter table test.meters drop column c2; alter table test.meters add column c1 int"]) p.check_returncode() - tdSql.query('select last_row(c1) from meters partition by tbname', queryTimes=1) + tdSql.query_success_failed('select last_row(c2) from meters partition by tbname', queryTimes=10, expectErrInfo="Invalid column name: c2") + tdSql.query('select last(c1), c1, ts from meters', queryTimes=1) print(str(tdSql.queryResult)) - tdSql.checkCols(1) - tdSql.checkRows(10) + tdSql.checkRows(1) + tdSql.checkCols(3) tdSql.checkData(0, 0, None) - tdSql.checkData(1, 0, None) - tdSql.checkData(2, 0, None) - tdSql.checkData(3, 0, None) - tdSql.checkData(4, 0, None) + tdSql.checkData(0, 1, None) @@ -455,9 +424,9 @@ class TDTestCase: self.test_cache_scan_with_drop_and_add_column2() #self.test_cache_scan_with_drop_column() #self.test_cache_scan_last_row_with_drop_column() - #self.test_cache_scan_last_row_with_drop_column2() - #self.test_cache_scan_last_row_with_partition_by() - #self.test_cache_scan_last_row_with_partition_by_tbname() + self.test_cache_scan_last_row_with_drop_column2() + self.test_cache_scan_last_row_with_partition_by() + self.test_cache_scan_last_row_with_partition_by_tbname() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index a6bcc2c5f1..6469b3ea54 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -435,7 +435,7 @@ class TDTestCase: tdSql.checkData(1,0,'ct4') tdSql.checkData(1,1,None) - tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ") + tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 having count(c1)>0") tdSql.checkRows(2) # filter by tbname diff --git a/tests/system-test/2-query/like.py b/tests/system-test/2-query/like.py new file mode 100644 index 
0000000000..2f165d46ea
--- /dev/null
+++ b/tests/system-test/2-query/like.py
@@ -0,0 +1,190 @@
+import taos
+import sys
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.common import tdCom
+
+class TDTestCase:
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    def initDB(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db")
+
+    def stopTest(self):
+        tdSql.execute("drop database if exists db")
+
+    def like_wildcard_test(self):
+        # in like patterns, % matches any sequence, _ matches a single
+        # character, and a backslash escapes a literal % or _
+        tdSql.execute("create table db.t1x (ts timestamp, c1 varchar(100))")
+        tdSql.execute("create table db.t_1x (ts timestamp, c1 varchar(100))")
+
+        tdSql.query("select * from information_schema.ins_columns where table_name like '%1x'")
+        tdSql.checkRows(4)
+
+        tdSql.query("select * from information_schema.ins_columns where table_name like '%\_1x'")
+        tdSql.checkRows(2)
+
+        tdSql.execute("insert into db.t1x values(now, 'abc'), (now+1s, 'a%c'),(now+2s, 'a_c'),(now+3s, '_c'),(now+4s, '%c')")
+
+        tdSql.query("select * from db.t1x")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t1x where c1 like '%_c'")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t1x where c1 like '%__c'")
+        tdSql.checkRows(3)
+
+        tdSql.query("select * from db.t1x where c1 like '%\_c'")
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from db.t1x where c1 like '%\%c'")
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from db.t1x where c1 like '_\%c'")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, "a%c")
+
+        tdSql.query("select * from db.t1x where c1 like '_\_c'")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, "a_c")
+
+        tdSql.query("select * from db.t1x where c1 like '%%_c'")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t1x where c1 like '%_%c'")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t1x where c1 like '__%c'")
+        tdSql.checkRows(3)
+
+        tdSql.query("select * from db.t1x where c1 not like '__%c'")
+        tdSql.checkRows(2)
+
+    def like_cnc_wildcard_test(self):
+        tdSql.execute("create table db.t3x (ts timestamp, c1 varchar(100))")
+
+        tdSql.execute("insert into db.t3x values(now, '我是中文'), (now+1s, '我是_中文'), (now+2s, '我是%中文'), (now+3s, '%中文'),(now+4s, '_中文')")
+        tdSql.query("select * from db.t3x")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t3x where c1 like '%中文'")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from db.t3x where c1 like '%中_文'")
+        tdSql.checkRows(0)
+
+        tdSql.query("select * from db.t3x where c1 like '%\%中文'")
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from db.t3x where c1 like '%\_中文'")
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from db.t3x where c1 like '_中文'")
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from db.t3x where c1 like '\_中文'")
+        tdSql.checkRows(1)
+
+    def like_multi_wildcard_test(self):
+        tdSql.execute("create table db.t4x (ts timestamp, c1 varchar(100))")
+
+        # insert test data
+        tdSql.execute("insert into db.t4x values(now, 'abc'), (now+1s, 'a%c'),(now+2s, 'a_c'),(now+3s, '_c'),(now+4s, '%c')")
+        tdSql.execute("insert into db.t4x values(now+5s, '%%%c'),(now+6s, '___c'),(now+7s, '%_%c'),(now+8s, '%\\c')")
+
+        tdSql.query("select * from db.t4x where c1 like '%%%_'")
+        tdSql.checkRows(9)
+
+        tdSql.query("select * from db.t4x where c1 like '\%\%\%_'")
+        tdSql.checkRows(1)
+
+        tdSql.query("select * from db.t4x where c1 like '%\_%%'")
+        tdSql.checkRows(4)
+
+        tdSql.query("select * from db.t4x where c1 
like '_\%\%'") + tdSql.checkRows(0) + + tdSql.query("select * from db.t4x where c1 like '%abc%'") + tdSql.checkRows(1) + + tdSql.query("select * from db.t4x where c1 like '_%abc%'") + tdSql.checkRows(0) + + tdSql.query("select * from db.t4x where c1 like '\%%\%%'") + tdSql.checkRows(2) + + tdSql.query("select * from db.t4x where c1 like '\%\_%\%%'") + tdSql.checkRows(1) + + def like_wildcard_test2(self): + tdSql.execute("create table db.t5x (ts timestamp, c1 varchar(100))") + + tdSql.execute("insert into db.t5x values(now(), 'a\%c')") + tdSql.execute("insert into db.t5x values(now+1s, 'a\%bbbc')") + tdSql.execute("insert into db.t5x values(now()+2s, 'a%c')") + + tdSql.query("select * from db.t5x where c1 like 'a\%c'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "a%c") + + tdSql.execute("create table db.t6x (ts timestamp, c1 varchar(100))") + + tdSql.execute("insert into db.t6x values(now(), '\%c')") + tdSql.execute("insert into db.t6x values(now+1s, '\%bbbc')") + tdSql.execute("insert into db.t6x values(now()+2s, '%c')") + + tdSql.query("select * from db.t6x where c1 like '\%c'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "%c") + + tdSql.execute("create table db.t7x (ts timestamp, c1 varchar(100))") + + tdSql.execute("insert into db.t7x values(now(), 'a\_c')") + tdSql.execute("insert into db.t7x values(now+1s, 'a\_bbbc')") + tdSql.execute("insert into db.t7x values(now()+2s, 'a_c')") + + tdSql.query("select * from db.t7x where c1 like 'a\_c'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "a_c") + + tdSql.execute("create table db.t8x (ts timestamp, c1 varchar(100))") + + tdSql.execute("insert into db.t8x values(now(), '\_c')") + tdSql.execute("insert into db.t8x values(now+1s, '\_bbbc')") + tdSql.execute("insert into db.t8x values(now()+2s, '_c')") + + tdSql.query("select * from db.t8x where c1 like '\_c'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "_c") + + def run(self): + tdLog.printNoPrefix("==========start like_wildcard_test run ...............") + tdSql.prepare(replica = self.replicaVar) + + + self.initDB() + self.like_wildcard_test() + self.like_cnc_wildcard_test() + self.like_multi_wildcard_test() + self.like_wildcard_test2() + tdLog.printNoPrefix("==========end like_wildcard_test run ...............") + + self.stopTest() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/limit.py b/tests/system-test/2-query/limit.py index 961cff5087..e6736704f7 100644 --- a/tests/system-test/2-query/limit.py +++ b/tests/system-test/2-query/limit.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1): if dropFlag == 1: diff --git a/tests/system-test/2-query/nestedQueryInterval.py b/tests/system-test/2-query/nestedQueryInterval.py index e937774c4c..07b5519432 100644 --- a/tests/system-test/2-query/nestedQueryInterval.py +++ b/tests/system-test/2-query/nestedQueryInterval.py @@ -5,6 +5,8 @@ import socket import os import threading +from datetime import timezone, datetime + from util.log import * from util.sql import * from util.cases import * @@ -1111,265 +1113,201 @@ class TDTestCase: def TS_3932(self): tdLog.debug("test insert data into stable") - tdSql.query(f"select tbname,count(*) from 
nested.stable_1 group by tbname order by tbname;") + tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname having count(*)>0 order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 100); - tdSql.checkData(1, 1, 200); + tdSql.checkData(0, 1, 100) + tdSql.checkData(1, 1, 200) tdSql.query(f"insert into nested.stable_1 (ts,tbname) values(now,'stable_1_1');") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 101); - tdSql.checkData(1, 1, 200); + tdSql.checkRows(6) + tdSql.checkData(0, 1, 101) + tdSql.checkData(1, 1, 200) - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bigint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_smallint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_tinyint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_float) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_double) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bool) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_binary) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_nchar) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_ts) values(now,'stable_1_1',1);") - tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 111); - tdSql.checkData(1, 1, 200); - - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bigint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_smallint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_tinyint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_float_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_double_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bool_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_binary_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_nchar_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_ts_null) values(now,'stable_1_1',1);") - tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 121); - tdSql.checkData(1, 1, 200); + qlist = ['q_int', 'q_bigint', 'q_smallint', 'q_tinyint', 'q_float', 'q_double', 'q_bool', 'q_binary', 'q_nchar', 'q_ts'] + for i in range(10): + coulmn_name = qlist[i] + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") + tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;",queryTimes=5) + tdSql.checkRows(6) + tdSql.checkData(0, 1, 111) + tdSql.checkData(1, 1, 200) + + q_null_list = ['q_int_null', 'q_bigint_null', 'q_smallint_null', 'q_tinyint_null', 'q_float_null', 'q_double_null', 'q_bool_null', 'q_binary_null', 
'q_nchar_null', 'q_ts_null'] + for i in range(10): + coulmn_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") + tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;",queryTimes=5) + tdSql.checkRows(6) + tdSql.checkData(0, 1, 121) + tdSql.checkData(1, 1, 200) tdSql.query(f"insert into nested.stable_null_data (ts,tbname) values(now,'stable_null_data_1');") tdSql.query(f"select tbname,count(*) from nested.stable_null_data_1 group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 1); - - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bigint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_smallint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_tinyint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_float) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_double) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bool) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_binary) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_nchar) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_ts) values(now,'stable_null_data_1',1);") + tdSql.checkData(0, 1, 1) + + for i in range(10): + coulmn_name = qlist[i] + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_data_1',1);") + + tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;",queryTimes=5) + tdSql.checkRows(1) + tdSql.checkData(0, 1, 11) + + for i in range(10): + coulmn_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_data_1',1);") + tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 11); - - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bigint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_smallint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_tinyint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_float_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_double_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bool_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_binary_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_nchar_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_ts_null) 
values(now,'stable_null_data_1',1);") - tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 21); - + tdSql.checkData(0, 1, 21) tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname) values(now,'stable_null_childtable_1');") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 1); - - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bigint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_smallint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_tinyint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_float) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_double) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bool) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_binary) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_nchar) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_ts) values(now,'stable_null_childtable_1',1);") + tdSql.checkData(0, 1, 1) + + for i in range(10): + coulmn_name = qlist[i] + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_childtable_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 11); - - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bigint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_smallint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_tinyint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_float_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_double_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bool_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_binary_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_nchar_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_ts_null) values(now,'stable_null_childtable_1',1);") + tdSql.checkData(0, 1, 11) + + for i in range(10): + coulmn_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) 
values(now+{i}s,'stable_null_childtable_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 21); + tdSql.checkData(0, 1, 21) def TS_3932_flushdb(self): tdLog.debug("test flush db and insert data into stable") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 121); - tdSql.checkData(1, 1, 200); - + tdSql.checkRows(6) + tdSql.checkData(0, 1, 121) + tdSql.checkData(1, 1, 200) + + qlist = ['q_int', 'q_bigint', 'q_smallint', 'q_tinyint', 'q_float', 'q_double', 'q_bool', 'q_binary', 'q_nchar', 'q_ts'] + q_null_list = ['q_int_null', 'q_bigint_null', 'q_smallint_null', 'q_tinyint_null', 'q_float_null', 'q_double_null', 'q_bool_null', 'q_binary_null', 'q_nchar_null', 'q_ts_null'] tdSql.query(f"insert into nested.stable_1 (ts,tbname) values(now,'stable_1_1');") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 122); - tdSql.checkData(1, 1, 200); - - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bigint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_smallint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_tinyint) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_float) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_double) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bool) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_binary) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_nchar) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_ts) values(now,'stable_1_1',1);") + tdSql.checkRows(6) + tdSql.checkData(0, 1, 122) + tdSql.checkData(1, 1, 200) + + pd = datetime.datetime.now() + ts = int(datetime.datetime.timestamp(pd)*1000 - 10000) + print(f"start time {ts}") + + for i in range(10): + coulmn_name = qlist[i] + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values({ts+i},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 132); - tdSql.checkData(1, 1, 200); - - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bigint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_smallint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_tinyint_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_float_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_double_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_bool_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_binary_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_nchar_null) values(now,'stable_1_1',1);") - tdSql.query(f"insert into 
nested.stable_1 (ts,tbname,q_ts_null) values(now,'stable_1_1',1);") + tdSql.checkRows(6) + tdSql.checkData(0, 1, 132) + tdSql.checkData(1, 1, 200) + + for i in range(10): + coulmn_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values({ts+i},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) - tdSql.checkData(0, 1, 142); - tdSql.checkData(1, 1, 200); + tdSql.checkRows(6) + tdSql.checkData(0, 1, 142) + tdSql.checkData(1, 1, 200) - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1_1',1) \ - nested.stable_1 (ts,tbname,q_bigint) values(now+1a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_smallint) values(now+2a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_tinyint) values(now+3a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_float) values(now+4a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_double) values(now+5a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_bool) values(now+6a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_binary) values(now+7a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_nchar) values(now+8a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_ts) values(now+9a,'stable_1_1',1);") + tdSql.execute(f"insert into nested.stable_1 (ts,tbname,q_int) values({ts},'stable_1_1',1) \ + nested.stable_1 (ts,tbname,q_bigint) values({ts+1},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_smallint) values({ts+2},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_tinyint) values({ts+3},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_float) values({ts+4},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_double) values({ts+5},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_bool) values({ts+6},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_binary) values({ts+7},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_nchar) values({ts+8},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_ts) values({ts+9},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 152); tdSql.checkData(1, 1, 200); - tdSql.query(f"insert into nested.stable_null_data (ts,tbname) values(now,'stable_null_data_1');") + tdSql.query(f"insert into nested.stable_null_data (ts,tbname) values({ts},'stable_null_data_1');") + ts = ts + 1 tdSql.query(f"select tbname,count(*) from nested.stable_null_data_1 group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 22); - - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bigint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_smallint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_tinyint) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_float) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_double) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bool) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_binary) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_nchar) 
values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_ts) values(now,'stable_null_data_1',1);") + + for i in range(10): + column_name = qlist[i] + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {column_name}) values({ts+i},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 32); - - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bigint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_smallint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_tinyint_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_float_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_double_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_bool_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_binary_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_nchar_null) values(now,'stable_null_data_1',1);") - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_ts_null) values(now,'stable_null_data_1',1);") + tdSql.checkData(0, 1, 32) + + for i in range(10): + column_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {column_name}) values({ts+i},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 42); + tdSql.checkData(0, 1, 42) - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data_1',1) \ - nested.stable_null_data (ts,tbname,q_bigint) values(now+1a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_smallint) values(now+2a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_tinyint) values(now+3a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_float) values(now+4a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_double) values(now+5a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_bool) values(now+6a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_binary) values(now+7a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_nchar) values(now+8a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_ts) values(now+9a,'stable_null_data_1',1);") + tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values({ts},'stable_null_data_1',1) \ + nested.stable_null_data (ts,tbname,q_bigint) values({ts+1},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_smallint) values({ts+2},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_tinyint) values({ts+3},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_float) values({ts+4},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_double) values({ts+5},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_bool) values({ts+6},'stable_null_data_1',1)\ + 
nested.stable_null_data (ts,tbname,q_binary) values({ts+7},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_nchar) values({ts+8},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_ts) values({ts+9},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 52); - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname) values(now,'stable_null_childtable_1');") + tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname) values({ts},'stable_null_childtable_1');") + ts = ts + 1 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 22); - - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bigint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_smallint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_tinyint) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_float) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_double) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bool) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_binary) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_nchar) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_ts) values(now,'stable_null_childtable_1',1);") + tdSql.checkData(0, 1, 22) + + for i in range(10): + column_name = qlist[i] + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {column_name}) values({ts+i},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 32); - - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bigint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_smallint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_tinyint_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_float_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_double_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_bool_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_binary_null) values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_nchar_null) 
values(now,'stable_null_childtable_1',1);") - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_ts_null) values(now,'stable_null_childtable_1',1);") + tdSql.checkData(0, 1, 32) + + for i in range(10): + column_name = q_null_list[i] + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {column_name}) values({ts+i},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 42); - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable_1',1) \ - nested.stable_null_childtable (ts,tbname,q_bigint) values(now+1a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_smallint) values(now+2a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_tinyint) values(now+3a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_float) values(now+4a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_double) values(now+5a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_bool) values(now+6a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_binary) values(now+7a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_nchar) values(now+8a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_ts) values(now+9a,'stable_null_childtable_1',1);") + tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values({ts},'stable_null_childtable_1',1) \ + nested.stable_null_childtable (ts,tbname,q_bigint) values({ts+1},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_smallint) values({ts+2},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_tinyint) values({ts+3},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_float) values({ts+4},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_double) values({ts+5},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_bool) values({ts+6},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_binary) values({ts+7},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_nchar) values({ts+8},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_ts) values({ts+9},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 52); @@ -1408,7 +1346,7 @@ class TDTestCase: nested.stable_null_childtable (ts,tbname,q_ts) values(now+9a,'stable_null_childtable_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 162); tdSql.checkData(1, 1, 200); @@ -1422,12 +1360,12 @@ class TDTestCase: #test special character - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'!@!@$$^$',1) \ - nested.stable_null_data (ts,tbname,q_int) values(now,'%^$^&^&',1) \ - nested.stable_null_childtable (ts,tbname,q_int) values(now,'$^%$%^&',1);") + tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now+10a,'!@!@$$^$',1) \ + nested.stable_null_data (ts,tbname,q_int) values(now+10a,'%^$^&^&',1) \ + nested.stable_null_childtable 
(ts,tbname,q_int) values(now+10a,'$^%$%^&',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); @@ -1465,7 +1403,7 @@ class TDTestCase: nested.stable_null_childtable(tbname,ts,q_int,q_binary) file '{self.testcasePath}/stable_null_childtable.csv';") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); @@ -1501,7 +1439,7 @@ class TDTestCase: tdSql.query(f"insert into nested.stable_null_childtable(tbname,ts,q_int,q_binary) file '{self.testcasePath}/stable_null_childtable.csv';") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); diff --git a/tests/system-test/2-query/odbc.py b/tests/system-test/2-query/odbc.py index 8fbad93995..7daf5fd730 100644 --- a/tests/system-test/2-query/odbc.py +++ b/tests/system-test/2-query/odbc.py @@ -13,7 +13,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def check_ins_cols(self): tdSql.execute("create database if not exists db") diff --git a/tests/system-test/2-query/partition_by_col.py b/tests/system-test/2-query/partition_by_col.py index 230b7582d9..549e2738be 100644 --- a/tests/system-test/2-query/partition_by_col.py +++ b/tests/system-test/2-query/partition_by_col.py @@ -27,7 +27,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: @@ -169,6 +169,16 @@ class TDTestCase: self.check_explain_res_has_row("Partition on", self.explain_sql(sql)) self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint)) + sql = 'select count(*), c1 from meters partition by c1' + sql_hint = 'select /*+ sort_for_group() partition_first()*/ count(*), c1 from meters partition by c1' + self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint)) + sql_hint = 'select /*+ partition_first()*/ count(*), c1 from meters partition by c1' + self.check_explain_res_has_row("Partition on", self.explain_sql(sql_hint)) + sql_hint = 'select /*+ partition_first() sort_for_group()*/ count(*), c1 from meters partition by c1' + self.check_explain_res_has_row("Partition on", self.explain_sql(sql_hint)) + sql_hint = 'select /*+ sort_for_group() partition_first()*/ count(*), c1 from meters partition by c1' + self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint)) + def add_order_by(self, sql: str, order_by: str, select_list: str = "*") -> str: return "select %s from (%s)t order by %s" % (select_list, sql, order_by) diff --git a/tests/system-test/2-query/partition_by_col_agg.py b/tests/system-test/2-query/partition_by_col_agg.py index 1bc7a2414a..405cff9663 100644 --- a/tests/system-test/2-query/partition_by_col_agg.py +++ b/tests/system-test/2-query/partition_by_col_agg.py @@ -27,7 +27,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = 
int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py index d15ef89eb8..851b5bb1d1 100644 --- a/tests/system-test/2-query/qnodeCluster.py +++ b/tests/system-test/2-query/qnodeCluster.py @@ -29,7 +29,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py index b8d4e3c314..d86b5d07e7 100644 --- a/tests/system-test/2-query/query_cols_tags_and_or.py +++ b/tests/system-test/2-query/query_cols_tags_and_or.py @@ -20,7 +20,7 @@ class TDTestCase: self.replicaVar = int(replicaVar) ## add for TD-6672 tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def insertData(self, tb_name): insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)', diff --git a/tests/system-test/2-query/select_null.py b/tests/system-test/2-query/select_null.py index e20f6131c9..8411a33a1f 100755 --- a/tests/system-test/2-query/select_null.py +++ b/tests/system-test/2-query/select_null.py @@ -401,7 +401,29 @@ class TDTestCase: tdSql.execute(sql) sql = "select * from %s.`12345` order by `567` desc limit 2;"%(database) tdSql.error(sql) - + + def td_27939(self,database): + sql = "create table %s.`test1eq2` (`ts` timestamp, id int);"%(database) + tdSql.execute(sql) + + sql = "insert into %s.test1eq2 values (now,1);"%(database) + tdSql.execute(sql) + + sql = "insert into %s.`test1eq2` values (now,2);"%(database) + tdSql.execute(sql) + + sql = "select * from %s.`test1eq2` where 1=2;"%(database) + tdSql.query(sql) + tdSql.checkRows(0) + + sql = "select * from (select * from %s.`test1eq2` where 1=2);"%(database) + tdSql.query(sql) + tdSql.checkRows(0) + + sql = "drop table %s.`test1eq2` ;"%(database) + tdSql.execute(sql) + + def run(self): startTime = time.time() @@ -418,6 +440,8 @@ class TDTestCase: self.ts_3110("%s" %self.db) self.ts_23505("%s" %self.db) self.ts_3036("%s" %self.db) + + self.td_27939("%s" %self.db) tdSql.query("flush database %s" %self.db) @@ -430,8 +454,9 @@ class TDTestCase: self.ts_3110("%s" %self.db) self.ts_23505("%s" %self.db) self.ts_3036("%s" %self.db) - - + + self.td_27939("%s" %self.db) + self.test_select_as_chinese_characters(); endTime = time.time() print("total time %ds" % (endTime - startTime)) diff --git a/tests/system-test/2-query/state_window.py b/tests/system-test/2-query/state_window.py index a211cd9dbe..7dedeb88f1 100644 --- a/tests/system-test/2-query/state_window.py +++ b/tests/system-test/2-query/state_window.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): if dropFlag == 1: @@ -186,7 +186,25 @@ class TDTestCase: 
tdSql.execute("insert into t0 values(now, 3,3,3,3,3,3,3,3,3)", queryTimes=1) tdSql.execute("select bottom(c1, 1), c2 from t0 state_window(c2) order by ts", queryTimes=1) + def test_crash_for_session_window(self): + tdSql.execute("drop database if exists test") + self.prepareTestEnv() + tdSql.execute("alter local 'queryPolicy' '3'") + tdSql.execute("insert into t0 values(now, 2,2,2,2,2,2,2,2,2)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 2,2,2,2,2,2,2,2,2)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 2,2,2,2,2,2,2,2,2)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 2,2,2,2,2,2,2,2,2)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 2,2,2,2,2,2,2,2,2)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 3,3,3,3,3,3,3,3,3)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 3,NULL,3,3,3,3,3,3,3)", queryTimes=1) + tdSql.execute("insert into t0 values(now, 3,NULL,3,3,3,3,3,3,3)", queryTimes=1) + tdSql.execute("flush database test", queryTimes=1) + time.sleep(2) + tdSql.execute("insert into t0 values(now, 3,NULL,3,3,3,3,3,3,3)", queryTimes=1) + tdSql.query("select first(c2) from t0 session(ts, 1s) order by ts", queryTimes=1) + def run(self): + self.test_crash_for_session_window() self.test_crash_for_state_window1() self.test_crash_for_state_window2() self.test_crash_for_state_window3() diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py index 302711b13d..ad5a2bba55 100644 --- a/tests/system-test/2-query/substr.py +++ b/tests/system-test/2-query/substr.py @@ -32,7 +32,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(),False) + tdSql.init(conn.cursor(),True) def __substr_condition(self): # sourcery skip: extract-method substr_condition = [] diff --git a/tests/system-test/2-query/test_ts4382.py b/tests/system-test/2-query/test_ts4382.py new file mode 100644 index 0000000000..50ec67ed23 --- /dev/null +++ b/tests/system-test/2-query/test_ts4382.py @@ -0,0 +1,75 @@ +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.sqlset import * +from util import constant +from util.common import * + + +class TDTestCase: + """Verify the jira TS-4382 + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.dbname = 'db' + self.stbname = 'st' + self.ctbname_list = ["ct1", "ct2"] + self.tag_value_list = ['{"instance":"100"}', '{"instance":"200"}'] + + def prepareData(self): + # db + tdSql.execute("create database {};".format(self.dbname)) + tdSql.execute("use {};".format(self.dbname)) + tdLog.debug("Create database %s" % self.dbname) + + # super table + tdSql.execute("create table {} (ts timestamp, col1 int) tags (t1 json);".format(self.stbname)) + tdLog.debug("Create super table %s" % self.stbname) + + # child table + for i in range(len(self.ctbname_list)): + tdSql.execute("create table {} using {} tags('{}');".format(self.ctbname_list[i], self.stbname, self.tag_value_list[i])) + tdLog.debug("Create child table %s" % self.ctbname_list) + + # insert data + tdSql.execute("insert into {} values(now, 1)(now+1s, 2)".format(self.ctbname_list[0])) + tdSql.execute("insert into {} values(now, null)(now+1s, null)".format(self.ctbname_list[1])) + + def run(self): + self.prepareData() + sql_list = [ + # super table query with correct 
tag name of json type + { + "sql": "select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from st group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'instance' order by time;", + "result_check": "0.0" + }, + # child table query with incorrect tag name of json type + { + "sql": "select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from ct1 group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'name' order by time;", + "result_check": "None" + }, + # child table query with null value + { + "sql": "select ts, avg(col1) from ct2 group by ts, t1->'name' order by ts;", + "result_check": "None" + } + ] + for sql_dic in sql_list: + tdSql.query(sql_dic["sql"]) + tdLog.debug("execute sql: %s" % sql_dic["sql"]) + for item in [row[1] for row in tdSql.queryResult]: + if sql_dic["result_check"] in str(item): + tdLog.debug("Check query result of '{}' successfully".format(sql_dic["sql"])) + break + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/system-test/2-query/test_ts4403.py b/tests/system-test/2-query/test_ts4403.py new file mode 100644 index 0000000000..7f0561d084 --- /dev/null +++ b/tests/system-test/2-query/test_ts4403.py @@ -0,0 +1,77 @@ +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.sqlset import * +from util import constant +from util.common import * + + +class TDTestCase: + """Verify the jira TS-4403 + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.dbname = 'db' + self.stbname = 'st' + + def prepareData(self): + # db + tdSql.execute("create database {};".format(self.dbname)) + tdSql.execute("use {};".format(self.dbname)) + tdLog.debug("Create database %s" % self.dbname) + + # super table + tdSql.execute("create table {} (ts timestamp, col1 int) tags (t1 int, t2 binary(16), t3 nchar(16));".format(self.stbname)) + tdLog.debug("Create super table %s" % self.stbname) + + # create index for all tags + tdSql.execute("create index t2_st on {} (t2);".format(self.stbname)) + tdSql.execute("create index t3_st on {} (t3);".format(self.stbname)) + + def run(self): + self.prepareData() + # check index number + tdSql.query("show indexes from {};".format(self.stbname)) + assert(3 == len(tdSql.queryResult)) + tdLog.debug("All tags of super table are indexed successfully") + + # drop default first tag index + tdSql.execute("drop index t1_st;") + tdSql.query("show indexes from {};".format(self.stbname)) + assert(2 == len(tdSql.queryResult)) + tdLog.debug("Delete the default index of first tag successfully") + + # create index for first tag + tdSql.execute("create index t1_st on {} (t1);".format(self.stbname)) + tdSql.query("show indexes from {};".format(self.stbname)) + assert(3 == len(tdSql.queryResult)) + tdLog.debug("Create index for first tag successfully") + + # null as index value to create child table + tdSql.execute("create table ct1 using {} tags(null, null, null);".format(self.stbname)) + tdSql.query("show tables;") + assert(1 == len(tdSql.queryResult)) + tdLog.debug("Create child table with tags value as 'null' successfully") + + # redundant index with different index name for the same tag + tdSql.error("create index t2_ct1 on st (t2);") + tdLog.debug("Verify redundant index with different index name for the same tag successfully") + + # same index name 
for different tag + tdSql.error("create index t2_st on st (t3);") + tdLog.debug("Verify same index name for a different tag successfully") + + # add index for multiple tags (TD-28078) + tdSql.error("create index tt on {} (t2, t3);".format(self.stbname)) + tdLog.debug("Verify add index for multiple tags successfully") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/ts-4233.py b/tests/system-test/2-query/ts-4233.py index 9b0a2f175c..a0feb1bf48 100644 --- a/tests/system-test/2-query/ts-4233.py +++ b/tests/system-test/2-query/ts-4233.py @@ -14,7 +14,7 @@ class TDTestCase: tdSql.init(conn.cursor(), True) def checksql(self, sql): - result = os.popen("taos -s '%s'" %sql) + result = os.popen(f"taos -s \"{sql}\" ") res = result.read() print(res) if ("Query OK" in res): @@ -23,14 +23,12 @@ class TDTestCase: tdLog.exit(f"checkEqual error") def check(self): - conn = taos.connect() sql = "select 'a;b' as x" tdSql.query(f"%s" %sql) tdSql.checkRows(1) - self.checksql('select "a;b" as x\G') - self.checksql('select "a;b" as x >> /tmp/res.txt') - return + self.checksql('select \\\"a;b\\\" as x\G') + self.checksql('select \\\"a;b\\\" as x >> temp.txt') def run(self): tdSql.prepare() diff --git a/tests/system-test/2-query/ts-4348-td-27939.py b/tests/system-test/2-query/ts-4348-td-27939.py new file mode 100644 index 0000000000..2a861899d5 --- /dev/null +++ b/tests/system-test/2-query/ts-4348-td-27939.py @@ -0,0 +1,46 @@ +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + tdSql.execute("drop database if exists ts_4338;") + tdSql.execute("create database ts_4338;") + tdSql.execute("drop table if exists ts_4338.t;") + tdSql.execute("create database if not exists ts_4338;") + tdSql.execute("create table ts_4338.t (ts timestamp, i8 tinyint);") + tdSql.execute("insert into ts_4338.t (ts, i8) values (now(), 1) (now()+1s, 2);") + + def run(self): + # TS-4348 + tdSql.query(f'select i8 from ts_4338.t;') + tdSql.checkRows(2) + + tdSql.query(f'select i8 from ts_4338.t where 1 = 1;') + tdSql.checkRows(2) + + tdSql.query(f'select i8 from ts_4338.t where i8 = 1;') + tdSql.checkRows(1) + + tdSql.query(f'select * from (select * from ts_4338.t where i8 = 3);') + tdSql.checkRows(0) + + # TD-27939 + tdSql.query(f'select * from (select * from ts_4338.t where 1 = 100);') + tdSql.checkRows(0) + + tdSql.query(f'select * from (select * from (select * from ts_4338.t where 1 = 200));') + tdSql.checkRows(0) + + tdSql.execute("drop database if exists ts_4338;") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/ts_3405_3398_3423.py b/tests/system-test/2-query/ts_3405_3398_3423.py index dcd8fb5a85..814d9de394 100644 --- a/tests/system-test/2-query/ts_3405_3398_3423.py +++ b/tests/system-test/2-query/ts_3405_3398_3423.py @@ -11,7 +11,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def run(self): """This test case is used to verify the query performance for the merge scans process of diff --git 
a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py index 0e2975cdba..5a49f653ec 100644 --- a/tests/system-test/2-query/tsbsQuery.py +++ b/tests/system-test/2-query/tsbsQuery.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py index 3ae602fa23..ba24579611 100644 --- a/tests/system-test/2-query/ttl_comment.py +++ b/tests/system-test/2-query/ttl_comment.py @@ -27,7 +27,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def run(self): dbname="db" diff --git a/tests/system-test/2-query/varbinary.py b/tests/system-test/2-query/varbinary.py index 8f8021c8ad..c2e2693762 100644 --- a/tests/system-test/2-query/varbinary.py +++ b/tests/system-test/2-query/varbinary.py @@ -10,7 +10,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def test(self): tdLog.info(" test") diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py index 808a4935e3..cf6aafc374 100644 --- a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py +++ b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py index 8386c22cd0..e0fbe89c36 100644 --- a/tests/system-test/7-tmq/dataFromTsdbNWal.py +++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py index 1e6011d5db..15fd4ed875 100644 --- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py +++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py index 5043c46f00..c7af55daae 100644 --- a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py +++ 
b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py index 5d0af636b2..e3fe9ae0b6 100644 --- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py +++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqClientConsLog.py b/tests/system-test/7-tmq/tmqClientConsLog.py index d708e6642c..ab0c5f6a72 100644 --- a/tests/system-test/7-tmq/tmqClientConsLog.py +++ b/tests/system-test/7-tmq/tmqClientConsLog.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py index f372a2b742..2af61f4bbb 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py index c7f95f6f41..d3087c0455 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py index 26dacf514d..275d80ca18 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert 
data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py index d6f100041b..0572499079 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py index 11fc7dbcc0..7b31019572 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py @@ -26,7 +26,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py index 8ed4a6df97..a05b6a23ec 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py index 117c3ce637..85adb50e88 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py index 2864240441..facbb6c228 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py index a717c4966d..ede17130df 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py @@ -24,7 +24,7 @@ class TDTestCase: def 
init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py index d8606efe58..fa7aad9dae 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py index 05aa82c929..efc186c3bf 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py index dcaa6ceb7c..f66605f5f4 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py index dd2420dcfb..d94f8df35c 100644 --- a/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py +++ b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py @@ -29,7 +29,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getPath(self, tool="taosBenchmark"): if (platform.system().lower() == 'windows'): diff --git a/tests/system-test/7-tmq/tmqDataPrecisionUnit.py b/tests/system-test/7-tmq/tmqDataPrecisionUnit.py index f050116a1b..13b0fcbe5d 100644 --- a/tests/system-test/7-tmq/tmqDataPrecisionUnit.py +++ b/tests/system-test/7-tmq/tmqDataPrecisionUnit.py @@ -16,7 +16,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.db_name = "tmq_db" self.topic_name = "tmq_topic" diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py index aa9c8d25d0..117bd66340 100644 --- a/tests/system-test/7-tmq/tmqDelete-1ctb.py +++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py 
@@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py index 7a47cd6025..0dc2dc35f0 100644 --- a/tests/system-test/7-tmq/tmqDelete-multiCtb.py +++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py index 0ac8482163..f4a63de53e 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqDnodeRestart1.py b/tests/system-test/7-tmq/tmqDnodeRestart1.py index 2bde32800b..b398178c61 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart1.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart1.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqDropConsumer.py b/tests/system-test/7-tmq/tmqDropConsumer.py index e3e9906ecf..5208d14069 100644 --- a/tests/system-test/7-tmq/tmqDropConsumer.py +++ b/tests/system-test/7-tmq/tmqDropConsumer.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getPath(self, tool="taosBenchmark"): if (platform.system().lower() == 'windows'): diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py index c8bcdd6235..2c87e4083c 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) # drop some ntbs def tmqCase1(self): diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py index 3fc5a2fdc7..7a35867ce7 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) 
tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) # drop some ntbs def tmqCase1(self): diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py index c9e34136cc..eacacd913b 100644 --- a/tests/system-test/7-tmq/tmqDropStbCtb.py +++ b/tests/system-test/7-tmq/tmqDropStbCtb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqMaxGroupIds.py b/tests/system-test/7-tmq/tmqMaxGroupIds.py index 5049ee5bd7..eba15a4bef 100644 --- a/tests/system-test/7-tmq/tmqMaxGroupIds.py +++ b/tests/system-test/7-tmq/tmqMaxGroupIds.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getPath(self, tool="taosBenchmark"): if (platform.system().lower() == 'windows'): diff --git a/tests/system-test/7-tmq/tmqMaxTopic.py b/tests/system-test/7-tmq/tmqMaxTopic.py index 05b699ca00..4f25c39627 100644 --- a/tests/system-test/7-tmq/tmqMaxTopic.py +++ b/tests/system-test/7-tmq/tmqMaxTopic.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def modifyMaxTopics(self, tmqMaxTopicNum): # single dnode diff --git a/tests/system-test/7-tmq/tmqMultiConsumer.py b/tests/system-test/7-tmq/tmqMultiConsumer.py index cc217e0c4c..e9727042e2 100644 --- a/tests/system-test/7-tmq/tmqMultiConsumer.py +++ b/tests/system-test/7-tmq/tmqMultiConsumer.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqOffset.py b/tests/system-test/7-tmq/tmqOffset.py index 500c6f53e4..991d1185a9 100644 --- a/tests/system-test/7-tmq/tmqOffset.py +++ b/tests/system-test/7-tmq/tmqOffset.py @@ -31,7 +31,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getPath(self, tool="taosBenchmark"): if (platform.system().lower() == 'windows'): diff --git a/tests/system-test/7-tmq/tmqSeekAndCommit.py b/tests/system-test/7-tmq/tmqSeekAndCommit.py index 253edcd10d..65ee438a41 100644 --- a/tests/system-test/7-tmq/tmqSeekAndCommit.py +++ b/tests/system-test/7-tmq/tmqSeekAndCommit.py @@ -16,7 +16,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self.db_name = "tmq_db" self.topic_name = "tmq_topic" diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index 920e8e77e4..216f67ffe4 100644 --- 
a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py index 2f1d3e2631..e51ac9f029 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py index 6b8c10de27..3988565154 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py index 3975013e74..86e49d378a 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqUpdateWithConsume.py b/tests/system-test/7-tmq/tmqUpdateWithConsume.py index 6a9bb0ae92..e88e16eec0 100644 --- a/tests/system-test/7-tmq/tmqUpdateWithConsume.py +++ b/tests/system-test/7-tmq/tmqUpdateWithConsume.py @@ -24,7 +24,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/7-tmq/tmqVnodeReplicate.py b/tests/system-test/7-tmq/tmqVnodeReplicate.py index 0ee11781ed..3e40cb13ad 100644 --- a/tests/system-test/7-tmq/tmqVnodeReplicate.py +++ b/tests/system-test/7-tmq/tmqVnodeReplicate.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git 
a/tests/system-test/7-tmq/tmqVnodeSplit-column.py b/tests/system-test/7-tmq/tmqVnodeSplit-column.py index 1fe2b5809a..74213c4536 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-column.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-column.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-db.py b/tests/system-test/7-tmq/tmqVnodeSplit-db.py index f66acf4fcd..b9eac4b692 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-db.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-db.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py index 68fb07b813..f6b523e5f4 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py @@ -30,7 +30,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py index 6140e8a544..d4c76c4f61 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py @@ -30,7 +30,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py index 18b80b7f8d..e79a0ca77d 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py @@ -32,7 +32,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb.py index c203350322..a96d52ee2b 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb.py @@ -30,7 +30,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-db.py b/tests/system-test/7-tmq/tmqVnodeTransform-db.py index 5c61908d96..cef90ff65f 100644 --- 
a/tests/system-test/7-tmq/tmqVnodeTransform-db.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform-db.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-stb.py b/tests/system-test/7-tmq/tmqVnodeTransform-stb.py index 64cdf6d153..95703c52ca 100644 --- a/tests/system-test/7-tmq/tmqVnodeTransform-stb.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform-stb.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmqVnodeTransform.py b/tests/system-test/7-tmq/tmqVnodeTransform.py index 3698297618..811b72c35f 100644 --- a/tests/system-test/7-tmq/tmqVnodeTransform.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform.py @@ -25,7 +25,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/7-tmq/tmq_offset.py b/tests/system-test/7-tmq/tmq_offset.py index 6453f452c6..8272449ccb 100644 --- a/tests/system-test/7-tmq/tmq_offset.py +++ b/tests/system-test/7-tmq/tmq_offset.py @@ -5,7 +5,7 @@ import time import socket import os import threading - +import platform from util.log import * from util.sql import * from util.cases import * @@ -21,23 +21,40 @@ class TDTestCase: tdSql.init(conn.cursor()) def run(self): - tdSql.prepare() - buildPath = tdCom.getBuildPath() - cmdStr1 = '%s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y &'%(buildPath) - tdLog.info(cmdStr1) - os.system(cmdStr1) - time.sleep(15) - cmdStr2 = '%s/build/bin/tmq_offset_test &'%(buildPath) - tdLog.info(cmdStr2) - os.system(cmdStr2) + if platform.system().lower() == 'windows': + buildPath = tdCom.getBuildPath() + cmdStr1 = ' mintty -h never %s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y '%(buildPath) + tdLog.info(cmdStr1) + os.system(cmdStr1) + time.sleep(15) - time.sleep(20) + cmdStr2 = ' mintty -h never %s/build/bin/tmq_offset_test '%(buildPath) + tdLog.info(cmdStr2) + os.system(cmdStr2) + time.sleep(20) - os.system("kill -9 `pgrep taosBenchmark`") - result = os.system("kill -9 `pgrep tmq_offset_test`") - if result != 0: - tdLog.exit("tmq_offset_test error!") + # tdLog.info("ps -a | grep taos | awk \'{print $2}\' | xargs kill -9") + os.system('ps -a | grep taosBenchmark | awk \'{print $2}\' | xargs kill -9') + result = os.system('ps -a | grep tmq_offset_test | awk \'{print $2}\' | xargs kill -9') + if result != 0: + tdLog.exit("tmq_offset_test error!") + else: + buildPath = tdCom.getBuildPath() + cmdStr1 = '%s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y &'%(buildPath) + tdLog.info(cmdStr1) + os.system(cmdStr1) + time.sleep(15) + + cmdStr2 = '%s/build/bin/tmq_offset_test &'%(buildPath) + tdLog.info(cmdStr2) + os.system(cmdStr2) + time.sleep(20) + + os.system("kill -9 `pgrep taosBenchmark`") + result = os.system("kill -9 `pgrep tmq_offset_test`") + if result != 0: + tdLog.exit("tmq_offset_test error!") def stop(self): tdSql.close() 
diff --git a/tests/system-test/7-tmq/tmq_per.py b/tests/system-test/7-tmq/tmq_per.py index f3701dacab..bd319aa3a4 100644 --- a/tests/system-test/7-tmq/tmq_per.py +++ b/tests/system-test/7-tmq/tmq_per.py @@ -30,7 +30,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def getDataPath(self): selfPath = tdCom.getBuildPath() diff --git a/tests/system-test/8-stream/at_once_session.py b/tests/system-test/8-stream/at_once_session.py index 9a253a187f..3462561c4e 100644 --- a/tests/system-test/8-stream/at_once_session.py +++ b/tests/system-test/8-stream/at_once_session.py @@ -59,10 +59,14 @@ class TDTestCase: ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + time.sleep(1) # create stb/ctb/tb stream self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + + time.sleep(1) + for i in range(self.tdCom.range_count): ctb_name = self.tdCom.get_long_name() self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) diff --git a/tests/system-test/8-stream/at_once_state_window.py b/tests/system-test/8-stream/at_once_state_window.py index fa9f4ddd78..60a4f4e890 100644 --- a/tests/system-test/8-stream/at_once_state_window.py +++ b/tests/system-test/8-stream/at_once_state_window.py @@ -59,6 +59,9 @@ class TDTestCase: self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value) range_times = self.tdCom.range_count state_window_max = self.tdCom.dataDict['state_window_max'] + + time.sleep(2) + for i in range(range_times): state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times)) for i in range(2, range_times+3): diff --git a/tests/system-test/8-stream/max_delay_interval_ext.py b/tests/system-test/8-stream/max_delay_interval_ext.py index 653fcd997c..6536309a25 100644 --- 
a/tests/system-test/8-stream/max_delay_interval_ext.py +++ b/tests/system-test/8-stream/max_delay_interval_ext.py @@ -50,6 +50,8 @@ class TDTestCase: # create stb/ctb/tb stream self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + time.sleep(1) + init_num = 0 for i in range(self.tdCom.range_count): if i == 0: diff --git a/tests/system-test/8-stream/max_delay_session.py b/tests/system-test/8-stream/max_delay_session.py index 46c4c5801d..7e9d365fc8 100644 --- a/tests/system-test/8-stream/max_delay_session.py +++ b/tests/system-test/8-stream/max_delay_session.py @@ -27,6 +27,8 @@ class TDTestCase: self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' self.tdCom.date_time = self.tdCom.dataDict["start_ts"] + time.sleep(1) + if watermark is not None: watermark_value = f'{self.tdCom.dataDict["watermark"]}s' else: diff --git a/tests/system-test/8-stream/snode_restart.py b/tests/system-test/8-stream/snode_restart.py index 3657163ab0..e80ff54578 100644 --- a/tests/system-test/8-stream/snode_restart.py +++ b/tests/system-test/8-stream/snode_restart.py @@ -20,7 +20,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def case1(self): diff --git a/tests/system-test/8-stream/snode_restart_with_checkpoint.py b/tests/system-test/8-stream/snode_restart_with_checkpoint.py index d7bfd7b407..9eb8c09ca3 100644 --- a/tests/system-test/8-stream/snode_restart_with_checkpoint.py +++ b/tests/system-test/8-stream/snode_restart_with_checkpoint.py @@ -20,7 +20,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def case1(self): diff --git a/tests/system-test/8-stream/stream_basic.py b/tests/system-test/8-stream/stream_basic.py index 7f4d1d5ee3..3ebc255114 100644 --- a/tests/system-test/8-stream/stream_basic.py +++ b/tests/system-test/8-stream/stream_basic.py @@ -89,7 +89,9 @@ class TDTestCase: sql = "select count(*) from sta" # loop wait max 60s to check count is ok tdLog.info("loop wait result ...") - tdSql.checkDataLoop(0, 0, 99999, sql, loopCount=120, waitTime=0.5) + tdSql.checkDataLoop(0, 0, 100000, sql, loopCount=120, waitTime=0.5) + + time.sleep(5) # check all data is correct sql = "select * from sta where cnt != 20;" diff --git a/tests/system-test/8-stream/vnode_restart.py b/tests/system-test/8-stream/vnode_restart.py index a53432b77a..be0ee81787 100644 --- a/tests/system-test/8-stream/vnode_restart.py +++ b/tests/system-test/8-stream/vnode_restart.py @@ -20,7 +20,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def case1(self): diff --git a/tests/system-test/99-TDcase/TD-17255.py b/tests/system-test/99-TDcase/TD-17255.py index 0f83468754..fa28372ae3 100644 --- a/tests/system-test/99-TDcase/TD-17255.py +++ 
b/tests/system-test/99-TDcase/TD-17255.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") diff --git a/tests/system-test/loop.bat b/tests/system-test/loop.bat new file mode 100644 index 0000000000..41e3901b3f --- /dev/null +++ b/tests/system-test/loop.bat @@ -0,0 +1,13 @@ +@echo off +set LoopNumber=%1 +set cmd=%2 + +echo "Loop Number: %LoopNumber%" +echo "command : %cmd:"=%" + +For /l %%i in (1,1,%LoopNumber%) do ( + echo "execute times:"%%i + %cmd:"=% +) + +REM example: loop.bat 100 "python3 .\test.py -f 2-query\query_cols_tags_and_or.py" \ No newline at end of file diff --git a/tests/system-test/pytest.sh b/tests/system-test/pytest.sh index bae0fdf278..2837c817be 100755 --- a/tests/system-test/pytest.sh +++ b/tests/system-test/pytest.sh @@ -60,16 +60,16 @@ echo "ASAN_DIR : $ASAN_DIR" # prevent delete / folder or /usr/bin if [ ${#SIM_DIR} -lt 10 ]; then - echo "len(SIM_DIR) < 10 , danger so exit. SIM_DIR=$SIM_DIR" - exit 1 + echo "len(SIM_DIR) < 10 , danger so exit. SIM_DIR=$SIM_DIR" + exit 1 fi -rm -rf $SIM_DIR/* +rm -rf "${SIM_DIR:?}"/* mkdir -p $PRG_DIR mkdir -p $ASAN_DIR -cd $CODE_DIR +cd "$CODE_DIR" || exit ulimit -n 600000 ulimit -c unlimited @@ -78,10 +78,10 @@ ulimit -c unlimited echo "ExcuteCmd:" $* if [[ "$TD_OS" == "Alpine" ]]; then - $* + "$@" else AsanFile=$ASAN_DIR/psim.info - echo "AsanFile:" $AsanFile + echo "AsanFile:" "$AsanFile" unset LD_PRELOAD #export LD_PRELOAD=libasan.so.5 diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index aefdb1e824..1ab0fee7bf 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -1,4 +1,5 @@ python3 ./test.py -f 2-query/tbname_vgroup.py +python3 ./test.py -f 2-query/compact-col.py python3 ./test.py -f 2-query/stbJoin.py python3 ./test.py -f 2-query/stbJoin.py -Q 2 python3 ./test.py -f 2-query/stbJoin.py -Q 3 @@ -99,6 +100,10 @@ python3 ./test.py -f 2-query/ts-4233.py python3 ./test.py -f 2-query/ts-4233.py -Q 2 python3 ./test.py -f 2-query/ts-4233.py -Q 3 python3 ./test.py -f 2-query/ts-4233.py -Q 4 +python3 ./test.py -f 2-query/like.py +python3 ./test.py -f 2-query/like.py -Q 2 +python3 ./test.py -f 2-query/like.py -Q 3 +python3 ./test.py -f 2-query/like.py -Q 4 python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False @@ -149,7 +154,7 @@ python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py -python3 ./test.py -f 7-tmq/tmq_taosx.py +# python3 ./test.py -f 7-tmq/tmq_taosx.py python3 ./test.py -f 7-tmq/tmq_replay.py python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py python3 ./test.py -f 7-tmq/tmq_offset.py @@ -163,6 +168,7 @@ python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3 +python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py -N 3 -n 3 python3
test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-column.py -N 3 -n 3 @@ -196,14 +202,18 @@ python3 ./test.py -f 0-others/ttlChangeOnWrite.py python3 ./test.py -f 0-others/compress_tsz1.py python3 ./test.py -f 0-others/compress_tsz2.py python3 ./test.py -f 0-others/view/non_marterial_view/test_view.py +python3 ./test.py -f 0-others/test_show_table_distributed.py python3 ./test.py -f 0-others/compatibility.py python3 ./test.py -f 0-others/tag_index_basic.py python3 ./test.py -N 3 -f 0-others/walRetention.py -python3 ./test.py -f 0-others/splitVGroupRep1.py -N 3 -python3 ./test.py -f 0-others/splitVGroupRep3.py -N 3 +python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 1 +python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 1 +python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 3 +python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 3 python3 ./test.py -f 0-others/timeRangeWise.py -N 3 python3 ./test.py -f 0-others/delete_check.py python3 ./test.py -f 0-others/test_hot_refresh_configurations.py +python3 ./test.py -f 1-insert/insert_double.py python3 ./test.py -f 1-insert/alter_database.py python3 ./test.py -f 1-insert/alter_replica.py -N 3 python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py @@ -252,6 +262,10 @@ python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4 python3 ./test.py -f 1-insert/precisionUS.py python3 ./test.py -f 1-insert/precisionNS.py python3 ./test.py -f 1-insert/test_ts4219.py +python3 ./test.py -f 1-insert/ts-4272.py +python3 ./test.py -f 1-insert/test_ts4295.py +python3 ./test.py -f 1-insert/test_td27388.py +python3 ./test.py -f 1-insert/insert_timestamp.py python3 ./test.py -f 0-others/show.py python3 ./test.py -f 0-others/show_tag_index.py python3 ./test.py -f 0-others/information_schema.py @@ -456,6 +470,7 @@ python3 ./test.py -f 2-query/concat2.py python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/nestedQueryInterval.py python3 ./test.py -f 2-query/systable_func.py +python3 ./test.py -f 2-query/test_ts4382.py python3 ./test.py -f 2-query/stablity.py python3 ./test.py -f 2-query/stablity_1.py python3 ./test.py -f 2-query/elapsed.py @@ -795,6 +810,7 @@ python3 ./test.py -f 2-query/blockSMA.py -Q 4 python3 ./test.py -f 2-query/projectionDesc.py -Q 4 python3 ./test.py -f 2-query/odbc.py python3 ./test.py -f 2-query/fill_with_group.py +python3 ./test.py -f 2-query/state_window.py -Q 3 python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 python3 ./test.py -f 99-TDcase/TD-20582.py python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 diff --git a/tests/taosc_test/taoscTest.cpp b/tests/taosc_test/taoscTest.cpp index fbdb152ab4..3f49b11b70 100644 --- a/tests/taosc_test/taoscTest.cpp +++ b/tests/taosc_test/taoscTest.cpp @@ -190,10 +190,12 @@ void queryCallback2(void* param, void* res, int32_t code) { ASSERT_TRUE(param == pUserParam); // After using taos_query_a to query, using taos_fetch_row in the callback will cause blocking. 
// Reason: schProcessOnCbBegin SCH_LOCK_TASK(pTask) - /* TAOS_ROW row; - while ((row = taos_fetch_row(res))) { - getRecordCounts++; - } */ + TAOS_ROW row; + row = taos_fetch_row(res); + ASSERT_TRUE(row == NULL); + int* errCode = taosGetErrno(); + ASSERT_TRUE(*errCode == TSDB_CODE_TSC_INVALID_OPERATION); + tsem_post(&query_sem); taos_free_result(res); } diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index 1c885b151c..ba3dadc646 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -66,6 +66,7 @@ typedef struct { char file[PATH_MAX]; char password[TSDB_USET_PASSWORD_LEN]; bool is_gen_auth; + bool is_bi_mode; bool is_raw_time; bool is_version; bool is_dump_config; diff --git a/tools/shell/inc/shellTire.h b/tools/shell/inc/shellTire.h index e87c3ee4f3..472f604a2c 100644 --- a/tools/shell/inc/shellTire.h +++ b/tools/shell/inc/shellTire.h @@ -19,8 +19,8 @@ // // The prefix search tree is a efficient storage words and search words tree, it support 95 visible ascii code character // -#define FIRST_ASCII 40 // first visible char is '0' -#define LAST_ASCII 122 // last visilbe char is 'z' +#define FIRST_ASCII 32 // first visible char is ' ' (space) +#define LAST_ASCII 126 // last visible char is '~' // capacity save char is 95 #define CHAR_CNT (LAST_ASCII - FIRST_ASCII + 1) diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 4817b23029..71fa777d63 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -44,6 +44,7 @@ #define SHELL_NET_ROLE "Net role when network connectivity test, options: client|server." #define SHELL_PKT_LEN "Packet length used for net test, default is 1024 bytes." #define SHELL_PKT_NUM "Packet numbers used for net test, default is 100." +#define SHELL_BI_MODE "Set BI mode" #define SHELL_VERSION "Print program version." #ifdef WEBSOCKET @@ -59,6 +60,7 @@ void shellPrintHelp() { printf("Usage: taos [OPTION...]
\r\n\r\n"); printf("%s%s%s%s\r\n", indent, "-a,", indent, SHELL_AUTH); printf("%s%s%s%s\r\n", indent, "-A,", indent, SHELL_GEN_AUTH); + printf("%s%s%s%s\r\n", indent, "-B,", indent, SHELL_BI_MODE); printf("%s%s%s%s\r\n", indent, "-c,", indent, SHELL_CFG_DIR); printf("%s%s%s%s\r\n", indent, "-C,", indent, SHELL_DMP_CFG); printf("%s%s%s%s\r\n", indent, "-d,", indent, SHELL_DB); @@ -127,6 +129,7 @@ static struct argp_option shellOptions[] = { {"timeout", 'T', "SECONDS", 0, SHELL_TIMEOUT}, #endif {"pktnum", 'N', "PKTNUM", 0, SHELL_PKT_NUM}, + {"bimode", 'B', 0, 0, SHELL_BI_MODE}, {0}, }; @@ -173,6 +176,9 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) { case 'A': pArgs->is_gen_auth = true; break; + case 'B': + pArgs->is_bi_mode = true; + break; case 'c': #ifdef WEBSOCKET pArgs->cloud = false; diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index bd5329d810..e9b9b9e944 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -84,8 +84,8 @@ SWords shellCommands[] = { {"alter topic", 0, 0, NULL}, {"alter user ;", 0, 0, NULL}, #ifdef TD_ENTERPRISE - {"balance vgroup;", 0, 0, NULL}, - {"balance vgroup leader ", 0, 0, NULL}, + {"balance vgroup ;", 0, 0, NULL}, + {"balance vgroup leader on ", 0, 0, NULL}, #endif // 20 @@ -531,8 +531,8 @@ void showHelp() { printf( "\n\n\ ----- special commands on enterpise version ----- \n\ - balance vgroup; \n\ - balance vgroup leader \n\ + balance vgroup ;\n\ + balance vgroup leader on \n\ compact database ; \n\ redistribute vgroup dnode ;\n\ split vgroup ;"); diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c index 8c91ff53e2..c6459c4590 100644 --- a/tools/shell/src/shellCommand.c +++ b/tools/shell/src/shellCommand.c @@ -62,7 +62,6 @@ int32_t shellCountPrefixOnes(uint8_t c) { } void shellGetPrevCharSize(const char *str, int32_t pos, int32_t *size, int32_t *width) { - ASSERT(pos > 0); if (pos <= 0) return; TdWchar wc; @@ -82,7 +81,6 @@ void shellGetPrevCharSize(const char *str, int32_t pos, int32_t *size, int32_t * } void shellGetNextCharSize(const char *str, int32_t pos, int32_t *size, int32_t *width) { - ASSERT(pos >= 0); if(pos < 0) return; TdWchar wc; @@ -91,7 +89,6 @@ void shellGetNextCharSize(const char *str, int32_t pos, int32_t *size, int32_t * } void shellInsertChar(SShellCmd *cmd, char *c, int32_t size) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; TdWchar wc; @@ -138,7 +135,6 @@ void shellInsertStr(SShellCmd *cmd, char *str, int32_t size) { } void shellBackspaceChar(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset > 0) { @@ -159,7 +155,6 @@ void shellBackspaceChar(SShellCmd *cmd) { } void shellClearLineBefore(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; shellClearScreen(cmd->endOffset + PSIZE, cmd->screenOffset + PSIZE); @@ -174,7 +169,6 @@ void shellClearLineBefore(SShellCmd *cmd) { } void shellClearLineAfter(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; shellClearScreen(cmd->endOffset + 
PSIZE, cmd->screenOffset + PSIZE); @@ -184,7 +178,6 @@ void shellClearLineAfter(SShellCmd *cmd) { } void shellDeleteChar(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset < cmd->commandSize) { @@ -203,7 +196,6 @@ void shellDeleteChar(SShellCmd *cmd) { } void shellMoveCursorLeft(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset > 0) { @@ -218,7 +210,6 @@ void shellMoveCursorLeft(SShellCmd *cmd) { } void shellMoveCursorRight(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset < cmd->commandSize) { @@ -233,7 +224,6 @@ void shellMoveCursorRight(SShellCmd *cmd) { } void shellPositionCursorHome(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset > 0) { @@ -254,7 +244,6 @@ void positionCursorMiddle(SShellCmd *cmd) { } void shellPositionCursorEnd(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (cmd->cursorOffset < cmd->commandSize) { @@ -290,7 +279,6 @@ void shellPositionCursor(int32_t step, int32_t direction) { } void shellUpdateBuffer(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; if (shellRegexMatch(cmd->buffer, "(\\s+$)|(^$)", REG_EXTENDED)) strcat(cmd->command, " "); @@ -306,7 +294,6 @@ void shellUpdateBuffer(SShellCmd *cmd) { } bool shellIsReadyGo(SShellCmd *cmd) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return false; char *total = (char *)taosMemoryCalloc(1, SHELL_MAX_COMMAND_SIZE); @@ -334,7 +321,6 @@ void shellGetMbSizeInfo(const char *str, int32_t *size, int32_t *width) { } void shellResetCommand(SShellCmd *cmd, const char s[]) { - ASSERT(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); if(cmd->cursorOffset > cmd->commandSize || cmd->endOffset < cmd->screenOffset) return; shellClearScreen(cmd->endOffset + PSIZE, cmd->screenOffset + PSIZE); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e8a5b04178..23424cea98 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -195,6 +195,30 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return shellRunSingleCommand(cmd); } + +char * strendG(const char* pstr) { + if(pstr == NULL) { + return NULL; + } + + size_t len = strlen(pstr); + if(len < 4) { + return NULL; + } + + char * p = (char *)pstr + len - 2; + if (strcmp(p, "\\G") == 0 ){ + return p; + } + + p = (char *)pstr + len - 3; + if (strcmp(p, "\\G;") == 0) { + return p; + } + + return NULL; +} + void shellRunSingleCommandImp(char *command) { int64_t st, et; char *sptr = NULL; @@ -213,7 +237,7 @@ void shellRunSingleCommandImp(char 
*command) { } } - if ((sptr = strstr(command, "\\G")) != NULL) { + if ((sptr = strendG(command)) != NULL) { *sptr = '\0'; printMode = true; // When output to a file, the switch does not work. } @@ -1291,6 +1315,14 @@ int32_t shellExecute() { shellSetConn(shell.conn, runOnce); shellReadHistory(); + if(shell.args.is_bi_mode) { + // need set bi mode + printf("Set BI mode is true.\n"); +#ifndef WEBSOCKET + taos_set_conn_mode(shell.conn, TAOS_CONN_MODE_BI, 1); +#endif + } + if (runOnce) { if (pArgs->commands != NULL) { printf("%s%s\r\n", shell.info.promptHeader, pArgs->commands); diff --git a/tools/shell/src/shellTire.c b/tools/shell/src/shellTire.c index a8726f3126..8aa67457f2 100644 --- a/tools/shell/src/shellTire.c +++ b/tools/shell/src/shellTire.c @@ -95,7 +95,7 @@ bool insertToTree(STire* tire, char* word, int len) { STireNode** nodes = tire->root.d; for (int i = 0; i < len; i++) { m = word[i] - FIRST_ASCII; - if (m < 0 || m > CHAR_CNT) { + if (m < 0 || m >= CHAR_CNT) { return false; } diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index bff2ef7592..2d3aa27a3b 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -30,7 +30,16 @@ int shell_conn_ws_server(bool first) { fprintf(stdout, "trying to connect %s****** ", cuttedDsn); fflush(stdout); for (int i = 0; i < shell.args.timeout; i++) { - shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); + if(shell.args.is_bi_mode) { + size_t len = strlen(shell.args.dsn); + char * dsn = taosMemoryMalloc(len + 32); + sprintf(dsn, "%s&conn_mode=1", shell.args.dsn); + shell.ws_conn = ws_connect_with_dsn(dsn); + taosMemoryFree(dsn); + } else { + shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); + } + if (NULL == shell.ws_conn) { int errNo = ws_errno(NULL); if (0xE001 == errNo) { @@ -238,6 +247,7 @@ static int shellDumpWebsocket(WS_RES *wres, char *fname, return numOfRows; } +char * strendG(const char* pstr); void shellRunSingleCommandWebsocketImp(char *command) { int64_t st, et; char *sptr = NULL; @@ -256,7 +266,7 @@ void shellRunSingleCommandWebsocketImp(char *command) { } } - if ((sptr = strstr(command, "\\G")) != NULL) { + if ((sptr = strendG(command)) != NULL) { *sptr = '\0'; printMode = true; // When output to a file, the switch does not work. } diff --git a/utils/tsim/inc/simInt.h b/utils/tsim/inc/simInt.h index 02c7540cde..f2360277e0 100644 --- a/utils/tsim/inc/simInt.h +++ b/utils/tsim/inc/simInt.h @@ -184,7 +184,6 @@ extern SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; extern SCommand simCmdList[]; extern int32_t simScriptPos; extern int32_t simScriptSucceed; -extern int32_t simDebugFlag; extern char simScriptDir[]; extern bool abortExecution; extern bool useValgrind; diff --git a/utils/tsim/src/simSystem.c b/utils/tsim/src/simSystem.c index 98f9217fd6..dcf5d6ab12 100644 --- a/utils/tsim/src/simSystem.c +++ b/utils/tsim/src/simSystem.c @@ -21,7 +21,6 @@ SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; SCommand simCmdList[SIM_CMD_END]; int32_t simScriptPos = -1; int32_t simScriptSucceed = 0; -int32_t simDebugFlag = 143; void simCloseTaosdConnect(SScript *script); char simScriptDir[PATH_MAX] = {0}; @@ -32,7 +31,6 @@ int32_t simInitCfg() { taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1); SConfig *pCfg = taosGetCfg(); - simDebugFlag = cfgGetItem(pCfg, "simDebugFlag")->i32; tstrncpy(simScriptDir, cfgGetItem(pCfg, "scriptDir")->str, PATH_MAX); return 0; }
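A closing note on the `\G` handling introduced in shellEngine.c and reused in shellWebsocket.c: the old code located the switch with `strstr(command, "\\G")`, which matched the first `\G` anywhere in the line, so a `\G` embedded inside a SQL string literal silently truncated the query at that point. The new `strendG` helper only recognizes `\G` or `\G;` as a trailing vertical-output switch (and is shared with the websocket path through the `char * strendG(const char* pstr);` forward declaration). A minimal Python sketch of the same end-of-command check, illustrative only and not the shell's C code, with the 4-character minimum mirroring the committed `len < 4` guard:

```python
# Sketch of the strendG logic: return the index where a trailing \G or \G;
# begins, or None when the command should run unmodified.
def strend_g(command: str):
    if len(command) < 4:  # mirrors the C guard: too short for a query plus \G
        return None
    for suffix in ("\\G", "\\G;"):
        if command.endswith(suffix):
            return len(command) - len(suffix)
    return None

assert strend_g('select * from t\\G') == 15        # switch found at the tail
assert strend_g('select "a\\Gb" from t;') is None  # embedded \G no longer truncates
```

With this check, the caller can still terminate the string at the returned index and flip `printMode`, exactly as both `shellRunSingleCommandImp` and `shellRunSingleCommandWebsocketImp` do after their `strstr` call was replaced.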