diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 76ac6ba004..30a33fcdb4 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -176,6 +176,14 @@ IF(APPLE)
     set(THREADS_PREFER_PTHREAD_FLAG ON)
 ENDIF()
 
+IF(TD_WINDOWS)
+    IF(MSVC)
+        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:8388608")
+    ELSEIF(MINGW)
+        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--stack,8388608")
+    ENDIF()
+ENDIF()
+
 MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
 
 set(TD_DEPS_DIR "x86")
diff --git a/cmake/cmake.version b/cmake/cmake.version
index a6bf90fa3c..0e4785f643 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.1.1.0.alpha")
+  SET(TD_VER_NUMBER "3.2.0.0.alpha")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in
index 1ac90ebdd4..762df40e10 100644
--- a/cmake/mxml_CMakeLists.txt.in
+++ b/cmake/mxml_CMakeLists.txt.in
@@ -1,7 +1,7 @@
 # cos
 ExternalProject_Add(mxml
     GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
-    GIT_TAG release-2.12
+    GIT_TAG v2.12
     SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
     #BINARY_DIR ""
     BUILD_IN_SOURCE TRUE
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index ec9162c859..e204f7765e 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf
 
 ### Step 1
 
-If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
 
 :::note
 FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib\taos/\*` command.
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
index 38a8048a21..e65046f65d 100644
--- a/docs/en/14-reference/05-taosbenchmark.md
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen
 
 There are two ways to install taosBenchmark:
 
-- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details.
+- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
 - Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index cfc2554387..48b3d04b24 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
 
 There are two ways to install taosKeeper:
 
-- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](/operation/pkg-install) for details.
+- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
 - You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
 
 ## Configuration and Launch
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index a98c3e3a6b..02a12e55bc 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
 1. Linux operating system
 2. Java 8 and Maven installed
 3. Git/curl/vi is installed
-4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
+4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
 
 ## Install Kafka
diff --git a/docs/en/20-third-party/70-seeq.md b/docs/en/20-third-party/70-seeq.md
new file mode 100644
index 0000000000..e7ad5c8173
--- /dev/null
+++ b/docs/en/20-third-party/70-seeq.md
@@ -0,0 +1,440 @@
+---
+sidebar_label: Seeq
+title: Seeq
+description: How to use Seeq and TDengine to perform time series data analysis
+---
+
+# How to use Seeq and TDengine to perform time series data analysis
+
+## Introduction
+
+Seeq is advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, extending the reach of a single data scientist to many frontline workers.
+
+With the TDengine Java connector, Seeq can query time series data stored in TDengine and offer functionalities such as data visualization, analysis, and forecasting.
+
+### Install Seeq
+
+Please download the Seeq Server and Seeq Data Lab installation packages from the [Seeq official website](https://www.seeq.com/customer-download).
+
+### Install and start Seeq Server
+
+```
+tar xvzf seeq-server-xxx.tar.gz
+cd seeq-server-installer
+sudo ./install
+
+sudo seeq service enable
+sudo seeq start
+```
+
+### Install and start Seeq Data Lab Server
+
+Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
+
+```
+tar xvf seeq-data-lab--64bit-linux.tar.gz
+sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
+sudo seeq config set Network/DataLab/Hostname localhost
+sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
+sudo seeq config set Network/Hostname # the host IP or URL of the main Seeq Server
+
+# If the main Seeq server is configured to listen over HTTPS
+sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
+
+# If the main Seeq server is NOT configured to listen over HTTPS
+sudo seeq config set Network/Webserver/Port
+
+# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
+sudo seeq config set Network/DataLab/Hostname # the host IP (not URL) of the Data Lab server
+sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
+```
+
+### Install TDengine on-premise instance
+
+See [Quick Install from Package](../../get-started).
+
+### Or use TDengine Cloud
+
+Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
+
+## Configure Seeq to access TDengine
+
+1. Get the data location configuration:
+
+```
+sudo seeq config get Folders/Data
+```
+
+2. Download the TDengine Java connector from maven.org (please use the latest version, currently [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy it into the plugins/lib directory under the data folder obtained in step 1.
+
+3. Restart the Seeq server:
+
+```
+sudo seeq restart
+```
+
+4. Enter the license
+
+Open ip:34216 in a browser and enter the license according to the guide.
+
+## How to use Seeq to analyze time-series data stored in TDengine
+
+This chapter demonstrates how to use Seeq in conjunction with TDengine for time series data analysis.
+
+### Scenario Overview
+
+The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict future electricity consumption and purchase additional equipment to support it. Electricity consumption varies with monthly orders, and seasonal variations also affect power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.
+
+### Schema
+
+```
+CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
+CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
+```
+
+![Seeq demo schema](./seeq/seeq-demo-schema.webp)
+
+### Mock data
+
+```
+python mockdata.py
+taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
+```
+
+The source code is hosted at the [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
+
+### Using Seeq for data analysis
+
+#### Data Source configuration
+
+Log in as a Seeq administrator and create the following data sources.
+
+- Power
+
+```
+{
+  "QueryDefinitions": [
+    {
+      "Name": "PowerNum",
+      "Type": "SIGNAL",
+      "Sql": "SELECT ts, num FROM meters",
+      "Enabled": true,
+      "TestMode": false,
+      "TestQueriesDuringSync": true,
+      "InProgressCapsulesEnabled": false,
+      "Variables": null,
+      "Properties": [
+        {
+          "Name": "Name",
+          "Value": "Num",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Interpolation Method",
+          "Value": "linear",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Maximum Interpolation",
+          "Value": "2day",
+          "Sql": null,
+          "Uom": "string"
+        }
+      ],
+      "CapsuleProperties": null
+    }
+  ],
+  "Type": "GENERIC",
+  "Hostname": null,
+  "Port": 0,
+  "DatabaseName": null,
+  "Username": "root",
+  "Password": "taosdata",
+  "InitialSql": null,
+  "TimeZone": null,
+  "PrintRows": false,
+  "UseWindowsAuth": false,
+  "SqlFetchBatchSize": 100000,
+  "UseSSL": false,
+  "JdbcProperties": null,
+  "GenericDatabaseConfig": {
+    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
+    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
+    "ResolutionInNanoseconds": 1000,
+    "ZonedColumnTypes": []
+  }
+}
+```
+
+- Goods
+
+```
+{
+  "QueryDefinitions": [
+    {
+      "Name": "PowerGoods",
+      "Type": "CONDITION",
+      "Sql": "SELECT ts1, ts2, goods FROM power.goods",
+      "Enabled": true,
+      "TestMode": false,
+      "TestQueriesDuringSync": true,
+      "InProgressCapsulesEnabled": false,
+      "Variables": null,
+      "Properties": [
+        {
+          "Name": "Name",
+          "Value": "Goods",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Maximum Duration",
+          "Value": "10days",
+          "Sql": null,
+          "Uom": "string"
+        }
+      ],
+      "CapsuleProperties": [
+        {
+          "Name": "goods",
+          "Value": "${columnResult}",
+          "Column": "goods",
+          "Uom": "string"
+        }
+      ]
+    }
+  ],
+  "Type": "GENERIC",
+  "Hostname": null,
+  "Port": 0,
+  "DatabaseName": null,
+  "Username": "root",
+  "Password": "taosdata",
+  "InitialSql": null,
+  "TimeZone": null,
+  "PrintRows": false,
+  "UseWindowsAuth": false,
+  "SqlFetchBatchSize": 100000,
+  "UseSSL": false,
+  "JdbcProperties": null,
+  "GenericDatabaseConfig": {
+    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
+    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
+    "ResolutionInNanoseconds": 1000,
+    "ZonedColumnTypes": []
+  }
+}
+```
+
+- Temperature
+
+```
+{
+  "QueryDefinitions": [
+    {
+      "Name": "PowerNum",
+      "Type": "SIGNAL",
+      "Sql": "SELECT ts, temperature FROM meters",
+      "Enabled": true,
+      "TestMode": false,
+      "TestQueriesDuringSync": true,
+      "InProgressCapsulesEnabled": false,
+      "Variables": null,
+      "Properties": [
+        {
+          "Name": "Name",
+          "Value": "Temperature",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Interpolation Method",
+          "Value": "linear",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Maximum Interpolation",
+          "Value": "2day",
+          "Sql": null,
+          "Uom": "string"
+        }
+      ],
+      "CapsuleProperties": null
+    }
+  ],
+  "Type": "GENERIC",
+  "Hostname": null,
+  "Port": 0,
+  "DatabaseName": null,
+  "Username": "root",
+  "Password": "taosdata",
+  "InitialSql": null,
+  "TimeZone": null,
+  "PrintRows": false,
+  "UseWindowsAuth": false,
+  "SqlFetchBatchSize": 100000,
+  "UseSSL": false,
+  "JdbcProperties": null,
+  "GenericDatabaseConfig": {
+    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
+    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
+    "ResolutionInNanoseconds": 1000,
+    "ZonedColumnTypes": []
+  }
+}
+```
+
+#### Launch Seeq Workbench
+
+Log in to the Seeq server at IP:port and create a new Seeq Workbench, then select data sources and choose appropriate tools for data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for details.
+
+![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
+
+#### Use Seeq Data Lab Server for advanced data analysis
+
+Log in to the Seeq service at IP:port and create a new Seeq Data Lab. Then you can use advanced tools, including the Python environment and machine learning add-ons, for more complex analysis.
+
+```Python
+from seeq import spy
+import pandas as pd
+import matplotlib.pyplot as plt
+import mlforecast
+import lightgbm as lgb
+from mlforecast.target_transforms import Differences
+
+# pin the SPy behavior to a known compatible version
+spy.options.compatibility = 189
+
+# locate the signal by its Seeq item ID and pull the raw samples without re-gridding
+ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
+sig = ds.loc[ds['Name'].isin(['Num'])]
+data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
+
+# reshape into the (unique_id, ds, y) frame expected by mlforecast;
+# timestamps are converted to int64 so an integer freq of 1 can be used
+da = data['Num'].index.tolist()
+li = data['Num'].tolist()
+
+data2 = pd.DataFrame()
+data2['ds'] = pd.to_datetime(da).astype('int64')
+data2['y'] = li
+data2.insert(0, column="unique_id", value="unique_id")
+
+print("Forecasting ...")
+
+# forecast one year ahead with LightGBM, differencing out the yearly seasonality
+forecast = mlforecast.MLForecast(
+    models=lgb.LGBMRegressor(),
+    freq=1,
+    lags=[365],
+    target_transforms=[Differences([365])],
+)
+
+forecast.fit(data2)
+predicts = forecast.predict(365)
+
+pd.concat([data2, predicts]).set_index("ds").plot(title="current data with forecast")
+plt.show()
+```
+
+Example output:
+
+![Seeq forecast result](./seeq/seeq-forecast-result.webp)
+
+### How to configure a Seeq data source to access TDengine Cloud
+
+Configuring a Seeq data source for TDengine Cloud is essentially the same as for a local TDengine instance. After logging in to TDengine Cloud, select "Programming - Java" and copy the provided JDBC URL string, which carries the access token. Then use this JDBC URL string as the DatabaseJdbcUrl value in the Seeq Data Source configuration.
+
+Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.
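+
+Before wiring the copied URL into Seeq, it can be worth checking it outside Seeq first, so that connectivity problems are not confused with data source configuration problems. Below is a minimal sketch using plain JDBC; it assumes the taos-jdbcdriver JAR downloaded earlier is on the classpath, that your instance contains the `test.meters` table used in the example data source below, and that `<token>` is replaced with your actual token.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+public class CloudUrlCheck {
+    public static void main(String[] args) throws Exception {
+        // same driver class and URL format as in the Seeq data source configuration;
+        // <token> is a placeholder for the token copied from TDengine Cloud
+        String url = "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=<token>";
+        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+        try (Connection conn = DriverManager.getConnection(url);
+             Statement stmt = conn.createStatement();
+             // the database name must be given explicitly, e.g. test.meters
+             ResultSet rs = stmt.executeQuery("SELECT ts, voltage FROM test.meters LIMIT 5")) {
+            while (rs.next()) {
+                System.out.println(rs.getTimestamp("ts") + " " + rs.getObject("voltage"));
+            }
+        }
+    }
+}
+```
+
+If this prints a few rows, the same URL string can be pasted into the DatabaseJdbcUrl field unchanged.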
+
+#### The data source of TDengine Cloud example
+
+```
+{
+  "QueryDefinitions": [
+    {
+      "Name": "CloudVoltage",
+      "Type": "SIGNAL",
+      "Sql": "SELECT ts, voltage FROM test.meters",
+      "Enabled": true,
+      "TestMode": false,
+      "TestQueriesDuringSync": true,
+      "InProgressCapsulesEnabled": false,
+      "Variables": null,
+      "Properties": [
+        {
+          "Name": "Name",
+          "Value": "Voltage",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Interpolation Method",
+          "Value": "linear",
+          "Sql": null,
+          "Uom": "string"
+        },
+        {
+          "Name": "Maximum Interpolation",
+          "Value": "2day",
+          "Sql": null,
+          "Uom": "string"
+        }
+      ],
+      "CapsuleProperties": null
+    }
+  ],
+  "Type": "GENERIC",
+  "Hostname": null,
+  "Port": 0,
+  "DatabaseName": null,
+  "Username": "root",
+  "Password": "taosdata",
+  "InitialSql": null,
+  "TimeZone": null,
+  "PrintRows": false,
+  "UseWindowsAuth": false,
+  "SqlFetchBatchSize": 100000,
+  "UseSSL": false,
+  "JdbcProperties": null,
+  "GenericDatabaseConfig": {
+    "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
+    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
+    "ResolutionInNanoseconds": 1000,
+    "ZonedColumnTypes": []
+  }
+}
+```
+
+#### Seeq Workbench with TDengine Cloud data source example
+
+![Seeq workbench with TDengine Cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
+
+## Conclusion
+
+By integrating Seeq and TDengine, users can leverage TDengine's efficient storage and querying performance while benefiting from Seeq's powerful data visualization and analysis capabilities.
+
+This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
+
+Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements and enabling predictive and prescriptive analytics applications.
diff --git a/docs/en/20-third-party/seeq/seeq-demo-schema.webp b/docs/en/20-third-party/seeq/seeq-demo-schema.webp new file mode 100644 index 0000000000..3f603749ad Binary files /dev/null and b/docs/en/20-third-party/seeq/seeq-demo-schema.webp differ diff --git a/docs/en/20-third-party/seeq/seeq-demo-workbench.webp b/docs/en/20-third-party/seeq/seeq-demo-workbench.webp new file mode 100644 index 0000000000..5c3d7a4158 Binary files /dev/null and b/docs/en/20-third-party/seeq/seeq-demo-workbench.webp differ diff --git a/docs/en/20-third-party/seeq/seeq-forecast-result.webp b/docs/en/20-third-party/seeq/seeq-forecast-result.webp new file mode 100644 index 0000000000..13144207eb Binary files /dev/null and b/docs/en/20-third-party/seeq/seeq-forecast-result.webp differ diff --git a/docs/en/20-third-party/seeq/seeq-workbench-with-tdengine-cloud.webp b/docs/en/20-third-party/seeq/seeq-workbench-with-tdengine-cloud.webp new file mode 100644 index 0000000000..f486c3ea29 Binary files /dev/null and b/docs/en/20-third-party/seeq/seeq-workbench-with-tdengine-cloud.webp differ diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index c02b3227ca..6f863d8c25 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t import Release from "/components/ReleaseV3"; +## 3.1.1.0 + + + ## 3.1.0.3 diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 0ff00d1710..64e9a3daed 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -1004,7 +1004,7 @@ TaosConsumer consumer = new TaosConsumer<>(config); - httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。 - messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。 - httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。 - 其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group) + 其他参数请参考:[Consumer 参数列表](../../develop/tmq#创建-consumer-以及consumer-group) #### 订阅消费数据 @@ -1082,7 +1082,7 @@ consumer.unsubscribe(); consumer.close() ``` -详情请参考:[数据订阅](../../../develop/tmq) +详情请参考:[数据订阅](../../develop/tmq) #### 完整示例 @@ -1373,7 +1373,7 @@ public static void main(String[] args) throws Exception { **解决方法**: 更换 taos-jdbcdriver 3.0.2+ 版本。 -其它问题请参考 [FAQ](../../../train-faq/faq) +其它问题请参考 [FAQ](../../train-faq/faq) ## API 参考 diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx index 3e51aa72bb..018552117e 100644 --- a/docs/zh/08-connector/26-rust.mdx +++ b/docs/zh/08-connector/26-rust.mdx @@ -352,7 +352,7 @@ client.put(&sml_data)? 
### 数据订阅 -TDengine 通过消息队列 [TMQ](../../../taos-sql/tmq/) 启动一个订阅。 +TDengine 通过消息队列 [TMQ](../../taos-sql/tmq/) 启动一个订阅。 #### 创建 Topic @@ -491,7 +491,7 @@ let taos = pool.get()?; ## 常见问题 -请参考 [FAQ](../../../train-faq/faq) +请参考 [FAQ](../../train-faq/faq) ## API 参考 diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index 8be3d4d0a8..cc9f467138 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -11,7 +11,11 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能 ## 安装 -- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark +taosBenchmark 有两种安装方式: + +- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](../../get-started/)。 + +- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 ## 运行 diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md index 7e870def72..92c85e7a2c 100644 --- a/docs/zh/14-reference/14-taosKeeper.md +++ b/docs/zh/14-reference/14-taosKeeper.md @@ -13,7 +13,12 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的 ## 安装 -- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper +taosKeeper 有两种安装方式: +taosKeeper 安装方式: + +- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](../../get-started/)。 + +- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。 ## 配置和运行方式 diff --git a/docs/zh/17-operation/07-cluster.md b/docs/zh/17-operation/07-cluster.md deleted file mode 100644 index cf4bfafd53..0000000000 --- a/docs/zh/17-operation/07-cluster.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: 集群运维 -description: TDengine 提供了多种集群运维手段以使集群运行更健康更高效 ---- - -为了使集群运行更健康更高效,TDengine 企业版提供了一些运维手段来帮助系统管理员更好地运维集群。 - -## 数据重整 - -TDengine 面向多种写入场景,在有些写入场景下,TDengine 的存储会导致数据存储的放大或数据文件的空洞等。这一方面影响数据的存储效率,另一方面也会影响查询效率。为了解决上述问题,TDengine 企业版提供了对数据的重整功能,即 DATA COMPACT 功能,将存储的数据文件重新整理,删除文件空洞和无效数据,提高数据的组织度,从而提高存储和查询的效率。 - -**语法** - -```sql -COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']; -``` - -**效果** - -- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件 -- COMPCAT 会删除被删除数据以及被删除的表的数据 -- COMPACT 会合并多个 STT 文件 -- 可通过 start with 关键字指定 COMPACT 数据的起始时间 -- 可通过 end with 关键字指定 COMPACT 数据的终止时间 - -**补充说明** - -- COMPACT 为异步,执行 COMPACT 命令后不会等 COMPACT 结束就会返回。如果上一个 COMPACT 没有完成则再发起一个 COMPACT 任务,则会等上一个任务完成后再返回。 -- COMPACT 可能阻塞写入,但不阻塞查询 -- COMPACT 的进度不可观测 - -## 集群负载再平衡 - -当多副本集群中的一个或多个节点因为升级或其它原因而重启后,有可能出现集群中各个 dnode 负载不均衡的现象,极端情况下会出现所有 vgroup 的 leader 都位于同一个 dnode 的情况。为了解决这个问题,可以使用下面的命令 - -```sql -balance vgroup leader; -``` - -**功能** - -让所有的 vgroup 的 leade r在各自的replica节点上均匀分布。这个命令会让 vgroup 强制重新选举,通过重新选举,在选举的过程中,变换 vgroup 的leader,通过这个方式,最终让leader均匀分布。 - -**注意** - -Raft选举本身带有随机性,所以通过选举的重新分布产生的均匀分布也是带有一定的概率,不会完全的均匀。**该命令的副作用是影响查询和写入**,在vgroup重新选举时,从开始选举到选举出新的 leader 这段时间,这 个vgroup 无法写入和查询。选举过程一般在秒级完成。所有的vgroup会依次逐个重新选举。 - -## 恢复数据节点 - -在多节点三副本的集群环境中,如果某个 dnode 的磁盘损坏,该 dnode 会自动退出,但集群中其它的 dnode 仍然能够继续提供写入和查询服务。 - -在更换了损坏的磁盘后,如果想要让曾经主动退出的 dnode 重新加入集群提供服务,可以通过 `restore dnode` 命令来恢复该数据节点上的部分或全部逻辑节点,该功能依赖多副本中的其它副本进行数据复制,所以只在集群中 dnode 数量大于等于 3 且副本数为 3 的情况下能够工作。 - - -```sql -restore dnode ;# 恢复dnode上的mnode,所有vnode和qnode -restore mnode on dnode ;# 恢复dnode上的mnode -restore vnode on dnode ;# 恢复dnode上的所有vnode -restore qnode on dnode ;# 恢复dnode上的qnode -``` - -**限制** -- 该功能是基于已有的复制功能的恢复,不是灾难恢复或者备份恢复,所以对于要恢复的 mnode 和 vnode来说,使用该命令的前提是还存在该 mnode 或 vnode 的其它两个副本仍然能够正常工作。 -- 该命令不能修复数据目录中的个别文件的损坏或者丢失。例如,如果某个 mnode 或者 vnode 中的个别文件或数据损坏,无法单独恢复损坏的某个文件或者某块数据。此时,可以选择将该 mnode/vnode 的数据全部清空再进行恢复。 - - -## 虚拟组分裂 (Scale Out) - -当一个 vgroup 因为子表数过多而导致 CPU 或 Disk 资源使用量负载过高时,增加 dnode 节点后,可通过 
`split vgroup` 命令把该 vgroup 分裂为两个虚拟组。分裂完成后,新产生的两个 vgroup 承担原来由一个 vgroup 提供的读写服务。这也是 TDengine 为企业版用户提供的 scale out 集群的能力。 - -```sql -split vgroup -``` - -**注意** -- 单副本库虚拟组,在分裂完成后,历史时序数据总磁盘空间使用量,可能会翻倍。所以,在执行该操作之前,通过增加 dnode 节点方式,确保集群中有足够的 CPU 和磁盘资源,避免资源不足现象发生。 -- 该命令为 DB 级事务;执行过程,当前DB的其它管理事务将会被拒绝。集群中,其它DB不受影响。 -- 分裂任务执行过程中,可持续提供读写服务;期间,可能存在可感知的短暂的读写业务中断。 -- 在分裂过程中,不支持流和订阅。分裂结束后,历史 WAL 会清空。 -- 分裂过程中,可支持节点宕机重启容错;但不支持节点磁盘故障容错。 \ No newline at end of file diff --git a/docs/zh/17-operation/09-storage.md b/docs/zh/17-operation/09-storage.md deleted file mode 100644 index 185b2c40ec..0000000000 --- a/docs/zh/17-operation/09-storage.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: 多级存储 ---- - -## 多级存储 - -说明:多级存储功能仅企业版支持。 - -在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。 - -除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。 - -多级存储支持 3 级,每级最多可配置 16 个挂载点。 - -TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中): - -``` -dataDir [path] -``` - -- path: 挂载点的文件夹路径 -- level: 介质存储等级,取值为 0,1,2。 - 0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。 - 各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。 - 同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。 - 需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。 -- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。 - -在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式: - -``` -dataDir /mnt/data1 0 1 -dataDir /mnt/data2 0 0 -dataDir /mnt/data3 1 0 -dataDir /mnt/data4 1 0 -dataDir /mnt/data5 2 0 -dataDir /mnt/data6 2 0 -``` - -:::note - -1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。 -2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。 -3. 多级存储目前不支持删除已经挂载的硬盘的功能。 - -::: - -## 0 级负载均衡 - -在多级存储中,有且只有一个主挂载点,主挂载点承担了系统中最重要的元数据在座,同时各个 vnode 的主目录均存在于当前 dnode 主挂载点上,从而导致该 dnode 的写入性能受限于单个磁盘的 IO 吞吐能力。 - -从 TDengine 3.1.0.0 开始,如果一个 dnode 配置了多个 0 级挂载点,我们将该 dnode 上所有 vnode 的主目录均衡分布在所有的 0 级挂载点上,由这些 0 级挂载点共同承担写入负荷。在网络 I/O 及其它处理资源不成为瓶颈的情况下,通过优化集群配置,测试结果证明整个系统的写入能力和 0 级挂载点的数量呈现线性关系,即随着 0 级挂载点数量的增加,整个系统的写入能力也成倍增加。 - -## 同级挂载点选择策略 - -一般情况下,当 TDengine 要从同级挂载点中选择一个用于生成新的数据文件时,采用 round robin 策略进行选择。但现实中有可能每个磁盘的容量不相同,或者容量相同但写入的数据量不相同,这就导致会出现每个磁盘上的可用空间不均衡,在实际进行选择时有可能会选择到一个剩余空间已经很小的磁盘。为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 `minDiskFreeSize`,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,其值应该大于 2GB,即会跳过可用空间小于 2GB 的挂载点。 diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md index 64391916ab..3d8822fdae 100644 --- a/docs/zh/20-third-party/11-kafka.md +++ b/docs/zh/20-third-party/11-kafka.md @@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送 1. Linux 操作系统 2. 已安装 Java 8 和 Maven 3. 已安装 Git、curl、vi -4. 已安装并启动 TDengine。 +4. 
已安装并启动 TDengine。如果还没有可参考[安装和卸载](../../get-started/) ## 安装 Kafka diff --git a/docs/zh/20-third-party/70-seeq.md b/docs/zh/20-third-party/70-seeq.md index 0bdf58955d..d5b7463925 100644 --- a/docs/zh/20-third-party/70-seeq.md +++ b/docs/zh/20-third-party/70-seeq.md @@ -14,7 +14,7 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在 ### Seeq 安装方法 -从 (Seeq 官网)[https://www.seeq.com/customer-download]下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。 +从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。 ### Seeq Server 安装和启动 @@ -29,7 +29,7 @@ sudo seeq start ### Seeq Data Lab Server 安装和启动 -Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见(Seeq 官方文档)[https://support.seeq.com/space/KB/1034059842]。 +Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。 ``` tar xvf seeq-data-lab--64bit-linux.tar.gz @@ -51,7 +51,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve ## TDengine 本地实例安装方法 -请参考(官网文档)[https://docs.taosdata.com/get-started/package/]。 +请参考[官网文档](../../get-started)。 ## TDengine Cloud 访问方法 如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。 @@ -64,7 +64,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve sudo seeq config get Folders/Data ``` -2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为(3.2.4)[https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar],并拷贝至 data 存储位置的 plugins\lib 中。 +2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为[3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar),并拷贝至 data 存储位置的 plugins\lib 中。 3. 重新启动 seeq server @@ -91,7 +91,7 @@ CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT); ``` -!(Seeq demo schema)[./seeq/seeq-demo-schema.webp] +![Seeq demo schema](./seeq/seeq-demo-schema.webp) ### 构造数据方法 @@ -99,7 +99,8 @@ CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT); python mockdata.py taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);" ``` -源代码托管在(github 仓库)[https://github.com/sangshuduo/td-forecasting]。 + +源代码托管在[GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。 ### 使用 Seeq 进行数据分析 @@ -287,9 +288,9 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from #### 使用 Seeq Workbench -登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见(官方知识库)[https://support.seeq.com/space/KB/146440193/Seeq+Workbench]。 +登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见[官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。 -!(Seeq Workbench)[./seeq/seeq-demo-workbench.webp] +![Seeq Workbench](./seeq/seeq-demo-workbench.webp) #### 用 Seeq Data Lab Server 进行进一步的数据分析 @@ -358,7 +359,7 @@ plt.show() 运行程序输出结果: -!(Seeq forecast result)[./seeq/seeq-forecast-result.webp] +![Seeq forecast result](./seeq/seeq-forecast-result.webp) ### 配置 Seeq 数据源连接 TDengine Cloud @@ -426,7 +427,7 @@ plt.show() #### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例 -!(Seeq workbench with TDengine cloud)[./seeq/seeq-workbench-with-tdengine-cloud.webp] +![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp) ## 方案总结 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index d4e4b116b7..89bb8aaf8f 
100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.1.1.0 + + + ## 3.1.0.3 diff --git a/include/client/taos.h b/include/client/taos.h index 5b7946c9ad..a640e12ac5 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -268,6 +268,13 @@ typedef enum tmq_conf_res_t { TMQ_CONF_OK = 0, } tmq_conf_res_t; +typedef enum tmq_res_t { + TMQ_RES_INVALID = -1, + TMQ_RES_DATA = 1, + TMQ_RES_TABLE_META = 2, + TMQ_RES_METADATA = 3, +} tmq_res_t; + typedef struct tmq_topic_assignment { int32_t vgId; int64_t currentOffset; @@ -302,6 +309,8 @@ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1 DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); +DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); +DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); @@ -309,34 +318,22 @@ DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------------ TAOSX -----------------------------------*/ -// note: following apis are unstable -enum tmq_res_t { - TMQ_RES_INVALID = -1, - TMQ_RES_DATA = 1, - TMQ_RES_TABLE_META = 2, - TMQ_RES_METADATA = 3, -}; - typedef struct tmq_raw_data { void *raw; uint32_t raw_len; uint16_t raw_type; } tmq_raw_data; -typedef enum tmq_res_t tmq_res_t; - -DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); -DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw); DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw); DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname); DLL_EXPORT int taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname, TAOS_FIELD *fields, int numFields); DLL_EXPORT void tmq_free_raw(tmq_raw_data raw); + // Returning null means error. 
Returned result need to be freed by tmq_free_json_meta DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); DLL_EXPORT void tmq_free_json_meta(char *jsonMeta); - /* ---------------------------- TAOSX END -------------------------------- */ typedef enum { diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h index 7747df8595..4312da6f2c 100644 --- a/include/libs/stream/streamState.h +++ b/include/libs/stream/streamState.h @@ -29,54 +29,6 @@ extern "C" { #include "storageapi.h" -// void* streamBackendInit(const char* path); -// void streamBackendCleanup(void* arg); -// SListNode* streamBackendAddCompare(void* backend, void* arg); -// void streamBackendDelCompare(void* backend, void* arg); - -// <<<<<<< HEAD -// typedef struct STdbState { -// rocksdb_t* rocksdb; -// rocksdb_column_family_handle_t** pHandle; -// rocksdb_writeoptions_t* writeOpts; -// rocksdb_readoptions_t* readOpts; -// rocksdb_options_t** cfOpts; -// rocksdb_options_t* dbOpt; -// struct SStreamTask* pOwner; -// void* param; -// void* env; -// SListNode* pComparNode; -// void* pBackend; -// char idstr[64]; -// void* compactFactory; -// TdThreadRwlock rwLock; -// ======= -// typedef struct STdbState { -// rocksdb_t* rocksdb; -// rocksdb_column_family_handle_t** pHandle; -// rocksdb_writeoptions_t* writeOpts; -// rocksdb_readoptions_t* readOpts; -// rocksdb_options_t** cfOpts; -// rocksdb_options_t* dbOpt; -// struct SStreamTask* pOwner; -// void* param; -// void* env; -// SListNode* pComparNode; -// void* pBackendHandle; -// char idstr[64]; -// void* compactFactory; -// -// TDB* db; -// TTB* pStateDb; -// TTB* pFuncStateDb; -// TTB* pFillStateDb; // todo refactor -// TTB* pSessionStateDb; -// TTB* pParNameDb; -// TTB* pParTagDb; -// TXN* txn; -//} STdbState; -//>>>>>>> enh/dev3.0 - SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages); void streamStateClose(SStreamState* pState, bool remove); int32_t streamStateBegin(SStreamState* pState); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 99160f1519..ea3524d12d 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -41,16 +41,16 @@ enum { STREAM_STATUS__PAUSE, }; -enum { +typedef enum ETaskStatus { TASK_STATUS__NORMAL = 0, TASK_STATUS__DROPPING, - TASK_STATUS__FAIL, + TASK_STATUS__UNINIT, // not used, an placeholder TASK_STATUS__STOP, TASK_STATUS__SCAN_HISTORY, // stream task scan history data by using tsdbread in the stream scanner TASK_STATUS__HALT, // pause, but not be manipulated by user command TASK_STATUS__PAUSE, // pause TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to put into inputQ anymore -}; +} ETaskStatus; enum { TASK_SCHED_STATUS__INACTIVE = 1, diff --git a/include/os/osSysinfo.h b/include/os/osSysinfo.h index a6a3655a55..29b6f07dca 100644 --- a/include/os/osSysinfo.h +++ b/include/os/osSysinfo.h @@ -39,7 +39,7 @@ int64_t taosGetOsUptime(); int32_t taosGetEmail(char *email, int32_t maxLen); int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t maxLen); int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores); -int32_t taosGetCpuCores(float *numOfCores); +int32_t taosGetCpuCores(float *numOfCores, bool physical); void taosGetCpuUsage(double *cpu_system, double *cpu_engine); int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma); int32_t taosGetTotalMemory(int64_t *totalKB); diff --git a/source/client/src/clientTmq.c 
b/source/client/src/clientTmq.c index e861bd4b92..c4021e5302 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -152,7 +152,6 @@ typedef struct { int32_t vgId; int32_t vgStatus; int32_t vgSkipCnt; // here used to mark the slow vgroups -// bool receivedInfoFromVnode; // has already received info from vnode int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp. SEpSet epSet; @@ -190,17 +189,13 @@ typedef struct { typedef struct { int64_t refId; - int32_t epoch; void* pParam; __tmq_askep_fn_t pUserFn; } SMqAskEpCbParam; typedef struct { int64_t refId; - int32_t epoch; char topicName[TSDB_TOPIC_FNAME_LEN]; -// SMqClientVg* pVg; -// SMqClientTopic* pTopic; int32_t vgId; uint64_t requestId; // request id for debug purpose } SMqPollCbParam; @@ -237,7 +232,6 @@ typedef struct { int64_t refId; int32_t epoch; int32_t waitingRspNum; - int32_t totalRspNum; int32_t code; tmq_commit_cb* callbackFn; /*SArray* successfulOffsets;*/ @@ -445,7 +439,6 @@ static int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) { SMqCommitCbParam* pParam = (SMqCommitCbParam*)param; SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params; -// taosMemoryFree(pParam->pOffset); taosMemoryFree(pBuf->pData); taosMemoryFree(pBuf->pEpSet); @@ -516,14 +509,14 @@ static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffse pMsgSendInfo->fp = tmqCommitCb; pMsgSendInfo->msgType = TDMT_VND_TMQ_COMMIT_OFFSET; - atomic_add_fetch_32(&pParamSet->waitingRspNum, 1); - atomic_add_fetch_32(&pParamSet->totalRspNum, 1); - - SEp* pEp = GET_ACTIVE_EP(epSet); - - int64_t transporterId = 0; - return asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, pMsgSendInfo); + atomic_add_fetch_32(&pParamSet->waitingRspNum, 1); + code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, pMsgSendInfo); + if(code != 0){ + atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1); + return code; + } + return code; } static SMqClientTopic* getTopicByName(tmq_t* tmq, const char* pTopicName) { @@ -556,8 +549,6 @@ static SMqCommitCbParamSet* prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* p return pParamSet; } - - static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClientVg** pVg){ SMqClientTopic* pTopic = getTopicByName(tmq, pTopicName); if (pTopic == NULL) { @@ -578,11 +569,10 @@ static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClient } static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STqOffsetVal* offsetVal, tmq_commit_cb* pCommitFp, void* userParam) { - int32_t code = 0; tscInfo("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId); taosRLockLatch(&tmq->lock); SMqClientVg* pVg = NULL; - code = getClientVg(tmq, pTopicName, vgId, &pVg); + int32_t code = getClientVg(tmq, pTopicName, vgId, &pVg); if(code != 0){ goto end; } @@ -711,7 +701,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us tscInfo("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1, numOfTopics); // request is sent - if (pParamSet->totalRspNum != 0) { + if (pParamSet->waitingRspNum != 1) { // count down since waiting rsp num init as 1 commitRspCountDown(pParamSet, tmq->consumerId, "", 0); return; @@ -750,20 +740,6 @@ void 
tmqAssignDelayedCommitTask(void* param, void* tmrId) { taosMemoryFree(param); } -//void tmqAssignDelayedReportTask(void* param, void* tmrId) { -// int64_t refId = *(int64_t*)param; -// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); -// if (tmq != NULL) { -// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); -// *pTaskType = TMQ_DELAYED_TASK__REPORT; -// taosWriteQitem(tmq->delayedTask, pTaskType); -// tsem_post(&tmq->rspSem); -// } -// -// taosReleaseRef(tmqMgmt.rsetId, refId); -// taosMemoryFree(param); -//} - int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (pMsg) { taosMemoryFree(pMsg->pData); @@ -1092,7 +1068,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->status = TMQ_CONSUMER_STATUS__INIT; pTmq->pollCnt = 0; pTmq->epoch = 0; -// pTmq->needReportOffsetRows = true; // set conf strcpy(pTmq->clientId, conf->clientId); @@ -1153,7 +1128,7 @@ _failed: } int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { - if(tmq == NULL) return TSDB_CODE_INVALID_PARA; + if(tmq == NULL || topic_list == NULL) return TSDB_CODE_INVALID_PARA; const int32_t MAX_RETRY_COUNT = 120 * 2; // let's wait for 2 mins at most const SArray* container = &topic_list->container; int32_t sz = taosArrayGetSize(container); @@ -1229,7 +1204,10 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp); int64_t transporterId = 0; - asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + if(code != 0){ + goto FAIL; + } // avoid double free if msg is sent buf = NULL; @@ -1246,7 +1224,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { int32_t retryCnt = 0; while (TSDB_CODE_MND_CONSUMER_NOT_READY == doAskEp(tmq)) { if (retryCnt++ > MAX_RETRY_COUNT) { - tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt); + tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry more than 2 minutes", tmq->consumerId); code = TSDB_CODE_MND_CONSUMER_NOT_READY; goto FAIL; } @@ -1311,54 +1289,50 @@ static SMqClientTopic* getTopicInfo(tmq_t* tmq, char* topicName){ return NULL; } +static void setVgIdle(tmq_t* tmq, char* topicName, int32_t vgId){ + taosWLockLatch(&tmq->lock); + SMqClientVg* pVg = getVgInfo(tmq, topicName, vgId); + if(pVg){ + atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + } + taosWUnLockLatch(&tmq->lock); +} + int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { SMqPollCbParam* pParam = (SMqPollCbParam*)param; - - int64_t refId = pParam->refId; -// SMqClientVg* pVg = pParam->pVg; -// SMqClientTopic* pTopic = pParam->pTopic; - + int64_t refId = pParam->refId; + int32_t vgId = pParam->vgId; + uint64_t requestId = pParam->requestId; tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); if (tmq == NULL) { taosMemoryFree(pParam); - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; return -1; } - int32_t epoch = pParam->epoch; - int32_t vgId = pParam->vgId; - uint64_t requestId = pParam->requestId; - if (code != 0) { - if (pMsg->pData) taosMemoryFree(pMsg->pData); - if (pMsg->pEpSet) taosMemoryFree(pMsg->pEpSet); - - // in case of consumer mismatch, wait for 500ms and retry if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) { 
-// taosMsleep(500); atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER); - tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, wait for 500ms and set status to be RECOVER", + tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, set status to be RECOVER", tmq->consumerId); } else if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) { SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0); if (pRspWrapper == NULL) { - tscWarn("consumer:0x%" PRIx64 " msg from vgId:%d discarded, epoch %d since out of memory, reqId:0x%" PRIx64, - tmq->consumerId, vgId, epoch, requestId); - goto CREATE_MSG_FAIL; + tscWarn("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since out of memory, reqId:0x%" PRIx64, + tmq->consumerId, vgId, requestId); + goto END; } pRspWrapper->tmqRspType = TMQ_MSG_TYPE__END_RSP; taosWriteQitem(tmq->mqueue, pRspWrapper); -// } else if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) { // poll data while insert -// taosMsleep(5); } else{ - tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, epoch %d, since %s, reqId:0x%" PRIx64, tmq->consumerId, - vgId, epoch, tstrerror(code), requestId); + tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since %s, reqId:0x%" PRIx64, tmq->consumerId, + vgId, tstrerror(code), requestId); } - goto CREATE_MSG_FAIL; + goto END; } int32_t msgEpoch = ((SMqRspHead*)pMsg->pData)->epoch; @@ -1368,43 +1342,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tscWarn("consumer:0x%" PRIx64 " msg discard from vgId:%d since from earlier epoch, rsp epoch %d, current epoch %d, reqId:0x%" PRIx64, tmq->consumerId, vgId, msgEpoch, clientEpoch, requestId); - - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); - taosMemoryFree(pParam); - - return 0; + code = -1; + goto END; } - if (msgEpoch != clientEpoch) { - tscWarn("consumer:0x%" PRIx64 " mismatch rsp from vgId:%d, epoch %d, current epoch %d, reqId:0x%" PRIx64, - tmq->consumerId, vgId, msgEpoch, clientEpoch, requestId); - } + ASSERT(msgEpoch == clientEpoch); // handle meta rsp int8_t rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType; SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0); if (pRspWrapper == NULL) { - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); - tscWarn("consumer:0x%" PRIx64 " msg discard from vgId:%d, epoch %d since out of memory", tmq->consumerId, vgId, - epoch); - goto CREATE_MSG_FAIL; + tscWarn("consumer:0x%" PRIx64 " msg discard from vgId:%d, since out of memory", tmq->consumerId, vgId); + code = TSDB_CODE_OUT_OF_MEMORY; + goto END; } pRspWrapper->tmqRspType = rspType; -// pRspWrapper->vgHandle = pVg; -// pRspWrapper->topicHandle = pTopic; pRspWrapper->reqId = requestId; pRspWrapper->pEpset = pMsg->pEpSet; + pMsg->pEpSet = NULL; pRspWrapper->vgId = vgId; strcpy(pRspWrapper->topicName, pParam->topicName); - pMsg->pEpSet = NULL; if (rspType == TMQ_MSG_TYPE__POLL_DATA_RSP) { SDecoder decoder; tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead)); @@ -1432,34 +1392,24 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tscError("consumer:0x%" PRIx64 " invalid rsp msg received, type:%d ignored", tmq->consumerId, rspType); } - taosMemoryFree(pMsg->pData); taosWriteQitem(tmq->mqueue, pRspWrapper); int32_t total = taosQueueItemSize(tmq->mqueue); tscDebug("consumer:0x%" PRIx64 " put poll res into mqueue, type:%d, vgId:%d, total in 
queue:%d, reqId:0x%" PRIx64, tmq->consumerId, rspType, vgId, total, requestId); - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - taosMemoryFree(pParam); - - return 0; - -CREATE_MSG_FAIL: - if (epoch == tmq->epoch) { - taosWLockLatch(&tmq->lock); - SMqClientVg* pVg = getVgInfo(tmq, pParam->topicName, vgId); - if(pVg){ - atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); - } - taosWUnLockLatch(&tmq->lock); +END: + if(code != 0){ + setVgIdle(tmq, pParam->topicName, vgId); } tsem_post(&tmq->rspSem); taosReleaseRef(tmqMgmt.rsetId, refId); taosMemoryFree(pParam); + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); - return -1; + return code; } typedef struct SVgroupSaveInfo { @@ -1467,6 +1417,7 @@ typedef struct SVgroupSaveInfo { STqOffsetVal commitOffset; STqOffsetVal seekOffset; int64_t numOfRows; + int32_t vgStatus; } SVgroupSaveInfo; static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopicEp, SHashObj* pVgOffsetHashMap, @@ -1475,7 +1426,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic pTopicEp->schema.nCols = 0; pTopicEp->schema.pSchema = NULL; - char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; + char vgKey[TSDB_TOPIC_FNAME_LEN + 22] = {0}; int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs); tstrncpy(pTopic->topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN); @@ -1497,7 +1448,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic .pollCnt = 0, .vgId = pVgEp->vgId, .epSet = pVgEp->epSet, - .vgStatus = TMQ_VG_STATUS__IDLE, + .vgStatus = pInfo ? pInfo->vgStatus : TMQ_VG_STATUS__IDLE, .vgSkipCnt = 0, .emptyBlockReceiveTs = 0, .numOfRows = pInfo ? pInfo->numOfRows : 0, @@ -1509,7 +1460,6 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic clientVg.offsetInfo.walVerBegin = -1; clientVg.offsetInfo.walVerEnd = -1; clientVg.seekUpdated = false; -// clientVg.receivedInfoFromVnode = false; taosArrayPush(pTopic->vgs, &clientVg); } @@ -1546,10 +1496,9 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) taosWLockLatch(&tmq->lock); int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); - char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; + char vgKey[TSDB_TOPIC_FNAME_LEN + 22] = {0}; tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); - // todo extract method for (int32_t i = 0; i < topicNumCur; i++) { // find old topic SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i); @@ -1565,7 +1514,9 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId, vgKey, buf); - SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset, .seekOffset = pVgCur->offsetInfo.beginOffset, .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows}; + SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset, .seekOffset = pVgCur->offsetInfo.beginOffset, + .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows, + .vgStatus = pVgCur->vgStatus}; taosHashPut(pVgOffsetHashMap, vgKey, strlen(vgKey), &info, sizeof(SVgroupSaveInfo)); } } @@ -1598,32 +1549,17 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) { SMqAskEpCbParam* 
pParam = (SMqAskEpCbParam*)param; tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId); - if (tmq == NULL) { - terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; -// pParam->pUserFn(tmq, terrno, NULL, pParam->pParam); - - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); - taosMemoryFree(pParam); - return terrno; + code = TSDB_CODE_TMQ_CONSUMER_CLOSED; + goto END; } if (code != TSDB_CODE_SUCCESS) { tscError("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); - pParam->pUserFn(tmq, code, NULL, pParam->pParam); - taosReleaseRef(tmqMgmt.rsetId, pParam->refId); - - taosMemoryFree(pMsg->pData); - taosMemoryFree(pMsg->pEpSet); - taosMemoryFree(pParam); - return code; + goto END; } - // tmq's epoch is monotonically increase, - // so it's safe to discard any old epoch msg. - // Epoch will only increase when received newer epoch ep msg SMqRspHead* head = pMsg->pData; int32_t epoch = atomic_load_32(&tmq->epoch); if (head->epoch <= epoch) { @@ -1642,10 +1578,10 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) { tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId, head->epoch, epoch); } - - pParam->pUserFn(tmq, code, pMsg, pParam->pParam); taosReleaseRef(tmqMgmt.rsetId, pParam->refId); +END: + pParam->pUserFn(tmq, code, pMsg, pParam->pParam); taosMemoryFree(pMsg->pEpSet); taosMemoryFree(pMsg->pData); taosMemoryFree(pParam); @@ -1734,50 +1670,46 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClie return pRspObj; } -static int32_t handleErrorBeforePoll(SMqClientVg* pVg, tmq_t* pTmq) { - atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); - tsem_post(&pTmq->rspSem); - return -1; -} - static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* pVg, int64_t timeout) { SMqPollReq req = {0}; + char* msg = NULL; + SMqPollCbParam* pParam = NULL; + SMsgSendInfo* sendInfo = NULL; + int code = 0; tmqBuildConsumeReqImpl(&req, pTmq, timeout, pTopic, pVg); int32_t msgSize = tSerializeSMqPollReq(NULL, 0, &req); - if (msgSize < 0) { - return handleErrorBeforePoll(pVg, pTmq); + if (msgSize < 0){ + code = TSDB_CODE_INVALID_MSG; + goto FAIL; } - char* msg = taosMemoryCalloc(1, msgSize); + msg = taosMemoryCalloc(1, msgSize); if (NULL == msg) { - return handleErrorBeforePoll(pVg, pTmq); + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } if (tSerializeSMqPollReq(msg, msgSize, &req) < 0) { - taosMemoryFree(msg); - return handleErrorBeforePoll(pVg, pTmq); + code = TSDB_CODE_INVALID_MSG; + goto FAIL; } - SMqPollCbParam* pParam = taosMemoryMalloc(sizeof(SMqPollCbParam)); + pParam = taosMemoryMalloc(sizeof(SMqPollCbParam)); if (pParam == NULL) { - taosMemoryFree(msg); - return handleErrorBeforePoll(pVg, pTmq); + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } pParam->refId = pTmq->refId; - pParam->epoch = pTmq->epoch; -// pParam->pVg = pVg; // pVg may be released,fix it -// pParam->pTopic = pTopic; strcpy(pParam->topicName, pTopic->topicName); pParam->vgId = pVg->vgId; pParam->requestId = req.reqId; - SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); if (sendInfo == NULL) { - taosMemoryFree(pParam); - taosMemoryFree(msg); - return handleErrorBeforePoll(pVg, pTmq); + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } sendInfo->msgInfo = (SDataBuf){.pData = msg, .len = msgSize, .handle = NULL}; @@ -1793,13 +1725,21 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, 
SMqClientVg* p tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId, pTopic->topicName, pVg->vgId, pTmq->epoch, offsetFormatBuf, req.reqId); - asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo); + if(code != 0){ + goto FAIL; + } pVg->pollCnt++; pVg->seekUpdated = false; // reset this flag. pTmq->pollCnt++; - return TSDB_CODE_SUCCESS; + return 0; + +FAIL: + taosMemoryFreeClear(pParam); + taosMemoryFreeClear(msg); + return code; } // broadcast the poll request to all related vnodes @@ -1836,6 +1776,8 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { atomic_store_32(&pVg->vgSkipCnt, 0); code = doTmqPollImpl(tmq, pTopic, pVg, timeout); if (code != TSDB_CODE_SUCCESS) { + atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + tsem_post(&tmq->rspSem); goto end; } } @@ -1847,19 +1789,15 @@ end: return code; } -static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset) { +static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper) { if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__EP_RSP) { - /*printf("ep %d %d\n", rspMsg->head.epoch, tmq->epoch);*/ if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) { SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper; SMqAskEpRsp* rspMsg = &pEpRspWrapper->msg; doUpdateLocalEp(tmq, rspWrapper->epoch, rspMsg); - /*tmqClearUnhandleMsg(tmq);*/ tDeleteSMqAskEpRsp(rspMsg); - *pReset = true; } else { tmqFreeRspWrapper(rspWrapper); - *pReset = false; } } else { return -1; @@ -1882,10 +1820,9 @@ static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal // update the valid wal version range pVg->offsetInfo.walVerBegin = sver; pVg->offsetInfo.walVerEnd = ever + 1; -// pVg->receivedInfoFromVnode = true; } -static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { +static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) { tscDebug("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, tmq->qall->numOfItems); while (1) { @@ -1961,6 +1898,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { } else { tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch); + setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pRspWrapper = tmqFreeRspWrapper(pRspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1992,6 +1930,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { } else { tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch); + setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pRspWrapper = tmqFreeRspWrapper(pRspWrapper); taosFreeQitem(pollRspWrapper); } @@ -2036,12 +1975,12 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tmq->totalRows += numOfRows; - char buf[TSDB_OFFSET_LEN] = {0}; - tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.endOffset); - tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64 - ", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64, - tmq->consumerId, pVg->vgId, buf, 
pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows, - tmq->totalRows, pollRspWrapper->reqId); + char buf[TSDB_OFFSET_LEN] = {0}; + tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.endOffset); + tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64 + ", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64, + tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows, + tmq->totalRows, pollRspWrapper->reqId); taosFreeQitem(pollRspWrapper); taosWUnLockLatch(&tmq->lock); @@ -2049,19 +1988,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { } else { tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); + setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pRspWrapper = tmqFreeRspWrapper(pRspWrapper); taosFreeQitem(pollRspWrapper); } } else { tscDebug("consumer:0x%" PRIx64 " not data msg received", tmq->consumerId); - - bool reset = false; - tmqHandleNoPollRsp(tmq, pRspWrapper, &reset); + tmqHandleNoPollRsp(tmq, pRspWrapper); taosFreeQitem(pRspWrapper); - if (pollIfReset && reset) { - tscDebug("consumer:0x%" PRIx64 ", reset and repoll", tmq->consumerId); - tmqPollImpl(tmq, timeout); - } } } } @@ -2069,7 +2003,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { if(tmq == NULL) return NULL; - void* rspObj; + void* rspObj = NULL; int64_t startTime = taosGetTimestampMs(); tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime, @@ -2101,7 +2035,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { tscError("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId); } - rspObj = tmqHandleAllRsp(tmq, timeout, false); + rspObj = tmqHandleAllRsp(tmq, timeout); if (rspObj) { tscDebug("consumer:0x%" PRIx64 " return rsp %p", tmq->consumerId, rspObj); return (TAOS_RES*)rspObj; @@ -2473,23 +2407,29 @@ end: } } -void updateEpCallbackFn(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) { +void defaultAskEpCb(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) { SAskEpInfo* pInfo = param; pInfo->code = code; - if (code == TSDB_CODE_SUCCESS) { - SMqRspHead* head = pDataBuf->pData; - - SMqAskEpRsp rsp; - tDecodeSMqAskEpRsp(POINTER_SHIFT(pDataBuf->pData, sizeof(SMqRspHead)), &rsp); - doUpdateLocalEp(pTmq, head->epoch, &rsp); - tDeleteSMqAskEpRsp(&rsp); + if (pTmq == NULL || code != TSDB_CODE_SUCCESS){ + goto END; } + SMqRspHead* head = pDataBuf->pData; + SMqAskEpRsp rsp; + tDecodeSMqAskEpRsp(POINTER_SHIFT(pDataBuf->pData, sizeof(SMqRspHead)), &rsp); + doUpdateLocalEp(pTmq, head->epoch, &rsp); + tDeleteSMqAskEpRsp(&rsp); + +END: tsem_post(&pInfo->sem); } void addToQueueCallbackFn(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) { + if (pTmq == NULL){ + terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; + return; + } if (code != TSDB_CODE_SUCCESS) { terrno = code; return; @@ -2515,7 +2455,7 @@ int32_t doAskEp(tmq_t* pTmq) { SAskEpInfo* pInfo = taosMemoryMalloc(sizeof(SAskEpInfo)); tsem_init(&pInfo->sem, 0, 0); - asyncAskEp(pTmq, updateEpCallbackFn, pInfo); + asyncAskEp(pTmq, defaultAskEpCb, pInfo); tsem_wait(&pInfo->sem); int32_t code = pInfo->code; @@ -2529,49 +2469,45 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) { 
req.consumerId = pTmq->consumerId; req.epoch = pTmq->epoch; strcpy(req.cgroup, pTmq->groupId); + int code = 0; + SMqAskEpCbParam* pParam = NULL; + void* pReq = NULL; int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req); if (tlen < 0) { tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq failed", pTmq->consumerId); - askEpFn(pTmq, TSDB_CODE_INVALID_PARA, NULL, param); - return; + code = TSDB_CODE_INVALID_PARA; + goto FAIL; } - void* pReq = taosMemoryCalloc(1, tlen); + pReq = taosMemoryCalloc(1, tlen); if (pReq == NULL) { tscError("consumer:0x%" PRIx64 ", failed to malloc askEpReq msg, size:%d", pTmq->consumerId, tlen); - askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param); - return; + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) { tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq %d failed", pTmq->consumerId, tlen); - taosMemoryFree(pReq); - - askEpFn(pTmq, TSDB_CODE_INVALID_PARA, NULL, param); - return; + code = TSDB_CODE_INVALID_PARA; + goto FAIL; } - SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam)); + pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam)); if (pParam == NULL) { tscError("consumer:0x%" PRIx64 ", failed to malloc subscribe param", pTmq->consumerId); - taosMemoryFree(pReq); - - askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param); - return; + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } pParam->refId = pTmq->refId; - pParam->epoch = pTmq->epoch; pParam->pUserFn = askEpFn; pParam->pParam = param; SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); if (sendInfo == NULL) { - taosMemoryFree(pParam); - taosMemoryFree(pReq); - askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param); - return; + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; } sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = tlen, .handle = NULL}; @@ -2586,7 +2522,15 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) { tscInfo("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId); int64_t transporterId = 0; - asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + if(code == 0){ + return; + } + +FAIL: + taosMemoryFreeClear(pParam); + taosMemoryFreeClear(pReq); + askEpFn(pTmq, code, NULL, param); } int32_t makeTopicVgroupKey(char* dst, const char* topicName, int32_t vg) { @@ -2609,8 +2553,6 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) { } taosMemoryFree(pParamSet); -// tmq->needReportOffsetRows = true; - taosReleaseRef(tmqMgmt.rsetId, refId); return 0; } @@ -2783,7 +2725,14 @@ int64_t getCommittedFromServer(tmq_t *tmq, char* tname, int32_t vgId, SEpSet* ep sendInfo->msgType = TDMT_VND_TMQ_VG_COMMITTEDINFO; int64_t transporterId = 0; - asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, sendInfo); + if(code != 0){ + taosMemoryFree(buf); + taosMemoryFree(sendInfo); + tsem_destroy(&pParam->sem); + taosMemoryFree(pParam); + return code; + } tsem_wait(&pParam->sem); code = pParam->code; @@ -3042,7 +2991,12 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, 
req.reqId); - asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo); + if(code != 0){ + taosMemoryFree(pParam); + taosMemoryFree(msg); + goto end; + } } tsem_wait(&pCommon->rsp); @@ -3192,7 +3146,14 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_ sendInfo->msgType = TDMT_VND_TMQ_SEEK; int64_t transporterId = 0; - asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); + if(code != 0){ + taosMemoryFree(msg); + taosMemoryFree(sendInfo); + tsem_destroy(&pParam->sem); + taosMemoryFree(pParam); + return code; + } tsem_wait(&pParam->sem); code = pParam->code; diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 7b8f0e67fb..f05f7dc3f9 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -2460,10 +2460,10 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0); if (code) goto _exit; } else { - if (ASSERT(varDataTLen(data + offset) <= bytes)) { + if (varDataTLen(data + offset) > bytes) { uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), bytes); - code = TSDB_CODE_INVALID_PARA; + code = TSDB_CODE_PAR_VALUE_TOO_LONG; goto _exit; } code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](pColData, (uint8_t *)varDataVal(data + offset), diff --git a/source/dnode/mgmt/node_util/src/dmUtil.c b/source/dnode/mgmt/node_util/src/dmUtil.c index 648a9ab9ce..bab7d068f3 100644 --- a/source/dnode/mgmt/node_util/src/dmUtil.c +++ b/source/dnode/mgmt/node_util/src/dmUtil.c @@ -56,7 +56,7 @@ void *dmSetMgmtHandle(SArray *pArray, tmsg_t msgType, void *nodeMsgFp, bool need void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) { taosGetCpuUsage(&pInfo->cpu_system, &pInfo->cpu_engine); - taosGetCpuCores(&pInfo->cpu_cores); + taosGetCpuCores(&pInfo->cpu_cores, false); taosGetProcMemory(&pInfo->mem_engine); taosGetSysMemory(&pInfo->mem_system); pInfo->mem_total = tsTotalMemoryKB; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index f25bd2cffb..7f96255b1e 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -401,7 +401,6 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName); if(pSub == NULL){ - ASSERT(0); continue; } taosWLockLatch(&pSub->lock); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index cbb7a27ef5..9a9dbf7c29 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1017,7 +1017,10 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { } code = mndSetDbCfgFromAlterDbReq(&dbObj, &alterReq); - if (code != 0) goto _OVER; + if (code != 0) { + if (code == TSDB_CODE_MND_DB_OPTION_UNCHANGED) code = 0; + goto _OVER; + } code = mndCheckInChangeDbCfg(pMnode, &dbObj.cfg); if (code != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 8484148642..431864ae97 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ 
b/source/dnode/mnode/impl/src/mndStb.c
@@ -1255,12 +1255,12 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
  sprintf(detail, "colVer:%d, delay1:%" PRId64 ", delay2:%" PRId64 ", deleteMark1:%" PRId64 ", "
          "deleteMark2:%" PRId64 ", igExists:%d, numOfColumns:%d, numOfFuncs:%d, numOfTags:%d, "
          "source:%d, suid:%" PRId64 ", tagVer:%d, ttl:%d, "
-          "watermark1:%" PRId64 ", watermark2:%" PRId64,
-          createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1,
+          "watermark1:%" PRId64 ", watermark2:%" PRId64,
+          createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1,
          createReq.deleteMark2, createReq.igExists, createReq.numOfColumns, createReq.numOfFuncs, createReq.numOfTags,
          createReq.source, createReq.suid, createReq.tagVer, createReq.ttl, createReq.watermark1,
          createReq.watermark2);
-
+
  mndAuditFieldStr(detail, createReq.pColumns, createReq.numOfColumns, AUDIT_DETAIL_MAX);
  mndAuditFieldStr(detail, createReq.pTags, createReq.numOfTags, AUDIT_DETAIL_MAX);
@@ -2610,7 +2610,7 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
  char detail[2000] = {0};
  sprintf(detail, "igNotExists:%d, source:%d" , dropReq.igNotExists, dropReq.source);
-
+
  SName name = {0};
  tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 9455aae8e3..66acbcc05b 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -868,6 +868,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
  mndTransDrop(pTrans);
  taosThreadMutexLock(&execNodeList.lock);
+  mDebug("register to stream task node list");
  keepStreamTasksInBuf(&streamObj, &execNodeList);
  taosThreadMutexUnlock(&execNodeList.lock);
@@ -876,13 +877,8 @@
  char detail[2000] = {0};
  sprintf(detail, "checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64
-          ", "
-          "fillHistory:%d, igExists:%d, "
-          "igExpired:%d, igUpdate:%d, lastTs:%" PRId64
-          ", "
-          "maxDelay:%" PRId64
-          ", numOfTags:%d, sourceDB:%s, "
-          "targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
+          ", fillHistory:%d, igExists:%d, igExpired:%d, igUpdate:%d, lastTs:%" PRId64 ", maxDelay:%" PRId64
+          ", numOfTags:%d, sourceDB:%s, targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
          createStreamReq.checkpointFreq, createStreamReq.createStb, createStreamReq.deleteMark,
          createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate,
          createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB,
@@ -1579,9 +1575,9 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
  } else if (taskStatus == TASK_STATUS__DROPPING) {
    memcpy(varDataVal(status), "dropping", 8);
    varDataSetLen(status, 8);
-  } else if (taskStatus == TASK_STATUS__FAIL) {
-    memcpy(varDataVal(status), "fail", 4);
-    varDataSetLen(status, 4);
+  } else if (taskStatus == TASK_STATUS__UNINIT) {
+    memcpy(varDataVal(status), "uninit", 6);
+    varDataSetLen(status, 6);
  } else if (taskStatus == TASK_STATUS__STOP) {
    memcpy(varDataVal(status), "stop", 4);
@@ -2021,14 +2017,11 @@ static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgr
 static bool isNodeEpsetChanged(const SEpSet *pPrevEpset, const SEpSet *pCurrent) {
  const SEp *pEp = GET_ACTIVE_EP(pPrevEpset);
+  const SEp* p = GET_ACTIVE_EP(pCurrent);
-  for (int32_t i = 0; i < pCurrent->numOfEps; ++i) {
-    const SEp *p =
&(pCurrent->eps[i]); - if (pEp->port == p->port && strncmp(pEp->fqdn, p->fqdn, TSDB_FQDN_LEN) == 0) { - return false; - } + if (pEp->port == p->port && strncmp(pEp->fqdn, p->fqdn, TSDB_FQDN_LEN) == 0) { + return false; } - return true; } @@ -2125,6 +2118,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange mDebug("stream:0x%" PRIx64 " involved node changed, create update trans", pStream->uid); int32_t code = createStreamUpdateTrans(pMnode, pStream, pChangeInfo); if (code != TSDB_CODE_SUCCESS) { + sdbCancelFetch(pSdb, pIter); return code; } } @@ -2228,18 +2222,22 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot); if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) { code = mndProcessVgroupChange(pMnode, &changeInfo); + + // keep the new vnode snapshot + if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { + mDebug("create trans successfully, update cached node list"); + taosArrayDestroy(execNodeList.pNodeEntryList); + execNodeList.pNodeEntryList = pNodeSnapshot; + execNodeList.ts = ts; + } + } else { + mDebug("no update found in nodeList"); + taosArrayDestroy(pNodeSnapshot); } taosArrayDestroy(changeInfo.pUpdateNodeList); taosHashCleanup(changeInfo.pDBMap); - // keep the new vnode snapshot - if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { - taosArrayDestroy(execNodeList.pNodeEntryList); - execNodeList.pNodeEntryList = pNodeSnapshot; - execNodeList.ts = ts; - } - mDebug("end to do stream task node change checking"); atomic_store_32(&mndNodeCheckSentinel, 0); return 0; @@ -2289,7 +2287,6 @@ static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *p // todo: this process should be executed by the write queue worker of the mnode int32_t mndProcessStreamHb(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; - SStreamHbMsg req = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -2314,10 +2311,13 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { for (int32_t i = 0; i < req.numOfTasks; ++i) { STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i); - int64_t k[2] = {p->streamId, p->taskId}; - int32_t index = *(int32_t *)taosHashGet(execNodeList.pTaskMap, &k, sizeof(k)); + int64_t k[2] = {p->streamId, p->taskId}; + int32_t *index = taosHashGet(execNodeList.pTaskMap, &k, sizeof(k)); + if (index == NULL) { + continue; + } - STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, index); + STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, *index); pStatusEntry->status = p->status; if (p->status != TASK_STATUS__NORMAL) { mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status)); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 0b243e0a9c..a9fb5096fb 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -642,7 +642,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { SName tableName = {0}; tNameFromString(&tableName, createTopicReq.subStbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s", + sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s", createTopicReq.igExists, tableName.tname, createTopicReq.subType, createTopicReq.withMeta, sql); SName dbname = {0}; diff --git a/source/dnode/vnode/inc/vnode.h 
b/source/dnode/vnode/inc/vnode.h index 5ae257aef8..e15f5f911d 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -288,6 +288,7 @@ typedef struct { int64_t numOfSTables; int64_t numOfCTables; int64_t numOfNTables; + int64_t numOfCmprTables; int64_t numOfNTimeSeries; int64_t numOfTimeSeries; int64_t itvTimeSeries; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 1146cfdc46..14ca7f3b02 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -165,6 +165,7 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver); int32_t tqScanWal(STQ* pTq); int32_t tqCheckAndRunStreamTask(STQ* pTq); +int32_t tqStartStreamTasks(STQ* pTq); int32_t tqStopStreamTasks(STQ* pTq); // tq util diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index d262567953..1494325657 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -61,5 +61,12 @@ int metaPrepareAsyncCommit(SMeta *pMeta) { // abort the meta txn int metaAbort(SMeta *pMeta) { if (!pMeta->txn) return 0; - return tdbAbort(pMeta->pEnv, pMeta->txn); + int code = tdbAbort(pMeta->pEnv, pMeta->txn); + if (code) { + metaError("vgId:%d, failed to abort meta since %s", TD_VID(pMeta->pVnode), tstrerror(terrno)); + } else { + pMeta->txn = NULL; + } + + return code; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index ca5346eee3..c74f36eaa2 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -699,13 +699,16 @@ int64_t metaGetTbNum(SMeta *pMeta) { // N.B. Called by statusReq per second int64_t metaGetTimeSeriesNum(SMeta *pMeta) { // sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column) - if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || + int64_t nTables = metaGetTbNum(pMeta); + if (nTables - pMeta->pVnode->config.vndStats.numOfCmprTables > 100 || + pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % (60 * 5) == 0) { int64_t num = 0; vnodeGetTimeSeriesNum(pMeta->pVnode, &num); pMeta->pVnode->config.vndStats.numOfTimeSeries = num; pMeta->pVnode->config.vndStats.itvTimeSeries = (TD_VID(pMeta->pVnode) % 100) * 2; + pMeta->pVnode->config.vndStats.numOfCmprTables = nTables; } return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 447a7e2d90..b91e902574 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -269,19 +269,21 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) { } tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey); + taosWLockLatch(&pTq->lock); + STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey)); if (pHandle == NULL) { tqWarn("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", req.consumerId, vgId, req.subKey); code = 0; + taosWUnLockLatch(&pTq->lock); goto end; } // 2. 
check consumer-vg assignment status - taosRLockLatch(&pTq->lock); if (pHandle->consumerId != req.consumerId) { tqError("ERROR tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64, req.consumerId, vgId, req.subKey, pHandle->consumerId); - taosRUnLockLatch(&pTq->lock); + taosWUnLockLatch(&pTq->lock); code = TSDB_CODE_TMQ_CONSUMER_MISMATCH; goto end; } @@ -289,7 +291,7 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) { // if consumer register to push manager, push empty to consumer to change vg status from TMQ_VG_STATUS__WAIT to // TMQ_VG_STATUS__IDLE, otherwise poll data failed after seek. tqUnregisterPushHandle(pTq, pHandle); - taosRUnLockLatch(&pTq->lock); + taosWUnLockLatch(&pTq->lock); end: rsp.code = code; @@ -496,15 +498,16 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); // 1. find handle + taosRLockLatch(&pTq->lock); STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey)); if (pHandle == NULL) { tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s not found", consumerId, vgId, req.subKey); terrno = TSDB_CODE_INVALID_MSG; + taosRUnLockLatch(&pTq->lock); return -1; } // 2. check re-balance status - taosRLockLatch(&pTq->lock); if (pHandle->consumerId != consumerId) { tqDebug("ERROR consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64, consumerId, vgId, req.subKey, pHandle->consumerId); @@ -580,7 +583,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg bool exec = tqIsHandleExec(pHandle); if(exec){ - tqInfo("vgId:%d, topic:%s, subscription is executing, wait for 10ms and retry, pHandle:%p", vgId, + tqInfo("vgId:%d, topic:%s, subscription is executing, delete wait for 10ms and retry, pHandle:%p", vgId, pHandle->subKey, pHandle); taosWUnLockLatch(&pTq->lock); taosMsleep(10); @@ -689,19 +692,29 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg } ret = tqMetaSaveHandle(pTq, req.subKey, &handle); } else { - taosWLockLatch(&pTq->lock); - - if (pHandle->consumerId == req.newConsumerId) { // do nothing - tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId); - } else { - tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, - req.newConsumerId); - atomic_store_64(&pHandle->consumerId, req.newConsumerId); - atomic_store_32(&pHandle->epoch, 0); - tqUnregisterPushHandle(pTq, pHandle); - ret = tqMetaSaveHandle(pTq, req.subKey, pHandle); + while(1){ + taosWLockLatch(&pTq->lock); + bool exec = tqIsHandleExec(pHandle); + if(exec){ + tqInfo("vgId:%d, topic:%s, subscription is executing, sub wait for 10ms and retry, pHandle:%p", pTq->pVnode->config.vgId, + pHandle->subKey, pHandle); + taosWUnLockLatch(&pTq->lock); + taosMsleep(10); + continue; + } + if (pHandle->consumerId == req.newConsumerId) { // do nothing + tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId); + } else { + tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, + req.newConsumerId); + atomic_store_64(&pHandle->consumerId, req.newConsumerId); + atomic_store_32(&pHandle->epoch, 0); + tqUnregisterPushHandle(pTq, pHandle); + ret = tqMetaSaveHandle(pTq, req.subKey, pHandle); + } + taosWUnLockLatch(&pTq->lock); + break; } - taosWUnLockLatch(&pTq->lock); } end: @@ -1416,7 +1429,7 @@ int32_t 
tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, } int8_t status = pTask->status.taskStatus; - if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY) { + if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) { // no lock needs to secure the access of the version if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) { // discard all the data when the stream task is suspended. @@ -1670,6 +1683,7 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) { char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t len = pMsg->contLen - sizeof(SMsgHead); SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; + bool allStopped = false; SStreamTaskNodeUpdateMsg req = {0}; @@ -1700,22 +1714,49 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) { tqDebug("s-task:%s receive task nodeEp update msg from mnode", pTask->id.idStr); streamTaskUpdateEpsetInfo(pTask, req.pNodeList); + streamSetStatusNormal(pTask); + + SStreamTask** ppHTask = NULL; + if (pTask->historyTaskId.taskId != 0) { + keys[0] = pTask->historyTaskId.streamId; + keys[1] = pTask->historyTaskId.taskId; + + ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); + if (ppHTask == NULL || *ppHTask == NULL) { + tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already", + pMeta->vgId, req.taskId); + } else { + tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr); + streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList); + } + } { - streamSetStatusNormal(pTask); streamMetaSaveTask(pMeta, pTask); + if (ppHTask != NULL) { + streamMetaSaveTask(pMeta, *ppHTask); + } + if (streamMetaCommit(pMeta) < 0) { // persist to disk } } streamTaskStop(pTask); + if (ppHTask != NULL) { + streamTaskStop(*ppHTask); + } + tqDebug("s-task:%s task nodeEp update completed", pTask->id.idStr); pMeta->closedTask += 1; + if (ppHTask != NULL) { + pMeta->closedTask += 1; + } + // possibly only handle the stream task. 
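// Both the stream task and, when present, its related fill-history task were
// counted into pMeta->closedTask above, so the check below compares the counter
// against the total number of tasks tracked by the stream meta; the counter is
// reset only once every tracked task has been stopped.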
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
-  bool allStopped = (pMeta->closedTask == numOfTasks);
+  allStopped = (pMeta->closedTask == numOfTasks);
  if (allStopped) {
    pMeta->closedTask = 0;
  } else {
@@ -1752,6 +1793,7 @@ _end:
  taosWUnLockLatch(&pMeta->lock);
  if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
    vInfo("vgId:%d, restart all stream tasks", vgId);
+    tqStartStreamTasks(pTq);
    tqCheckAndRunStreamTaskAsync(pTq);
  }
 }
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 0839a2bfa3..329451a0d4 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -366,7 +366,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
 bool tqNextBlockInWal(STqReader* pReader, const char* id) {
  SWalReader* pWalReader = pReader->pWalReader;
-//  uint64_t st = taosGetTimestampMs();
+  uint64_t st = taosGetTimestampMs();
  while (1) {
    SArray* pBlockList = pReader->submit.aSubmitTbData;
    if (pBlockList == NULL || pReader->nextBlk >= taosArrayGetSize(pBlockList)) {
@@ -441,9 +441,9 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) {
    pReader->msg.msgStr = NULL;
-//    if(taosGetTimestampMs() - st > 5){
-//      return false;
-//    }
+    if(taosGetTimestampMs() - st > 1000){
+      return false;
+    }
  }
 }
diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c
index 3c0321f300..eb587b8be2 100644
--- a/source/dnode/vnode/src/tq/tqStreamTask.c
+++ b/source/dnode/vnode/src/tq/tqStreamTask.c
@@ -224,6 +224,38 @@ int32_t tqStopStreamTasks(STQ* pTq) {
  return 0;
 }
+int32_t tqStartStreamTasks(STQ* pTq) {
+  SStreamMeta* pMeta = pTq->pStreamMeta;
+  int32_t vgId = TD_VID(pTq->pVnode);
+  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+
+  tqDebug("vgId:%d start all %d stream task(s)", vgId, numOfTasks);
+
+  if (numOfTasks == 0) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  taosWLockLatch(&pMeta->lock);
+
+  for (int32_t i = 0; i < numOfTasks; ++i) {
+    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
+
+    int64_t key[2] = {pTaskId->streamId, pTaskId->taskId};
+    SStreamTask** pTask = taosHashGet(pMeta->pTasks, key, sizeof(key));
+    if (pTask == NULL) {
+      continue;
+    }
+
+    int8_t status = (*pTask)->status.taskStatus;
+    if (status == TASK_STATUS__STOP) {
+      streamSetStatusNormal(*pTask);
+    }
+  }
+
+  taosWUnLockLatch(&pMeta->lock);
+  return 0;
+}
+
 int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
  // seek the stored version and extract data from WAL
  int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
@@ -383,6 +412,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
      taosThreadMutexUnlock(&pTask->lock);
      streamMetaReleaseTask(pStreamMeta, pTask);
+      if (pItem != NULL) {
+        streamFreeQitem(pItem);
+      }
      continue;
    }
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index f1b154a4ac..3d7e2eb6c1 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -213,11 +213,11 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
  walReaderVerifyOffset(pHandle->pWalReader, offset);
  int64_t fetchVer = offset->version;
-//  uint64_t st = taosGetTimestampMs();
+  uint64_t st = taosGetTimestampMs();
  int totalRows = 0;
  while (1) {
-//    int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
-//    ASSERT (savedEpoch <= pRequest->epoch);
+    int32_t savedEpoch =
atomic_load_32(&pHandle->epoch); + ASSERT(savedEpoch <= pRequest->epoch); if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer); @@ -260,8 +260,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, goto end; } -// if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 5)) { - if (totalRows >= 4096 || taosxRsp.createTableNum > 0) { + if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 1000)) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 8601248a69..ed4257b86d 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -857,6 +857,9 @@ static int32_t tsdbSnapWriteTombRecord(STsdbSnapWriter* writer, const STombRecor } else { break; } + + code = tsdbIterMergerNext(writer->ctx->tombIterMerger); + TSDB_CHECK_CODE(code, lino, _exit); } if (record->suid == INT64_MAX) { diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 1554d58d56..01dd062866 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -545,13 +545,10 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { } static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) { - STSchema *pTSchema = metaGetTbTSchema(pVnode->pMeta, suid, -1, 1); - // metaGetTbTSchemaEx(pVnode->pMeta, suid, suid, -1, &pTSchema); - - if (pTSchema) { - *num = pTSchema->numOfCols; - - taosMemoryFree(pTSchema); + SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 1); + if (pSW) { + *num = pSW->nCols; + tDeleteSchemaWrapper(pSW); } else { *num = 2; } diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c index f19068ea88..733f073ce9 100644 --- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -425,6 +425,20 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot * if (code) goto _exit; } + if (pWriter->pStreamTaskWriter) { + code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback); + if (code) goto _exit; + } + + if (pWriter->pStreamStateWriter) { + code = streamStateSnapWriterClose(pWriter->pStreamStateWriter, rollback); + if (code) goto _exit; + + code = streamStateRebuildFromSnap(pWriter->pStreamStateWriter, 0); + pWriter->pStreamStateWriter = NULL; + if (code) goto _exit; + } + if (pWriter->pRsmaSnapWriter) { code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback); if (code) goto _exit; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index d580b41093..43850ebfee 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -560,6 +560,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) vInfo("vgId:%d, not launch stream tasks, since stream tasks are disabled", vgId); } else { vInfo("vgId:%d start to launch stream tasks", pVnode->config.vgId); + tqStartStreamTasks(pVnode->pTq); tqCheckAndRunStreamTaskAsync(pVnode->pTq); } } else { diff --git 
a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c
index c0e2a44153..1c909cb47d 100644
--- a/source/libs/executor/src/streamtimewindowoperator.c
+++ b/source/libs/executor/src/streamtimewindowoperator.c
@@ -786,6 +786,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
      if (startPos < 0) {
        break;
      }
+      qDebug("===stream===ignore expired data, window end ts:%" PRId64 ", maxTs - watermark:%" PRId64, nextWin.ekey,
+             pInfo->twAggSup.maxTs - pInfo->twAggSup.waterMark);
      continue;
    }
@@ -1552,6 +1554,7 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
  tSimpleHashCleanup(pInfo->pStUpdated);
  tSimpleHashCleanup(pInfo->pStDeleted);
  pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
+  cleanupGroupResInfo(&pInfo->groupResInfo);
  taosArrayDestroy(pInfo->historyWins);
  blockDataDestroy(pInfo->pCheckpointRes);
@@ -2197,6 +2200,7 @@ void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
  pGroupResInfo->pRows = pArrayList;
  pGroupResInfo->index = 0;
  pGroupResInfo->pBuf = NULL;
+  pGroupResInfo->freeItem = false;
 }
 void doBuildSessionResult(SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, SSDataBlock* pBlock) {
@@ -2874,10 +2878,12 @@ void destroyStreamStateOperatorInfo(void* param) {
  }
  colDataDestroy(&pInfo->twAggSup.timeWindowData);
  blockDataDestroy(pInfo->pDelRes);
-  taosArrayDestroy(pInfo->historyWins);
  tSimpleHashCleanup(pInfo->pSeUpdated);
  tSimpleHashCleanup(pInfo->pSeDeleted);
  pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
+  cleanupGroupResInfo(&pInfo->groupResInfo);
+
+  taosArrayDestroy(pInfo->historyWins);
  blockDataDestroy(pInfo->pCheckpointRes);
  taosMemoryFreeClear(param);
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index 5b9f44c812..7e344866a5 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -121,7 +121,8 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
  snprintf(dnodeIdEnvItem, 32, "%s=%d", "DNODE_ID", pData->dnodeId);
  float numCpuCores = 4;
-  taosGetCpuCores(&numCpuCores);
+  taosGetCpuCores(&numCpuCores, false);
+  numCpuCores = TMAX(numCpuCores, 2);
  snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);
  char pathTaosdLdLib[512] = {0};
diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c
index 5ae2cf12c9..79e305989b 100644
--- a/source/libs/parser/src/parInsertUtil.c
+++ b/source/libs/parser/src/parInsertUtil.c
@@ -683,8 +683,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
      pStart += BitmapLen(numOfRows);
    }
    char* pData = pStart;
-
-    tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
+    ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
+    if(ret != 0){
+      goto end;
+    }
    fields += sizeof(int8_t) + sizeof(int32_t);
    if (needChangeLength) {
      pStart += htonl(colLength[j]);
@@ -712,7 +714,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
    char* pData = pStart;
    SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j);
-    tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
+    ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
+    if(ret != 0){
+      goto end;
+    }
    fields += sizeof(int8_t) + sizeof(int32_t);
    if (needChangeLength) {
      pStart += htonl(colLength[i]);
@@
-729,7 +734,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate for (int c = 0; c < boundInfo->numOfBound; ++c) { if( boundInfo->pColIndex[c] != -1){ SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, c); - tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL); + ret = tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL); + if(ret != 0){ + goto end; + } }else{ boundInfo->pColIndex[c] = c; // restore for next block } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index ddd3324c91..978f2d14d5 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3734,7 +3734,7 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t removeConstantValueFromList(SNodeList** pList) { SNode* pNode = NULL; WHERE_EACH(pNode, *pList) { - if (nodeType(pNode) == QUERY_NODE_VALUE || + if (nodeType(pNode) == QUERY_NODE_VALUE || (nodeType(pNode) == QUERY_NODE_FUNCTION && fmIsConstantResFunc((SFunctionNode*)pNode) && fmIsScalarFunc(((SFunctionNode*)pNode)->funcId))) { ERASE_NODE(*pList); continue; @@ -3753,7 +3753,11 @@ static int32_t removeConstantValueFromList(SNodeList** pList) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + + if (pSelect->pPartitionByList) { + code = removeConstantValueFromList(&pSelect->pPartitionByList); + } + if (TSDB_CODE_SUCCESS == code && pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 792d00833d..1b8bac4cbc 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -37,6 +37,8 @@ static char* getSyntaxErrFormat(int32_t errCode) { return "Column ambiguously defined: %s"; case TSDB_CODE_PAR_WRONG_VALUE_TYPE: return "Invalid value type: %s"; + case TSDB_CODE_PAR_INVALID_VARBINARY: + return "Invalid varbinary value: %s"; case TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION: return "There mustn't be aggregation"; case TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT: diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 1c17db606c..d460c8074d 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -373,6 +373,11 @@ static bool tagScanNodeHasTbname(SNode* pKeys) { static int32_t tagScanSetExecutionMode(SScanLogicNode* pScan) { pScan->onlyMetaCtbIdx = false; + if (pScan->tableType == TSDB_CHILD_TABLE) { + pScan->onlyMetaCtbIdx = false; + return TSDB_CODE_SUCCESS; + } + if (tagScanNodeListHasTbname(pScan->pScanPseudoCols)) { pScan->onlyMetaCtbIdx = false; return TSDB_CODE_SUCCESS; @@ -442,7 +447,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pScan->pScanPseudoCols = pNewScanPseudoCols; } } -*/ +*/ } if (NULL != pScan->pScanCols) { @@ -511,7 +516,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL; pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_NONE; pJoin->isLowLevelJoin = pJoinTable->isLowLevelJoin; - + int32_t code = TSDB_CODE_SUCCESS; // set left and right node @@ -559,7 +564,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect 
code = createColumnByRewriteExprs(pColList, &pJoin->node.pTargets); } } - + if (TSDB_CODE_SUCCESS == code) { SNodeList* pColList = NULL; if (QUERY_NODE_REAL_TABLE == nodeType(pJoinTable->pRight) && !pJoin->isLowLevelJoin) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 52c3546a39..5628f5cf0f 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -539,7 +539,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla return TSDB_CODE_OUT_OF_MEMORY; } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); @@ -726,7 +726,7 @@ static int32_t mergeEqCond(SNode** ppDst, SNode** ppSrc) { *ppSrc = NULL; return TSDB_CODE_SUCCESS; } - + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION); if (NULL == pLogicCond) { return TSDB_CODE_OUT_OF_MEMORY; @@ -754,7 +754,7 @@ static int32_t getJoinDataBlockDescNode(SNodeList* pChildren, int32_t idx, SData planError("Invalid join children num:%d or child type:%d", pChildren->length, nodeType(nodesListGetNode(pChildren, 0))); return TSDB_CODE_PLAN_INTERNAL_ERROR; } - + return TSDB_CODE_SUCCESS; } @@ -775,12 +775,12 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi if (TSDB_CODE_SUCCESS == code) { code = getJoinDataBlockDescNode(pChildren, 1, &pRightDesc); } - + if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond, &pJoin->pPrimKeyCond); } - + if (TSDB_CODE_SUCCESS == code) { code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets, &pJoin->pTargets); @@ -869,7 +869,7 @@ static int32_t createHashJoinColList(int16_t lBlkId, int16_t rBlkId, SNode* pEq1 if (NULL == pJoin->pOnLeft || NULL == pJoin->pOnRight) { return TSDB_CODE_OUT_OF_MEMORY; } - + code = extractHashJoinOnCols(lBlkId, rBlkId, pEq1, pJoin); if (TSDB_CODE_SUCCESS == code) { code = extractHashJoinOnCols(lBlkId, rBlkId, pEq2, pJoin); @@ -893,10 +893,10 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys return TSDB_CODE_OUT_OF_MEMORY; } SNodeList* pNew = nodesMakeList(); - + FOREACH(pNode, pJoin->pTargets) { SColumnNode* pCol = (SColumnNode*)pNode; - int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); + int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); tSimpleHashPut(pHash, name, len, &pCol, POINTER_BYTES); } @@ -905,7 +905,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys FOREACH(pNode, pJoin->pOnLeft) { SColumnNode* pCol = (SColumnNode*)pNode; - int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); + int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); SNode** p = tSimpleHashGet(pHash, name, len); if (p) { nodesListStrictAppend(pJoin->pTargets, *p); @@ -914,7 +914,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys } FOREACH(pNode, pJoin->pOnRight) { SColumnNode* pCol = (SColumnNode*)pNode; - int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); + int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN); SNode** p = tSimpleHashGet(pHash, name, len); if (p) { nodesListStrictAppend(pJoin->pTargets, *p); 
@@ -930,7 +930,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys if (p == NULL) { break; } - + nodesListStrictAppend(pJoin->pTargets, *p); } } @@ -958,10 +958,10 @@ static int32_t createHashJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond, &pJoin->pPrimKeyCond); if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pColEqCond, &pJoin->pColEqCond); - } + } if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pTagEqCond, &pJoin->pTagEqCond); - } + } if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOtherOnCond) { code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOtherOnCond, &pJoin->pFilterConditions); } @@ -973,10 +973,10 @@ static int32_t createHashJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil } if (TSDB_CODE_SUCCESS == code) { code = createHashJoinColList(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin->pPrimKeyCond, pJoin->pColEqCond, pJoin->pTagEqCond, pJoin); - } + } if (TSDB_CODE_SUCCESS == code) { code = sortHashJoinTargets(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin); - } + } if (TSDB_CODE_SUCCESS == code) { code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc); } @@ -1001,7 +1001,7 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren planError("Invalid join algorithm:%d", pJoinLogicNode->joinAlgo); break; } - + return TSDB_CODE_FAILED; } @@ -1019,7 +1019,7 @@ static int32_t createGroupCachePhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh pGrpCache->batchFetch = pLogicNode->batchFetch; SDataBlockDescNode* pChildDesc = ((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc; int32_t code = TSDB_CODE_SUCCESS; -/* +/* if (TSDB_CODE_SUCCESS == code) { code = setListSlotId(pCxt, pChildDesc->dataBlockId, -1, pLogicNode->pGroupCols, &pGrpCache->pGroupCols); } @@ -1045,7 +1045,7 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList* } if (TSDB_CODE_SUCCESS == code) { memcpy(pDynCtrl->stbJoin.srcScan, pLogicNode->stbJoin.srcScan, sizeof(pDynCtrl->stbJoin.srcScan)); - + SNode* pNode = NULL; int32_t i = 0; FOREACH(pNode, pVgList) { @@ -1062,7 +1062,7 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList* return code; } - + static int32_t createDynQueryCtrlPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SDynQueryCtrlLogicNode* pLogicNode, SPhysiNode** pPhyNode) { int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index cf04bcc1b8..916cc6e9ee 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -391,8 +391,8 @@ static void doRetryDispatchData(void* param, void* tmrId) { SStreamTask* pTask = param; if (streamTaskShouldStop(&pTask->status)) { - atomic_sub_fetch_8(&pTask->status.timerActive, 1); - qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); + int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1); + qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); return; } @@ -409,17 +409,22 @@ static void doRetryDispatchData(void* param, void* tmrId) { streamRetryDispatchStreamBlock(pTask, 
DISPATCH_RETRY_INTERVAL_MS); } } else { - atomic_sub_fetch_8(&pTask->status.timerActive, 1); - qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); + int32_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1); + qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); } } else { - atomic_sub_fetch_8(&pTask->status.timerActive, 1); + int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1); + qDebug("s-task:%s send success, jump out of timer, ref:%d", pTask->id.idStr, ref); } } void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration) { - qError("s-task:%s dispatch data in %" PRId64 "ms", pTask->id.idStr, waitDuration); - taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer); + qWarn("s-task:%s dispatch data in %" PRId64 "ms, in timer", pTask->id.idStr, waitDuration); + if (pTask->launchTaskTimer != NULL) { + taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer); + } else { + pTask->launchTaskTimer = taosTmrStart(doRetryDispatchData, waitDuration, pTask, streamEnv.timer); + } } int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz, @@ -540,8 +545,10 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { } if (++retryCount > MAX_CONTINUE_RETRY_COUNT) { // add to timer to retry - qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms", - pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS); + int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1); + + qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d", + pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref); streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); break; } @@ -982,8 +989,6 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens } - streamFreeQitem(pTask->msgInfo.pData); - pTask->msgInfo.pData = NULL; return TSDB_CODE_SUCCESS; } @@ -997,8 +1002,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time - qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data", - id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); + int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1); + qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data, ref:%d", + id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, ref); streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); } else { // pipeline send data in output queue // this message has been sent successfully, let's try next one. 
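The streamDispatch.c changes above pair every armed retry timer with the task's timerActive reference count: each path that schedules doRetryDispatchData increments the counter first, and every exit path of the timer callback releases exactly one reference and logs the remaining count so leaked references become visible. A minimal standalone sketch of that discipline follows; the names below are illustrative stand-ins, not TDengine's actual API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_schar timerActive; /* stand-in for pTask->status.timerActive */
} SDemoTaskStatus;

/* take a reference before the retry timer is started or reset */
static void armRetryTimer(SDemoTaskStatus *pStatus) {
  int8_t ref = (int8_t)(atomic_fetch_add(&pStatus->timerActive, 1) + 1);
  printf("arm retry timer, ref:%d\n", ref);
}

/* timer callback: every exit path releases exactly one reference */
static void retryTimerCb(SDemoTaskStatus *pStatus, bool sendSucceeded) {
  int8_t ref = (int8_t)(atomic_fetch_sub(&pStatus->timerActive, 1) - 1);
  if (sendSucceeded) {
    printf("send success, jump out of timer, ref:%d\n", ref);
  } else {
    printf("should stop, abort from timer, ref:%d\n", ref);
  }
}

int main(void) {
  SDemoTaskStatus st = {0};
  armRetryTimer(&st);      /* mirrors atomic_add_fetch_8 before streamRetryDispatchStreamBlock */
  retryTimerCb(&st, true); /* mirrors atomic_sub_fetch_8 inside doRetryDispatchData */
  return 0;
}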
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ff5e9adaee..5bc21286d7 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -521,6 +521,13 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t ASSERT(pTask->status.timerActive == 0); doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id); + if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) { + qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); + taosTmrStop(pTask->schedInfo.pTimer); + pTask->info.triggerParam = 0; + streamMetaReleaseTask(pMeta, pTask); + } + streamMetaRemoveTask(pMeta, keys); streamMetaReleaseTask(pMeta, pTask); } else { @@ -659,6 +666,8 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) { int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId}; void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (p == NULL) { + // pTask->chkInfo.checkpointVer may be 0, when a follower is become a leader + // In this case, we try not to start fill-history task anymore. if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.checkpointVer) < 0) { doClear(pKey, pVal, pCur, pRecycleList); tFreeStreamTask(pTask); @@ -757,7 +766,6 @@ void metaHbToMnode(void* param, void* tmrId) { SStreamHbMsg hbMsg = {0}; SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid); if (pMeta == NULL) { - // taosMemoryFree(param); return; } @@ -779,6 +787,7 @@ void metaHbToMnode(void* param, void* tmrId) { int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); SEpSet epset = {0}; + bool hasValEpset = false; hbMsg.vgId = pMeta->vgId; hbMsg.pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry)); @@ -797,51 +806,53 @@ void metaHbToMnode(void* param, void* tmrId) { if (i == 0) { epsetAssign(&epset, &(*pTask)->info.mnodeEpset); + hasValEpset = true; } } hbMsg.numOfTasks = taosArrayGetSize(hbMsg.pTaskStatus); taosRUnLockLatch(&pMeta->lock); - int32_t code = 0; - int32_t tlen = 0; + if (hasValEpset) { + int32_t code = 0; + int32_t tlen = 0; - tEncodeSize(tEncodeStreamHbMsg, &hbMsg, tlen, code); - if (code < 0) { - qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code)); - taosArrayDestroy(hbMsg.pTaskStatus); - taosReleaseRef(streamMetaId, rid); - return; - } + tEncodeSize(tEncodeStreamHbMsg, &hbMsg, tlen, code); + if (code < 0) { + qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code)); + taosArrayDestroy(hbMsg.pTaskStatus); + taosReleaseRef(streamMetaId, rid); + return; + } - void* buf = rpcMallocCont(tlen); - if (buf == NULL) { - qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - taosArrayDestroy(hbMsg.pTaskStatus); - taosReleaseRef(streamMetaId, rid); - return; - } + void* buf = rpcMallocCont(tlen); + if (buf == NULL) { + qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY)); + taosArrayDestroy(hbMsg.pTaskStatus); + taosReleaseRef(streamMetaId, rid); + return; + } - SEncoder encoder; - tEncoderInit(&encoder, buf, tlen); - if ((code = tEncodeStreamHbMsg(&encoder, &hbMsg)) < 0) { - rpcFreeCont(buf); - qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code)); - taosArrayDestroy(hbMsg.pTaskStatus); - taosReleaseRef(streamMetaId, rid); - return; + SEncoder encoder; + tEncoderInit(&encoder, buf, tlen); + if ((code = tEncodeStreamHbMsg(&encoder, &hbMsg)) < 0) { + 
rpcFreeCont(buf);
+      qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
+      taosArrayDestroy(hbMsg.pTaskStatus);
+      taosReleaseRef(streamMetaId, rid);
+      return;
+    }
+    tEncoderClear(&encoder);
+
+    SRpcMsg msg = {0};
+    initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
+    msg.info.noResp = 1;
+
+    qDebug("vgId:%d, build and send hb to mnode", pMeta->vgId);
+    tmsgSendReq(&epset, &msg);
  }
-  tEncoderClear(&encoder);
  taosArrayDestroy(hbMsg.pTaskStatus);
-
-  SRpcMsg msg = {0};
-  initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
-  msg.info.noResp = 1;
-
-  qDebug("vgId:%d, build and send hb to mnode", pMeta->vgId);
-
-  tmsgSendReq(&epset, &msg);
  taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamEnv.timer, &pMeta->hbInfo.hbTmr);
  taosReleaseRef(streamMetaId, rid);
 }
@@ -905,4 +916,4 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {
  int64_t el = taosGetTimestampMs() - st;
  qDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
-}
\ No newline at end of file
+}
diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c
index 4b86b9713c..743d87e938 100644
--- a/source/libs/stream/src/streamRecover.c
+++ b/source/libs/stream/src/streamRecover.c
@@ -235,7 +235,13 @@ static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
    qDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
    streamTaskLaunchScanHistory(pTask);
  } else {
-    qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
+    if (pTask->info.fillHistory == 1) {
+      qDebug("s-task:%s fill-history task is in normal status when started, remove it and set it to dropping", id);
+      pTask->status.taskStatus = TASK_STATUS__DROPPING;
+      ASSERT(pTask->historyTaskId.taskId == 0);
+    } else {
+      qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
+    }
  }
  // when current stream task is ready, check the related fill history task.
@@ -579,19 +585,17 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
// todo fix the bug: 2. race condition
// a fill-history task needs to be started.
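// The function below looks the fill-history task up by its {streamId, taskId} key;
// if it is not registered in pMeta->pTasks yet (it may still be under creation by
// another thread), a 100ms timer running tryLaunchHistoryTask is armed to retry,
// and status.timerActive records that exactly one such retry timer is outstanding.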
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { - int32_t tId = pTask->historyTaskId.taskId; - if (tId == 0) { + SStreamMeta* pMeta = pTask->pMeta; + int32_t hTaskId = pTask->historyTaskId.taskId; + if (hTaskId == 0) { return TSDB_CODE_SUCCESS; } ASSERT(pTask->status.downstreamReady == 1); qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr, - pTask->historyTaskId.streamId, tId); + pTask->historyTaskId.streamId, hTaskId); - SStreamMeta* pMeta = pTask->pMeta; - int32_t hTaskId = pTask->historyTaskId.taskId; - - int64_t keys[2] = {pTask->historyTaskId.streamId, pTask->historyTaskId.taskId}; + int64_t keys[2] = {pTask->historyTaskId.streamId, hTaskId}; // Set the execute conditions, including the query time window and the version range SStreamTask** pHTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys)); @@ -610,11 +614,12 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { // todo failed to create timer taosMemoryFree(pInfo); } else { - atomic_add_fetch_8(&pTask->status.timerActive, 1);// timer is active + int32_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);// timer is active + ASSERT(ref == 1); qDebug("s-task:%s set timer active flag", pTask->id.idStr); } } else { // timer exists - ASSERT(pTask->status.timerActive > 0); + ASSERT(pTask->status.timerActive == 1); qDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr); taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer); } @@ -918,6 +923,13 @@ void streamTaskHalt(SStreamTask* pTask) { return; } + // wait for checkpoint completed + while(pTask->status.taskStatus == TASK_STATUS__CK) { + qDebug("s-task:%s status:%s during generating checkpoint, wait for 1sec and retry set status:halt", pTask->id.idStr, + streamGetTaskStatusStr(TASK_STATUS__CK)); + taosMsleep(1000); + } + // upgrade to halt status if (status == TASK_STATUS__PAUSE) { qDebug("s-task:%s upgrade status to %s from %s", pTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT), diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 83aed42fe2..44c7b4f2e0 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -345,7 +345,7 @@ bool streamStateCheck(SStreamState* pState, const SWinKey* key) { return hasRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey)); #else SStateKey sKey = {.key = *key, .opNum = pState->number}; - return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen); + return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), NULL, NULL); #endif } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index bca9dcabda..be3ad73472 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -25,7 +25,8 @@ #define FLUSH_RATIO 0.5 #define FLUSH_NUM 4 -#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024); +#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024) +#define MIN_NUM_OF_ROW_BUFF 10240 struct SStreamFileState { SList* usedBuffs; @@ -67,7 +68,7 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_ pFileState->usedBuffs = tdListNew(POINTER_BYTES); pFileState->freeBuffs = tdListNew(POINTER_BYTES); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - int32_t cap = TMIN(10240, pFileState->maxRowCount); + int32_t cap = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount); 
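  // the initial row-buffer map is sized to the smaller of MIN_NUM_OF_ROW_BUFF (10240)
  // and this state's row capacity, so small stream states do not over-allocate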
pFileState->rowBuffMap = tSimpleHashInit(cap, hashFn); if (!pFileState->usedBuffs || !pFileState->freeBuffs || !pFileState->rowBuffMap) { goto _error; @@ -272,10 +273,12 @@ int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, voi *pVLen = pFileState->rowSize; *pVal = *pos; (*pos)->beUsed = true; + (*pos)->beFlushed = false; return TSDB_CODE_SUCCESS; } SRowBuffPos* pNewPos = getNewRowPos(pFileState); pNewPos->beUsed = true; + pNewPos->beFlushed = false; ASSERT(pNewPos->pRowBuff); memcpy(pNewPos->pKey, pKey, keyLen); @@ -375,6 +378,10 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) { SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data; ASSERT(pPos->pRowBuff && pFileState->rowSize > 0); + if (pPos->beFlushed) { + continue; + } + pPos->beFlushed = true; if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) { streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); @@ -513,6 +520,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) { ASSERT(pVLen == pFileState->rowSize); memcpy(pNewPos->pRowBuff, pVal, pVLen); taosMemoryFreeClear(pVal); + pNewPos->beFlushed = true; code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { destroyRowBuffPos(pNewPos); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 1ff266ad8f..edecfcb2bc 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2562,7 +2562,11 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex); } - SSyncLogReplMgr oldLogReplMgrs[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA] = {0}; + SSyncLogReplMgr* oldLogReplMgrs = NULL; + int64_t length = sizeof(SSyncLogReplMgr) * (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA); + oldLogReplMgrs = taosMemoryMalloc(length); + if (NULL == oldLogReplMgrs) return -1; + memset(oldLogReplMgrs, 0, length); for(int i = 0; i < oldtotalReplicaNum; i++){ oldLogReplMgrs[i] = *(ths->logReplMgrs[i]); @@ -2643,6 +2647,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum } } + taosMemoryFree(oldLogReplMgrs); + return 0; } diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index 262f3d27e6..54baee350a 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -229,7 +229,15 @@ void tdbPCacheInvalidatePage(SPCache *pCache, SPager *pPager, SPgno pgno) { } if (pPage) { + bool moveToFreeList = false; + if (pPage->pLruNext) { + tdbPCachePinPage(pCache, pPage); + moveToFreeList = true; + } tdbPCacheRemovePageFromHash(pCache, pPage); + if (moveToFreeList) { + tdbPCacheFreePage(pCache, pPage); + } } } diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 445c24159f..f2b45c5b84 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -296,7 +296,7 @@ int32_t tfsMkdirRecurAt(STfs *pTfs, const char *rname, SDiskID diskId) { // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/dirname.3.html char *dir = taosStrdup(taosDirName(s)); - if (tfsMkdirRecurAt(pTfs, dir, diskId) < 0) { + if (strlen(dir) >= strlen(rname) || tfsMkdirRecurAt(pTfs, dir, diskId) < 0) { taosMemoryFree(s); taosMemoryFree(dir); return -1; diff --git a/source/libs/tfs/src/tfsTier.c 
b/source/libs/tfs/src/tfsTier.c index a24e3cf021..1c47182e2a 100644 --- a/source/libs/tfs/src/tfsTier.c +++ b/source/libs/tfs/src/tfsTier.c @@ -116,7 +116,11 @@ int32_t tfsAllocDiskOnTier(STfsTier *pTier) { if (pDisk == NULL) continue; - if (pDisk->size.avail < tsMinDiskFreeSize) continue; + if (pDisk->size.avail < tsMinDiskFreeSize) { + uInfo("disk %s is full and skip it, level:%d id:%d free size:%" PRId64 " min free size:%" PRId64, pDisk->path, + pDisk->level, pDisk->id, pDisk->size.avail, tsMinDiskFreeSize); + continue; + } retId = diskId; terrno = 0; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index aa264f3126..3dbb224e79 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -2156,12 +2156,10 @@ static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) { if (rpcDebugFlag & DEBUG_DEBUG) { STraceId* trace = &pMsg->msg.info.traceId; - char* tbuf = taosMemoryCalloc(1, TSDB_FQDN_LEN * 5); - + char tbuf[512] = {0}; EPSET_TO_STR(&pCtx->epSet, tbuf); tGDebug("%s retry on next node,use:%s, step: %d,timeout:%" PRId64 "", transLabel(pThrd->pTransInst), tbuf, pCtx->retryStep, pCtx->retryNextInterval); - taosMemoryFree(tbuf); } STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg)); @@ -2387,7 +2385,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { bool hasEpSet = cliTryExtractEpSet(pResp, &pCtx->epSet); if (hasEpSet) { if (rpcDebugFlag & DEBUG_TRACE) { - char tbuf[256] = {0}; + char tbuf[512] = {0}; EPSET_TO_STR(&pCtx->epSet, tbuf); tGTrace("%s conn %p extract epset from msg", CONN_GET_INST_LABEL(pConn), pConn); } diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index 46e28b529d..6438ce7ed0 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -43,7 +43,7 @@ target_link_libraries( ) if(TD_WINDOWS) target_link_libraries( - os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp version + os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp version KtmW32 ) elseif(TD_DARWIN_64) find_library(CORE_FOUNDATION_FRAMEWORK CoreFoundation) diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 1177ff562e..120dbe920c 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -19,6 +19,8 @@ #ifdef WINDOWS #include +#include +#include #define F_OK 0 #define W_OK 2 #define R_OK 4 @@ -175,12 +177,32 @@ int32_t taosRemoveFile(const char *path) { return remove(path); } int32_t taosRenameFile(const char *oldName, const char *newName) { #ifdef WINDOWS - bool code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED); - if (!code) { - printf("failed to rename file %s to %s, reason:%s\n", oldName, newName, strerror(errno)); + bool finished = false; + + HANDLE transactionHandle = CreateTransaction(NULL, NULL, 0, 0, 0, INFINITE, NULL); + if (transactionHandle == INVALID_HANDLE_VALUE) { + printf("failed to rename file %s to %s, reason: CreateTransaction failed.\n", oldName, newName); + return -1; } - return code ? 
0 : -1; + BOOL result = MoveFileTransacted(oldName, newName, NULL, NULL, MOVEFILE_REPLACE_EXISTING, transactionHandle); + + if (result) { + finished = CommitTransaction(transactionHandle); + if (!finished) { + DWORD error = GetLastError(); + printf("failed to rename file %s to %s, reason: CommitTransaction errcode %d.\n", oldName, newName, error); + } + } else { + RollbackTransaction(transactionHandle); + DWORD error = GetLastError(); + finished = false; + printf("failed to rename file %s to %s, reason: MoveFileTransacted errcode %d.\n", oldName, newName, error); + } + + CloseHandle(transactionHandle); + + return finished ? 0 : -1; #else int32_t code = rename(oldName, newName); if (code < 0) { @@ -904,10 +926,16 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); + // Both gzclose() and fclose() will close the associated fd, so they need to have different fds. + FileFd gzFd = dup(pFile->fd); + if (gzFd < 0) { + ret = -4; + goto cmp_end; + } + dstFp = gzdopen(gzFd, "wb6f"); if (dstFp == NULL) { ret = -3; - taosCloseFile(&pFile); + close(gzFd); goto cmp_end; } diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 5f73251e3b..562328a198 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -121,6 +121,8 @@ LONG WINAPI exceptionHandler(LPEXCEPTION_POINTERS exception); static pid_t tsProcId; static char tsSysNetFile[] = "/proc/net/dev"; static char tsSysCpuFile[] = "/proc/stat"; +static char tsCpuPeriodFile[] = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"; +static char tsCpuQuotaFile[] = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"; static char tsProcCpuFile[25] = {0}; static char tsProcMemFile[25] = {0}; static char tsProcIOFile[25] = {0}; @@ -234,7 +236,7 @@ bool taosCheckSystemIsLittleEnd() { void taosGetSystemInfo() { #ifdef WINDOWS - taosGetCpuCores(&tsNumOfCores); + taosGetCpuCores(&tsNumOfCores, false); taosGetTotalMemory(&tsTotalMemoryKB); taosGetCpuUsage(NULL, NULL); #elif defined(_TD_DARWIN_64) @@ -245,7 +247,7 @@ void taosGetSystemInfo() { tsNumOfCores = sysconf(_SC_NPROCESSORS_ONLN); #else taosGetProcIOnfos(); - taosGetCpuCores(&tsNumOfCores); + taosGetCpuCores(&tsNumOfCores, false); taosGetTotalMemory(&tsTotalMemoryKB); taosGetCpuUsage(NULL, NULL); taosGetCpuInstructions(&tsSSE42Enable, &tsAVXEnable, &tsAVX2Enable, &tsFMAEnable); @@ -493,7 +495,55 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) { #endif } -int32_t taosGetCpuCores(float *numOfCores) { +// Returns the container's CPU quota if successful, otherwise returns the physical CPU cores +static int32_t taosCntrGetCpuCores(float *numOfCores) { +#ifdef WINDOWS + return -1; +#elif defined(_TD_DARWIN_64) + return -1; +#else + TdFilePtr pFile = NULL; + if (!(pFile = taosOpenFile(tsCpuQuotaFile, TD_FILE_READ | TD_FILE_STREAM))) { + goto _sys; + } + char qline[32] = {0}; + if (taosGetsFile(pFile, sizeof(qline), qline) < 0) { + taosCloseFile(&pFile); + goto _sys; + } + taosCloseFile(&pFile); + float quota = taosStr2Float(qline, NULL); + if (quota < 0) { + goto _sys; + } + + if (!(pFile = taosOpenFile(tsCpuPeriodFile, TD_FILE_READ | TD_FILE_STREAM))) { + goto _sys; + } + char pline[32] = {0}; + if (taosGetsFile(pFile, sizeof(pline), pline) < 0) { + taosCloseFile(&pFile); + goto _sys; + } + taosCloseFile(&pFile); + + float period = taosStr2Float(pline, NULL); + float quotaCores = quota / period; + float sysCores = sysconf(_SC_NPROCESSORS_ONLN); + if (quotaCores < sysCores && quotaCores > 0) { + *numOfCores = 
quotaCores; + } else { + *numOfCores = sysCores; + } + goto _end; +_sys: + *numOfCores = sysconf(_SC_NPROCESSORS_ONLN); +_end: + return 0; +#endif +} + +int32_t taosGetCpuCores(float *numOfCores, bool physical) { #ifdef WINDOWS SYSTEM_INFO info; GetSystemInfo(&info); @@ -503,7 +553,11 @@ int32_t taosGetCpuCores(float *numOfCores) { *numOfCores = sysconf(_SC_NPROCESSORS_ONLN); return 0; #else - *numOfCores = sysconf(_SC_NPROCESSORS_ONLN); + if (physical) { + *numOfCores = sysconf(_SC_NPROCESSORS_ONLN); + } else { + taosCntrGetCpuCores(numOfCores); + } return 0; #endif } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 1e5338c9c9..1b7ed1c616 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -538,7 +538,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ROW_LENGTH, "Row length exceeds TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of columns") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TOO_MANY_COLUMNS, "Too many columns") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FIRST_COLUMN, "First column must be timestamp") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN, "Invalid binary/nchar column/tag length") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN, "Invalid varbinary/binary/nchar column/tag length") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TAGS_NUM, "Invalid number of tag columns") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Invalid stream query") @@ -573,7 +573,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalidate varbinary type") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalid varbinary value") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_IP_RANGE, "Invalid IPV4 address ranges") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh index 2a44ee7552..5de7261e02 100644 --- a/tests/docs-examples-test/python.sh +++ b/tests/docs-examples-test/python.sh @@ -86,7 +86,7 @@ pip3 install kafka-python python3 kafka_example_consumer.py # 21 -pip3 install taos-ws-py +pip3 install taos-ws-py==0.2.6 python3 conn_websocket_pandas.py # 22 diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 63aebe9de9..a2f3cdddbd 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -6,6 +6,21 @@ ,,y,unit-test,bash test.sh #system test +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_session.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_state_window.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_state_window.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_session.py 
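For reference, the container-aware core count introduced in taosCntrGetCpuCores above reduces to quota / period from the cgroup v1 CPU controller, clamped to the physical core count. A worked Python sketch under the same fallback rules (cgroup v2 is out of scope here; the two file paths are the ones the patch reads, everything else is illustrative):

```python
import os

CPU_QUOTA_FILE = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"    # as in the patch
CPU_PERIOD_FILE = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"  # as in the patch

def container_cpu_cores():
    sys_cores = float(os.cpu_count() or 1)
    try:
        with open(CPU_QUOTA_FILE) as f:
            quota = float(f.readline())
        with open(CPU_PERIOD_FILE) as f:
            period = float(f.readline())
    except OSError:
        return sys_cores              # no cgroup v1 CPU controller: fall back
    if quota < 0:
        return sys_cores              # quota of -1 means "unlimited"
    quota_cores = quota / period      # e.g. 200000us / 100000us -> 2.0 cores
    return quota_cores if 0 < quota_cores < sys_cores else sys_cores
```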
+,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session_ext.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/partition_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py + ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3 @@ -24,7 +39,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4 @@ -79,10 +94,10 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 4 -,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False +,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False +,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False +,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/create_wrong_topic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 @@ -135,6 +150,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py ,,n,system-test,python3 ./test.py -f 7-tmq/tmq_offset.py ,,n,system-test,python3 ./test.py -f 7-tmq/tmqDataPrecisionUnit.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/raw_block_interface_test.py @@ -171,6 +187,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py @@ -467,7 +485,7 
@@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +#,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index 206f99ff3d..a8755f160b 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -79,7 +79,7 @@ md5sum /home/TDinternal/debug/build/lib/libtaos.so #define taospy 2.7.10 pip3 list|grep taospy pip3 uninstall taospy -y -pip3 install --default-timeout=120 taospy==2.7.10 +pip3 install --default-timeout=120 taospy==2.7.12 $TIMEOUT_CMD $cmd RET=$? diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 00e1786399..343cbd72c3 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -219,11 +219,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012'%(crash_gen_path ,arguments) return crash_gen_cmd diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index e37cda0a27..29d9d61732 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -220,11 +220,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012'%(crash_gen_path ,arguments) return crash_gen_cmd diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index af19836a83..8546d436de 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -220,11 +220,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 
0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203,0x4012 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203,0x4012'%(crash_gen_path ,arguments) return crash_gen_cmd diff --git a/tests/pytest/util/cluster.py b/tests/pytest/util/cluster.py index a6e3530dc9..7c653f9f2e 100644 --- a/tests/pytest/util/cluster.py +++ b/tests/pytest/util/cluster.py @@ -52,8 +52,8 @@ class ConfigureyCluster: dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}") # configure dnoe of independent mnodes - if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == "True" : - tdLog.info("set mnode supportVnodes 0") + if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True : + tdLog.info(f"set mnode:{num} supportVnodes 0") dnode.addExtraCfg("supportVnodes", 0) # print(dnode) self.dnodes.append(dnode) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 6d813a4166..ea042829d6 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -29,7 +29,7 @@ from util.constant import * from dataclasses import dataclass,field from typing import List from datetime import datetime - +import re @dataclass class DataSet: ts_data : List[int] = field(default_factory=list) @@ -102,8 +102,8 @@ class TDCom: self.smlChildTableName_value = None self.defaultJSONStrType_value = None self.smlTagNullName_value = None - self.default_varchar_length = 256 - self.default_nchar_length = 256 + self.default_varchar_length = 6 + self.default_nchar_length = 6 self.default_varchar_datatype = "letters" self.default_nchar_datatype = "letters" self.default_tagname_prefix = "t" @@ -122,6 +122,7 @@ class TDCom: self.stb_name = "stb" self.ctb_name = "ctb" self.tb_name = "tb" + self.tbname = str() self.need_tagts = False self.tag_type_str = "" self.column_type_str = "" @@ -132,6 +133,73 @@ class TDCom: self.full_type_list = ["tinyint", "smallint", "int", "bigint", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned", "float", "double", "binary", "nchar", "bool"] self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] self.Boundary = DataBoundary() + self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] + self.case_name = str() + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.range_count = 5 + self.default_interval = 5 + self.stream_timeout = 12 + self.record_history_ts = str() + self.precision = "ms" + self.date_time = self.genTs(precision=self.precision)[0] + self.subtable = True + self.partition_tbname_alias = "ptn_alias" if self.subtable else "" + self.partition_col_alias = "pcol_alias" if self.subtable else "" + self.partition_tag_alias = "ptag_alias" if self.subtable else "" + self.partition_expression_alias = "pexp_alias" if self.subtable else "" + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.subtable_prefix = "prefix_" if self.subtable else "" + self.subtable_suffix = "_suffix" if self.subtable else "" 
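The subtable prefix/suffix attributes added above feed the sub-table names that the stream cases later assert on. Illustratively, and only as an assumption about how a test composes them (the exact name depends on each case's subtable() expression, not on a fixed framework rule):

```python
subtable = True
subtable_prefix = "prefix_" if subtable else ""
subtable_suffix = "_suffix" if subtable else ""

# A stream created with e.g. subtable(concat('prefix_', tbname, '_suffix'))
# would be expected to emit rows into a sub-table named like this:
tbname = "ctb1"  # illustrative source sub-table name
assert f"{subtable_prefix}{tbname}{subtable_suffix}" == "prefix_ctb1_suffix"
```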
+ self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) + self.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15]))) + self.stb_source_select_str = ','.join(self.downsampling_function_list) + self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) + self.fill_function_list = ["min(c1)", "max(c2)", "sum(c3)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.fill_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list))) + self.fill_stb_source_select_str = ','.join(self.fill_function_list) + self.fill_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list[0:13]))) + self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13]) + self.ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:13]) + self.stream_case_when_tbname = "tbname" + + self.update = True + self.disorder = True + if self.disorder: + self.update = False + self.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] + + self.stb_data_filter_sql = f'ts >= {self.date_time}+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c9 <> 0 or c10 is not Null or c11 is Null or \ + c12 between "na" and "nchar4" and c11 not between "bi" and "binary" and c12 match "nchar[19]" and c12 nmatch "nchar[25]" or c13 = True or \ + c5 in (1, 2, 3) or c6 not in (6, 7) and c12 like "nch%" and c11 not like "bina_" and c6 < 10 or c12 is Null or c8 >= 4 and t1 = 1 or t2 > 1 \ + and t3 != 4 or c4 <= 3 and t9 <> 0 or t10 is not Null or t11 is Null or t12 between "na" and "nchar4" and t11 not between "bi" and "binary" \ + or t12 match "nchar[19]" or t12 nmatch "nchar[25]" or t13 = True or t5 in (1, 2, 3) or t6 not in (6, 7) and t12 like "nch%" \ + and t11 not like "bina_" and t6 <= 10 or t12 is Null or t8 >= 4' + self.tb_data_filter_sql = self.stb_data_filter_sql.partition(" and t1")[0] + + self.filter_source_select_elm = "*" + self.stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.partitial_stb_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.split(",")[:3]) + self.exchange_stb_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.split(",")[0], self.stb_filter_des_select_elm.split(",")[2], self.stb_filter_des_select_elm.split(",")[1]]) + self.partitial_ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:2]) + self.tb_filter_des_select_elm = 
self.stb_filter_des_select_elm.partition(", t1")[0] + self.tag_filter_des_select_elm = self.stb_filter_des_select_elm.partition("c13, ")[2] + self.partition_by_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.partition_by_downsampling_function_list))) + self.partition_by_stb_source_select_str = ','.join(self.partition_by_downsampling_function_list) + self.exchange_tag_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[0], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[2], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[1]]) + self.partitial_tag_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[:3]) + self.partitial_tag_stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t3, t2, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.cast_tag_filter_des_select_elm = "t5,t11,t13" + self.cast_tag_stb_filter_des_select_elm = "ts, t1, t2, t3, t4, cast(t1 as TINYINT UNSIGNED), t6, t7, t8, t9, t10, cast(t2 as varchar(256)), t12, cast(t3 as bool)" + self.tag_count = len(self.tag_filter_des_select_elm.split(",")) + self.state_window_range = list() # def init(self, conn, logSql): # # tdSql.init(conn.cursor(), logSql) @@ -745,6 +813,1018 @@ class TDCom: """ return ','.join(map(lambda i: f'{gen_type}{i} {data_type}', range(count))) + # stream + def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + """create_stream + + Args: + stream_name (str): stream_name + des_table (str): target stable + source_sql (str): stream sql + trigger_mode (str, optional): at_once/window_close/max_delay. Defaults to None. + watermark (str, optional): watermark time. Defaults to None. + max_delay (str, optional): max_delay time. Defaults to None. + ignore_expired (int, optional): ignore expired data. Defaults to None. + ignore_update (int, optional): ignore update data. Defaults to None. + subtable_value (str, optional): subtable. Defaults to None. + fill_value (str, optional): fill. Defaults to None. + fill_history_value (int, optional): 0/1. Defaults to None. + stb_field_name_value (str, optional): existed stb. Defaults to None. + tag_value (str, optional): custom tag. Defaults to None. + use_exist_stb (bool, optional): use existed stb tag. Defaults to False. + use_except (bool, optional): Exception tag. Defaults to False. 
+ + Returns: + str: stream + """ + if subtable_value is None: + subtable = "" + else: + subtable = f'subtable({subtable_value})' + + if fill_value is None: + fill = "" + else: + fill = f'fill({fill_value})' + + if fill_history_value is None: + fill_history = "" + else: + fill_history = f'fill_history {fill_history_value}' + + if use_exist_stb: + if stb_field_name_value is None: + stb_field_name = "" + else: + stb_field_name = f'({stb_field_name_value})' + + if tag_value is None: + tags = "" + else: + tags = f'tags({tag_value})' + else: + stb_field_name = "" + tags = "" + + + if trigger_mode is None: + stream_options = "" + if watermark is not None: + stream_options = f'watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};') + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};' + else: + if watermark is None: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay}' + else: + stream_options = f'trigger {trigger_mode}' + else: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay} watermark {watermark}' + else: + stream_options = f'trigger {trigger_mode} watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};') + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + + def pause_stream(self, stream_name, if_exist=True, if_not_exist=False): + """pause_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + tdSql.execute(f'pause stream {if_exist_value} {if_not_exist_value} {stream_name}') + + def resume_stream(self, stream_name, if_exist=True, if_not_exist=False, ignore_untreated=False): + """resume_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + ignore_untreated (bool, optional): Defaults to False. 
+ """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + ignore_untreated_value = "ignore untreated" if ignore_untreated else "" + tdSql.execute(f'resume stream {if_exist_value} {if_not_exist_value} {ignore_untreated_value} {stream_name}') + + def drop_all_streams(self): + """drop all streams + """ + tdSql.query("show streams") + stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) + for stream_name in stream_name_list: + tdSql.execute(f'drop stream if exists {stream_name};') + + def drop_db(self, dbname="test"): + """drop a db + + Args: + dbname (str, optional): Defaults to "test". + """ + if dbname[0].isdigit(): + tdSql.execute(f'drop database if exists `{dbname}`') + else: + tdSql.execute(f'drop database if exists {dbname}') + + def drop_all_db(self): + """drop all databases + """ + tdSql.query("show databases;") + db_list = list(map(lambda x: x[0], tdSql.queryResult)) + for dbname in db_list: + if dbname not in self.white_list and "telegraf" not in dbname: + tdSql.execute(f'drop database if exists `{dbname}`') + + def time_cast(self, time_value, split_symbol="+"): + """cast bigint to timestamp + + Args: + time_value (bigint): ts + split_symbol (str, optional): split sympol. Defaults to "+". + + Returns: + _type_: timestamp + """ + ts_value = str(time_value).split(split_symbol)[0] + if split_symbol in str(time_value): + ts_value_offset = str(time_value).split(split_symbol)[1] + else: + ts_value_offset = "0s" + return f'cast({ts_value} as timestamp){split_symbol}{ts_value_offset}' + + def clean_env(self): + """drop all streams and databases + """ + self.drop_all_streams() + self.drop_all_db() + + def set_precision_offset(self, precision): + if precision == "ms": + self.offset = 1000 + elif precision == "us": + self.offset = 1000000 + elif precision == "ns": + self.offset = 1000000000 + else: + pass + + def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): + """generate ts + + Args: + precision (str, optional): db precision. Defaults to "ms". + ts (str, optional): input ts. Defaults to "". + protype (str, optional): "taosc" or "restful". Defaults to "taosc". + ns_tag (_type_, optional): use ns. Defaults to None. + + Returns: + timestamp, datetime: timestamp and datetime + """ + if precision == "ns": + if ts == "" or ts is None: + ts = time.time_ns() + else: + ts = ts + if ns_tag is None: + dt = ts + else: + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + if protype == "restful": + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + else: + if ts == "" or ts is None: + ts = time.time() + else: + ts = ts + if precision == "ms" or precision is None: + ts = int(round(ts * 1000)) + dt = datetime.fromtimestamp(ts // 1000) + if protype == "taosc": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000' + elif protype == "restful": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + else: + pass + elif precision == "us": + ts = int(round(ts * 1000000)) + dt = datetime.fromtimestamp(ts // 1000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' 
+ str(int(ts % 1000000)).zfill(6) + return ts, dt + + def sgen_column_type_str(self, column_elm_list): + """generate column type str + + Args: + column_elm_list (list): column_elm_list + """ + self.column_type_str = "" + if column_elm_list is None: + self.column_type_str = self.gen_default_column_str() + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.column_type_str += f'{self.default_colname_prefix}{self.default_column_index_start_num} {column_elm["type"]}, ' + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_type_str = self.column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), ' + self.default_column_index_start_num += 1 + else: + continue + self.column_type_str = self.default_colts_name + " timestamp, " + self.column_type_str.rstrip()[:-1] + + def sgen_tag_type_str(self, tag_elm_list): + """generate tag type str + + Args: + tag_elm_list (list): tag_elm_list + """ + self.tag_type_str = "" + if tag_elm_list is None: + self.tag_type_str = self.gen_default_tag_str() + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.tag_type_str += f'{self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_elm["type"]}, ' + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), ' + self.default_tag_index_start_num += 1 + else: + continue + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + if self.need_tagts: + self.tag_type_str = self.default_tagts_name + " timestamp, " + self.tag_type_str + + def sgen_tag_value_list(self, tag_elm_list, ts_value=None): + """generate tag value list + + Args: + tag_elm_list (list): tag element list + ts_value (timestamp, optional): Defaults to None. + """ + if self.need_tagts: + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if tag_elm_list is None: + self.tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype)) + else: + self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", "")) + else: + continue + # if self.need_tagts and self.ts_value is not None and len(str(self.ts_value)) > 0: + if self.need_tagts: + self.tag_value_list = [self.ts_value] + self.tag_value_list + + def screateDb(self, dbname="test", drop_db=True, **kwargs): + """create database + + Args: + dbname (str, optional): Defaults to "test". + drop_db (bool, optional): Defaults to True. 
+ """ + tdLog.info("creating db ...") + db_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + if param == "precision": + db_params += f'{param} "{value}" ' + else: + db_params += f'{param} {value} ' + if drop_db: + self.drop_db(dbname) + tdSql.execute(f'create database if not exists {dbname} {db_params}') + tdSql.execute(f'use {dbname}') + + def screate_stable(self, dbname=None, stbname="stb", use_name="table", column_elm_list=None, tag_elm_list=None, + need_tagts=False, count=1, default_stbname_prefix="stb", default_stbname_index_start_num=1, + default_column_index_start_num=1, default_tag_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to "stb". + use_name (str, optional): stable/table, Defaults to "table". + column_elm_list (list, optional): use for sgen_column_type_str(), Defaults to None. + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + need_tagts (bool, optional): tag use timestamp, Defaults to False. + count (int, optional): stable count, Defaults to 1. + default_stbname_prefix (str, optional): Defaults to "stb". + default_stbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. + default_tag_index_start_num (int, optional): Defaults to 1. + """ + tdLog.info("creating stable ...") + if dbname is not None: + self.dbname = dbname + self.need_tagts = need_tagts + self.default_stbname_prefix = default_stbname_prefix + self.default_stbname_index_start_num = default_stbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + self.default_tag_index_start_num = default_tag_index_start_num + stb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + stb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + self.sgen_tag_type_str(tag_elm_list) + if self.dbname is not None: + stb_name = f'{self.dbname}.{stbname}' + else: + stb_name = stbname + if int(count) <= 1: + create_stable_sql = f'create {use_name} {stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + tdSql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_stbname_prefix}{default_stbname_index_start_num} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + default_stbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def screate_ctable(self, dbname=None, stbname=None, ctbname="ctb", use_name="table", tag_elm_list=None, ts_value=None, count=1, default_varchar_datatype="letters", default_nchar_datatype="letters", default_ctbname_prefix="ctb", default_ctbname_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to None. + ctbname (str, optional): Defaults to "ctb". + use_name (str, optional): Defaults to "table". + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): ctb count, Defaults to 1. + default_varchar_datatype (str, optional): Defaults to "letters". + default_nchar_datatype (str, optional): Defaults to "letters". + default_ctbname_prefix (str, optional): Defaults to "ctb". + default_ctbname_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating childtable ...") + self.default_varchar_datatype = default_varchar_datatype + self.default_nchar_datatype = default_nchar_datatype + self.default_ctbname_prefix = default_ctbname_prefix + self.default_ctbname_index_start_num = default_ctbname_index_start_num + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + self.sgen_tag_value_list(tag_elm_list, ts_value) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in self.tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + if dbname is not None: + self.dbname = dbname + ctb_name = f'{self.dbname}.{ctbname}' + else: + ctb_name = ctbname + if stbname is not None: + stb_name = stbname + if int(count) <= 1: + create_ctable_sql = f'create {use_name} {ctb_name} using {stb_name} tags ({tag_value_str}) {ctb_params};' + tdSql.execute(create_ctable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_ctbname_prefix}{default_ctbname_index_start_num} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + default_ctbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def sgen_column_value_list(self, column_elm_list, need_null, ts_value=None): + """_summary_ + + Args: + column_elm_list (list): gen_random_type_value() + need_null (bool): if insert null + ts_value (timestamp, optional): Defaults to None. + """ + self.column_value_list = list() + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if column_elm_list is None: + self.column_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype)) + else: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", "")) + else: + continue + if need_null: + for i in range(int(len(self.column_value_list)/2)): + index_num = random.randint(0, len(self.column_value_list)-1) + self.column_value_list[index_num] = None + self.column_value_list = [self.ts_value] + self.column_value_list + + def screate_table(self, dbname=None, tbname="tb", use_name="table", column_elm_list=None, + count=1, default_tbname_prefix="tb", default_tbname_index_start_num=1, + default_column_index_start_num=1, **kwargs): + """create ctable + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to "tb". + use_name (str, optional): Defaults to "table". + column_elm_list (list, optional): Defaults to None. + count (int, optional): Defaults to 1. + default_tbname_prefix (str, optional): Defaults to "tb". + default_tbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating table ...") + if dbname is not None: + self.dbname = dbname + self.default_tbname_prefix = default_tbname_prefix + self.default_tbname_index_start_num = default_tbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + if self.dbname is not None: + tb_name = f'{self.dbname}.{tbname}' + else: + tb_name = tbname + if int(count) <= 1: + create_table_sql = f'create {use_name} {tb_name} ({self.column_type_str}) {tb_params};' + tdSql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create {use_name} {self.dbname}.{default_tbname_prefix}{default_tbname_index_start_num} ({self.column_type_str}) {tb_params};' + default_tbname_index_start_num += 1 + tdSql.execute(create_table_sql) + + def sinsert_rows(self, dbname=None, tbname=None, column_ele_list=None, ts_value=None, count=1, need_null=False): + """insert rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + column_ele_list (list, optional): Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): Defaults to 1. + need_null (bool, optional): Defaults to False. + """ + tdLog.info("stream inserting ...") + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + + self.sgen_column_value_list(column_ele_list, need_null, ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value and "-" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + else: + for num in range(count): + ts_value = self.genTs()[0] + self.sgen_column_value_list(column_ele_list, need_null, f'{ts_value}+{num}s') + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + + def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): + """delete rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + start_ts (timestamp, optional): range start. Defaults to None. + end_ts (timestamp, optional): range end. Defaults to None. + ts_key (str, optional): timestamp column name. Defaults to None. 
+ """ + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + if ts_key is None: + ts_col_name = self.default_colts_name + else: + ts_col_name = ts_key + + base_del_sql = f'delete from {self.tbname} ' + if end_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + if ":" in end_ts and "-" in end_ts: + end_ts = f"{end_ts}" + base_del_sql += f'where {ts_col_name} between {start_ts} and {end_ts};' + else: + if start_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + base_del_sql += f'where {ts_col_name} = {start_ts};' + tdSql.execute(base_del_sql) + + def check_stream_field_type(self, sql, input_function): + """confirm stream field + + Args: + sql (str): input sql + input_function (str): scalar + """ + tdSql.query(sql) + res = tdSql.queryResult + if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: + tdSql.checkEqual(res[1][1], "DOUBLE") + tdSql.checkEqual(res[2][1], "DOUBLE") + elif input_function in ["lower", "ltrim", "rtrim", "upper"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + elif input_function in ["char_length", "length"]: + tdSql.checkEqual(res[1][1], "BIGINT") + tdSql.checkEqual(res[2][1], "BIGINT") + tdSql.checkEqual(res[3][1], "BIGINT") + elif input_function in ["concat", "concat_ws"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "NCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + elif input_function in ["substr"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "VARCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + else: + tdSql.checkEqual(res[1][1], "INT") + tdSql.checkEqual(res[2][1], "DOUBLE") + + def round_handle(self, input_list): + """round list elem + + Args: + input_list (list): input value list + + Returns: + _type_: round list + """ + tdLog.info("round rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j in i: + if type(j) != datetime and type(j) != str: + tmpl.append(round(j, 1)) + else: + tmpl.append(j) + final_list.append(tmpl) + return final_list + + def float_handle(self, input_list): + """float list elem + + Args: + input_list (list): input value list + + Returns: + _type_: float list + """ + tdLog.info("float rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j_i,j_v in enumerate(i): + if type(j_v) != datetime and j_v is not None and str(j_v).isdigit() and j_i <= 12: + tmpl.append(float(j_v)) + else: + tmpl.append(j_v) + final_list.append(tuple(tmpl)) + return final_list + + def str_ts_trans_bigint(self, str_ts): + """trans str ts to bigint + + Args: + str_ts (str): human-date + + Returns: + bigint: bigint-ts + """ + tdSql.query(f'select cast({str_ts} as bigint)') + return tdSql.queryResult[0][0] + + def cast_query_data(self, query_data): + """cast query-result for existed-stb + + Args: + query_data (list): query data list + + Returns: + list: new list after cast + """ + tdLog.info("cast query data ...") + col_type_list = self.column_type_str.split(',') + tag_type_list = self.tag_type_str.split(',') + col_tag_type_list = col_type_list + tag_type_list + nl = list() + for query_data_t in query_data: + query_data_l = list(query_data_t) + for i,v in enumerate(query_data_l): + if v is not None: + 
if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(6)": + tdSql.query(f'select cast("{v}" as binary(6))') + else: + tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') + query_data_l[i] = tdSql.queryResult[0][0] + else: + query_data_l[i] = v + nl.append(tuple(query_data_l)) + return nl + + def trans_time_to_s(self, runtime): + """trans time to s + + Args: + runtime (str): 1d/1h/1m... + + Returns: + int: second + """ + if "d" in str(runtime).lower(): + d_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(d_num) * 24 * 60 * 60 + elif "h" in str(runtime).lower(): + h_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(h_num) * 60 * 60 + elif "m" in str(runtime).lower(): + m_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(m_num) * 60 + elif "s" in str(runtime).lower(): + s_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + else: + s_num = 60 + return int(s_num) + + def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): + """confirm query result + + Args: + sql1 (str): select .... + sql2 (str): select .... + sorted (bool, optional): if sort result list. Defaults to False. + fill_value (str, optional): fill. Defaults to None. + tag_value_list (list, optional): Defaults to None. + defined_tag_count (int, optional): Defaults to None. + partition (bool, optional): Defaults to True. + use_exist_stb (bool, optional): Defaults to False. + subtable (str, optional): Defaults to None. + reverse_check (bool, optional): not equal. Defaults to False. + + Returns: + bool: False if failed + """ + tdLog.info("checking query data ...") + if tag_value_list: + dvalue = len(self.tag_type_str.split(',')) - defined_tag_count + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + new_list = list() + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + + latency = 0 + if sorted: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if not reverse_check: + while res1 != res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + 
[None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + else: + while res1 == res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkNotEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + + def check_stream_res(self, sql, expected_res, max_delay): + """confirm stream result + + Args: + sql (str): select ... + expected_res (str): expected result + max_delay (int): max_delay value + + Returns: + bool: False if failed + """ + tdSql.query(sql) + latency = 0 + + while tdSql.queryRows != expected_res: + tdSql.query(sql) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if max_delay is not None: + if latency == 0: + return False + tdSql.checkEqual(tdSql.queryRows, expected_res) + + def check_stream(self, sql1, sql2, expected_count, max_delay=None): + """confirm stream + + Args: + sql1 (str): select ... + sql2 (str): select ... + expected_count (int): expected_count + max_delay (int, optional): max_delay value. Defaults to None. + """ + self.check_stream_res(sql1, expected_count, max_delay) + self.check_query_data(sql1, sql2) + + def cal_watermark_window_close_session_endts(self, start_ts, watermark=None, session=None): + """cal endts for close window + + Args: + start_ts (epoch time): self.date_time + watermark (int, optional): > session. Defaults to None. + session (int, optional): Defaults to None. + + Returns: + int: as followed + """ + if watermark is not None: + return start_ts + watermark*self.offset + 1 + else: + return start_ts + session*self.offset + 1 + + def cal_watermark_window_close_interval_endts(self, start_ts, interval, watermark=None): + """cal endts for close window + + Args: + start_ts (epoch time): self.date_time + interval (int): [s] + watermark (int, optional): [s]. 
Defaults to None. + + Returns: + int: closing timestamp of the interval window + """ + if watermark is not None: + return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + watermark*self.offset + else: + return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + + def update_delete_history_data(self, delete): + """update and delete history data + + Args: + delete (bool): True/False + """ + self.sinsert_rows(tbname=self.ctb_name, ts_value=self.record_history_ts) + self.sinsert_rows(tbname=self.tb_name, ts_value=self.record_history_ts) + if delete: + self.sdelete_rows(tbname=self.ctb_name, start_ts=self.time_cast(self.record_history_ts, "-")) + self.sdelete_rows(tbname=self.tb_name, start_ts=self.time_cast(self.record_history_ts, "-")) + + def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None): + """prepare stream data + + Args: + interval (int, optional): Defaults to None. + watermark (int, optional): Defaults to None. + session (int, optional): Defaults to None. + state_window (str, optional): Defaults to None. + state_window_max (int, optional): Defaults to 127. + interation (int, optional): Defaults to 3. + range_count (int, optional): Defaults to None. + precision (str, optional): Defaults to "ms". + fill_history_value (int, optional): Defaults to 0. + ext_stb (bool, optional): Defaults to None. + """ + self.clean_env() + self.dataDict = { + "stb_name" : f"{self.case_name}_stb", + "ctb_name" : f"{self.case_name}_ct1", + "tb_name" : f"{self.case_name}_tb1", + "ext_stb_name" : f"ext_{self.case_name}_stb", + "ext_ctb_name" : f"ext_{self.case_name}_ct1", + "ext_tb_name" : f"ext_{self.case_name}_tb1", + "interval" : interval, + "watermark": watermark, + "session": session, + "state_window": state_window, + "state_window_max": state_window_max, + "iteration": interation, + "range_count": range_count, + "start_ts": 1655903478508, + } + if range_count is not None: + self.range_count = range_count + if precision is not None: + self.precision = precision + self.set_precision_offset(self.precision) + + self.stb_name = self.dataDict["stb_name"] + self.ctb_name = self.dataDict["ctb_name"] + self.tb_name = self.dataDict["tb_name"] + self.ext_stb_name = self.dataDict["ext_stb_name"] + self.ext_ctb_name = self.dataDict["ext_ctb_name"] + self.ext_tb_name = self.dataDict["ext_tb_name"] + self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' + self.ext_stb_stream_des_table = f'{self.ext_stb_name}{self.des_table_suffix}' + self.ext_ctb_stream_des_table = f'{self.ext_ctb_name}{self.des_table_suffix}' + self.ext_tb_stream_des_table = f'{self.ext_tb_name}{self.des_table_suffix}' + self.date_time = self.genTs(precision=self.precision)[0] + + self.screateDb(dbname=self.dbname, precision=self.precision) + if ext_stb: + self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) + self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) + self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) + self.screate_stable(dbname=self.dbname, stbname=self.stb_name) + self.screate_ctable(dbname=self.dbname, stbname=self.stb_name, ctbname=self.ctb_name)
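
The two `cal_watermark_window_close_*_endts` helpers above compute the earliest timestamp at which a stream window is expected to close: the session variant simply adds `watermark` (or `session`) scaled by `self.offset`, while the interval variant first aligns the start timestamp to the interval grid. A minimal standalone sketch of the interval formula, assuming `offset` is the scale factor from seconds to the timestamp precision (1000 for `ms`):

```python
# Illustrative sketch of cal_watermark_window_close_interval_endts; `offset`
# scales seconds into the raw timestamp unit (assumed 1000 for "ms" precision).
def interval_close_endts(start_ts, interval, offset=1000, watermark=None):
    seconds = int(start_ts / offset)  # align down to whole seconds
    endts = seconds * offset + (interval - seconds % interval) * offset
    return endts + watermark * offset if watermark is not None else endts

# A 10s window containing ts=1655903478508 (ms) closes at the next 10s boundary:
assert interval_close_endts(1655903478508, 10) == 1655903480000
```
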
+ self.screate_table(dbname=self.dbname, tbname=self.tb_name) + if fill_history_value == 1: + for i in range(self.range_count): + ts_value = str(self.date_time)+f'-{self.default_interval*(i+1)}s' + self.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if i == 1: + self.record_history_ts = ts_value + def is_json(msg): if isinstance(msg, str): try: @@ -756,23 +1836,22 @@ def is_json(msg): return False def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): - return "" - return paths[0] + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] def dict2toml(in_dict: dict, file:str): if not isinstance(in_dict, dict): @@ -780,5 +1859,4 @@ def dict2toml(in_dict: dict, file:str): with open(file, 'w') as f: toml.dump(in_dict, f) - tdCom = TDCom() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index cd873ca0f2..c4fc1ce654 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -668,7 +668,7 @@ class TDDnodes: self.testCluster = False self.valgrind = 0 self.asan = False - self.killValgrind = 1 + self.killValgrind = 0 def init(self, path, remoteIP = ""): binPath = self.dnodes[0].getPath() + "/../../../" @@ -775,9 +775,41 @@ class TDDnodes: tdLog.info("execute finished") return + def killProcesser(self, processerName): + if platform.system().lower() == 'windows': + killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName) + psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName)) + else: + killCmd = ( + "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" + % processerName + ) + psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName) + + processID = "" + + try: + processID = subprocess.check_output(psCmd, shell=True) + while processID: + os.system(killCmd) + time.sleep(1) + try: + processID = subprocess.check_output(psCmd, shell=True) + except Exception as err: + processID = "" + tdLog.debug(f'**** kill pid warn: {err}') + except Exception as err: + processID = "" + tdLog.debug(f'**** find pid warn: {err}') + + + def stopAll(self): tdLog.info("stop all dnodes, asan:%d" % self.asan) - distro_id = distro.id() + if platform.system().lower() != 'windows': + distro_id = distro.id() + else: + distro_id = "not alpine" if self.asan and distro_id != "alpine": tdLog.info("execute script: %s" % self.stopDnodesPath) os.system(self.stopDnodesPath) @@ -792,7 +824,6 @@ class TDDnodes: if (distro_id == "alpine"): - print(distro_id) psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep|
grep -v defunct | awk '{print $2}' | xargs" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() while(processID): @@ -803,36 +834,9 @@ class TDDnodes: processID = subprocess.check_output( psCmd, shell=True).decode("utf-8").strip() elif platform.system().lower() == 'windows': - psCmd = "for /f %a in ('wmic process where \"name='taosd.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - print(f"pid of taosd.exe:{processID}") - killCmd = "kill -9 %s > nul 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - - psCmd = "for /f %a in ('wmic process where \"name='tmq_sim.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - print(f"pid of tmq_sim.exe:{processID}") - killCmd = "kill -9 %s > nul 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - - psCmd = "for /f %a in ('wmic process where \"name='taosBenchmark.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() - while(processID): - print(f"pid of taosBenchmark.exe:{processID}") - killCmd = "kill -9 %s > nul 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8").strip() - + self.killProcesser("taosd") + self.killProcesser("tmq_sim") + self.killProcesser("taosBenchmark") else: psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() @@ -849,7 +853,6 @@ class TDDnodes: time.sleep(1) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8").strip() - if self.killValgrind == 1: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 2fa21b1983..91aac1929f 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -111,7 +111,7 @@ class TDSql: return self.error_info - def query(self, sql, row_tag=None,queryTimes=10): + def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): self.sql = sql i=1 while i <= queryTimes: @@ -120,6 +120,17 @@ class TDSql: self.queryResult = self.cursor.fetchall() self.queryRows = len(self.queryResult) self.queryCols = len(self.cursor.description) + + if count_expected_res is not None: + counter = 0 + while count_expected_res != self.queryResult[0][0]: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + if counter < queryTimes: + counter += 0.5 + time.sleep(0.5) + else: + return False if row_tag: return self.queryResult return self.queryRows @@ -501,7 +512,8 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, elm, expect_elm) - tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args) + # tdLog.info("%s(%d) failed: sql:%s, elm:%s != 
expect_elm:%s" % args) + raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args) def checkNotEqual(self, elm, expect_elm): if elm != expect_elm: @@ -509,7 +521,8 @@ class TDSql: else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, elm, expect_elm) - tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) + tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) + raise Exception def get_times(self, time_str, precision="ms"): caller = inspect.getframeinfo(inspect.stack()[1][0]) diff --git a/tests/script/tsim/parser/alter__for_community_version.sim b/tests/script/tsim/parser/alter__for_community_version.sim index b902daa2dd..48fb2f8246 100644 --- a/tests/script/tsim/parser/alter__for_community_version.sim +++ b/tests/script/tsim/parser/alter__for_community_version.sim @@ -35,7 +35,7 @@ sql_error alter database $db keep 20.0,20.0,20.0 sql_error alter database $db keep 0,0,0 sql_error alter database $db keep 3 sql_error alter database $db keep -1,-1,-1 -sql_error alter database $db keep 20,20 +sql alter database $db keep 20,20 sql_error alter database $db keep 9,9,9 sql_error alter database $db keep 20,20,19 sql_error alter database $db keep 20,19,20 @@ -44,7 +44,7 @@ sql_error alter database $db keep 20,19,18 sql_error alter database $db keep 20,20,20,20 sql_error alter database $db keep 365001,365001,365001 sql_error alter database $db keep 365001 -sql_error alter database $db keep 20 +sql alter database $db keep 20 sql select * from information_schema.ins_databases if $rows != 3 then return -1 diff --git a/tests/script/tsim/query/tag_scan.sim b/tests/script/tsim/query/tag_scan.sim index 3a64cf1a1c..3f0a23fbd6 100644 --- a/tests/script/tsim/query/tag_scan.sim +++ b/tests/script/tsim/query/tag_scan.sim @@ -90,7 +90,7 @@ endi sql select tags tbname,t,b from stt1 order by t print $rows -print $data00 $data01 $data02 $data10 $data11 $data12 $data20 $data21 $data22 $data30 $data31 $data32 +print $data00 $data01 $data02 $data10 $data11 $data12 $data20 $data21 $data22 $data30 $data31 $data32 if $rows != 4 then return -1 endi @@ -103,7 +103,7 @@ endi sql select tags t,b from stt1 where t=1 print $rows -print $data00 $data01 +print $data00 $data01 if $rows != 1 then return -1 endi @@ -116,7 +116,20 @@ endi sql select tags t,b from stt1 where tbname='ctt11' print $rows -print $data00 $data01 +print $data00 $data01 +if $rows != 1 then + return -1 +endi +if $data00 != @1@ then + return -1 +endi +if $data01 != @1aa@ then + return -1 +endi + +sql select tags t,b from ctt11 +print $rows +print $data00 $data01 if $rows != 1 then return -1 endi diff --git a/tests/system-test/0-others/splitVGroup.py b/tests/system-test/0-others/splitVGroup.py index 9fd00892e4..ed80505ce2 100644 --- a/tests/system-test/0-others/splitVGroup.py +++ b/tests/system-test/0-others/splitVGroup.py @@ -198,7 +198,7 @@ class TDTestCase: # init def init(self, conn, logSql, replicaVar=1): - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = time.time() % 10000 random.seed(seed) self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/0-others/timeRangeWise.py b/tests/system-test/0-others/timeRangeWise.py index a7dc18aa82..5ef5aa4a75 100644 --- a/tests/system-test/0-others/timeRangeWise.py +++ b/tests/system-test/0-others/timeRangeWise.py @@ -210,7 +210,7 @@ class TDTestCase: # init def init(self, conn, logSql, replicaVar=1): - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = 
time.time() % 10000 random.seed(seed) self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/0-others/ttl.py b/tests/system-test/0-others/ttl.py new file mode 100644 index 0000000000..32b18c6bbb --- /dev/null +++ b/tests/system-test/0-others/ttl.py @@ -0,0 +1,37 @@ +import time +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +class TDTestCase: + updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 1, "ttlChangeOnWrite": 0} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + self.ttl = 5 + self.dbname = "test" + + def check_ttl_result(self): + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)') + tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}') + tdSql.query(f'show {self.dbname}.tables') + tdSql.checkRows(2) + + time.sleep(self.ttl + 2) + tdSql.query(f'show {self.dbname}.tables') + tdSql.checkRows(1) + + def run(self): + self.check_ttl_result() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/ttlChangeOnWrite.py b/tests/system-test/0-others/ttlChangeOnWrite.py new file mode 100644 index 0000000000..f9c63e4ec2 --- /dev/null +++ b/tests/system-test/0-others/ttlChangeOnWrite.py @@ -0,0 +1,59 @@ +import time +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +class TDTestCase: + updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360, + "ttlFlushThreshold": 100, "ttlBatchDropNum": 10} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + self.ttl = 5 + self.tables = 100 + self.dbname = "test" + + def check_batch_drop_num(self): + tdSql.execute(f'create database {self.dbname} vgroups 1') + tdSql.execute(f'use {self.dbname}') + tdSql.execute(f'create table stb(ts timestamp, c1 int) tags(t1 int)') + for i in range(self.tables): + tdSql.execute(f'create table t{i} using stb tags({i}) ttl {self.ttl}') + + time.sleep(self.ttl * 2) + tdSql.query('show tables') + tdSql.checkRows(90) + + def check_ttl_result(self): + tdSql.execute(f'drop database if exists {self.dbname}') + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)') + tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}') + tdSql.query(f'show {self.dbname}.tables') + tdSql.checkRows(2) + + time.sleep(self.ttl) + tdSql.execute(f'insert into {self.dbname}.t2 values(now, 1)') + + time.sleep(self.ttl) + tdSql.query(f'show {self.dbname}.tables') + tdSql.checkRows(2) + + time.sleep(self.ttl * 2) + tdSql.query(f'show {self.dbname}.tables') + tdSql.checkRows(1) + + def run(self): + self.check_batch_drop_num() + self.check_ttl_result() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/alter_database.py b/tests/system-test/1-insert/alter_database.py index 1fca294a47..8de43b8ee8 100644 ---
a/tests/system-test/1-insert/alter_database.py +++ b/tests/system-test/1-insert/alter_database.py @@ -18,7 +18,7 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) self.buffer_boundary = [3, 4097, 8193, 12289, 16384] self.buffer_error = [self.buffer_boundary[0] - - 1, self.buffer_boundary[-1]+1, 256] + 1, self.buffer_boundary[-1]+1] # pages_boundary >= 64 self.pages_boundary = [64, 128, 512] self.pages_error = [self.pages_boundary[0]-1] @@ -47,15 +47,40 @@ class TDTestCase: tdSql.execute('create database db') tdSql.query( 'select * from information_schema.ins_databases where name = "db"') - self.pages_error.append(tdSql.queryResult[0][10]) + # self.pages_error.append(tdSql.queryResult[0][10]) for pages in self.pages_error: tdSql.error(f'alter database db pages {pages}') tdSql.execute('drop database db') + def alter_same_options(self): + tdSql.execute('drop database if exists db') + tdSql.execute('create database db') + tdSql.query('select * from information_schema.ins_databases where name = "db"') + + db_options_items = ["replica","keep","buffer","pages","minrows","cachemodel","cachesize","wal_level","wal_fsync_period", + "wal_retention_period","wal_retention_size","stt_trigger"] + db_options_result_idx = [4,7,8,10,11,18,19,20,21,22,23,24] + + self.option_result = [] + for idx in db_options_result_idx: + self.option_result.append(tdSql.queryResult[0][idx]) + + index = 0 + for option in db_options_items: + if option == "cachemodel": + option_sql = "alter database db %s '%s'" % (option, self.option_result[index] ) + else: + option_sql = "alter database db %s %s" % (option, self.option_result[index] ) + tdLog.debug(option_sql) + tdSql.query(option_sql) + index += 1 + tdSql.execute('drop database db') + def run(self): self.alter_buffer() self.alter_pages() + self.alter_same_options() def stop(self): tdSql.close() diff --git a/tests/system-test/1-insert/precisionUS.py b/tests/system-test/1-insert/precisionUS.py index 1b41d66010..d634149297 100644 --- a/tests/system-test/1-insert/precisionUS.py +++ b/tests/system-test/1-insert/precisionUS.py @@ -220,7 +220,7 @@ class TDTestCase: # init def init(self, conn, logSql, replicaVar=1): - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = time.time() % 10000 random.seed(seed) self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/2-query/To_unixtimestamp.py b/tests/system-test/2-query/To_unixtimestamp.py index 424ebff6c5..975abfa8b0 100644 --- a/tests/system-test/2-query/To_unixtimestamp.py +++ b/tests/system-test/2-query/To_unixtimestamp.py @@ -99,7 +99,7 @@ class TDTestCase: tdSql.query(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 0);") tdSql.checkEqual(tdSql.queryResult[0][0], 0) tdSql.query(f"select to_unixtimestamp('1970-01-01 00:00:00', 1);") - tdSql.checkData(0, 0, '1970-01-01 00:00:00') + tdSql.checkData(0, 0, datetime.datetime(1970, 1, 1, 0, 0, 0)) tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 2);") tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 1.5);") tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 'abc');") diff --git a/tests/system-test/2-query/orderBy.py b/tests/system-test/2-query/orderBy.py index fed1651b3a..fa447cbca4 100644 --- a/tests/system-test/2-query/orderBy.py +++ b/tests/system-test/2-query/orderBy.py @@ -220,7 +220,7 @@ class TDTestCase: # init def init(self, conn, logSql, replicaVar=1): - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = time.time() % 10000 random.seed(seed) self.replicaVar 
= int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/2-query/smaBasic.py b/tests/system-test/2-query/smaBasic.py index c221a70605..a82d190e2b 100644 --- a/tests/system-test/2-query/smaBasic.py +++ b/tests/system-test/2-query/smaBasic.py @@ -269,7 +269,7 @@ class TDTestCase: # init def init(self, conn, logSql, replicaVar=1): - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = time.time() % 10000 random.seed(seed) self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py index e89df638d0..5aaf5b9b28 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py @@ -88,15 +88,15 @@ class TDTestCase: # restart all taosd tdDnodes=cluster.dnodes - tdDnodes[1].stoptaosd() + # tdDnodes[1].stoptaosd() tdDnodes[2].stoptaosd() - tdLog.info("check whether 2 mnode status is offline") - clusterComCheck.check3mnode2off() + tdLog.info("check whether 1 mnode status is offline") + clusterComCheck.check3mnodeoff(3) # tdSql.error("create user user1 pass '123';") - tdLog.info("start two follower") - tdDnodes[1].starttaosd() + tdLog.info("start follower") + # tdDnodes[1].starttaosd() tdDnodes[2].starttaosd() clusterComCheck.checkMnodeStatus(mnodeNums) diff --git a/tests/system-test/6-cluster/clusterCommonCheck.py b/tests/system-test/6-cluster/clusterCommonCheck.py index 5e5568c5c5..85d670a325 100644 --- a/tests/system-test/6-cluster/clusterCommonCheck.py +++ b/tests/system-test/6-cluster/clusterCommonCheck.py @@ -245,9 +245,12 @@ class ClusterComCheck: tdLog.exit(f"vgroup number of {db_name} is not correct") if self.db_replica == 1 : if tdSql.queryResult[0][4] == 'leader' and tdSql.queryResult[1][4] == 'leader' and tdSql.queryResult[last_number][4] == 'leader': - ready_time= (count + 1) - tdLog.success(f"all vgroups of {db_name} are leaders in {count + 1} s") - return True + tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';") + print("db replica :",tdSql.queryResult[0][0]) + if tdSql.queryResult[0][0] == db_replica: + ready_time= (count + 1) + tdLog.success(f"all vgroups with replica {self.db_replica} of {db_name} are leaders in {count + 1} s") + return True count+=1 elif self.db_replica == 3 : vgroup_status_first=[tdSql.queryResult[0][4],tdSql.queryResult[0][6],tdSql.queryResult[0][8]] @@ -255,13 +258,16 @@ class ClusterComCheck: vgroup_status_last=[tdSql.queryResult[last_number][4],tdSql.queryResult[last_number][6],tdSql.queryResult[last_number][8]] if vgroup_status_first.count('leader') == 1 and vgroup_status_first.count('follower') == 2: if vgroup_status_last.count('leader') == 1 and vgroup_status_last.count('follower') == 2: - ready_time= (count + 1) - tdLog.success(f"elections of {db_name}.vgroups are ready in {ready_time} s") - return True + tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';") + print("db replica :",tdSql.queryResult[0][0]) + if tdSql.queryResult[0][0] == db_replica: + ready_time= (count + 1) + tdLog.success(f"elections of {db_name}.vgroups with replica {self.db_replica} are ready in {ready_time} s") + return True count+=1 else: tdLog.debug(tdSql.queryResult) - tdLog.notice(f"elections of {db_name} all vgroups are failed in {count} s ") + tdLog.notice(f"elections of {db_name} all vgroups with replica {self.db_replica} are failed in {count} s 
") caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno) tdLog.exit("%s(%d) failed " % args) diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py index aa0c7a0177..9ab47764c8 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py @@ -177,7 +177,7 @@ class TDTestCase: tdSql.query("select count(*) from %s"%stableName) tdSql.checkData(0,0,rowsPerStb) - clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica1,db_name=paraDict["dbName"],count_number=40) + clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica1,db_name=paraDict["dbName"],count_number=100) sleep(5) tdLog.info(f"show transactions;alter database db0_0 replica {replica3};") TdSqlEx.execute(f'show transactions;') diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py index b66334a6a6..37e3a17100 100644 --- a/tests/system-test/7-tmq/subscribeDb3.py +++ b/tests/system-test/7-tmq/subscribeDb3.py @@ -94,7 +94,7 @@ class TDTestCase: resultList=[] while 1: tdSql.query("select * from %s.consumeresult"%cdbName) - #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + # tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))) if tdSql.getRows() == expectRows: break else: @@ -336,7 +336,7 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows > expectrowcnt or totalConsumeRows <= 0: + if totalConsumeRows > expectrowcnt or totalConsumeRows < 0: tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py index 6a03f0f751..117c3ce637 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -218,7 +218,7 @@ class TDTestCase: actConsumeTotalRows = resultList[0] - if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted): tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py index c11159c6e5..2864240441 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -216,7 +216,7 @@ class TDTestCase: actConsumeTotalRows = resultList[0] tdLog.info("act consume rows: %d, expect rows range (0, %d)"%(actConsumeTotalRows, totalRowsInserted)) - if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted): tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) diff --git 
a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py index 439845aa54..d8606efe58 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) - if not (actConsumeTotalRows > 0 and actConsumeTotalRows <= totalRowsInserted): + if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted): tdLog.exit("%d tmq consume rows error!"%consumerId) time.sleep(10) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py index 53ff020b08..05aa82c929 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -216,7 +216,7 @@ class TDTestCase: actConsumeTotalRows = resultList[0] - if not (actConsumeTotalRows > 0 and actConsumeTotalRows <= totalRowsInserted): + if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted): tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py index 4bb6cf463f..dcaa6ceb7c 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py @@ -217,7 +217,7 @@ class TDTestCase: tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) - if not ((actConsumeTotalRows > 0) and (actConsumeTotalRows <= totalRowsInserted)): + if not ((actConsumeTotalRows >= 0) and (actConsumeTotalRows <= totalRowsInserted)): tdLog.exit("%d tmq consume rows error!"%consumerId) time.sleep(10) diff --git a/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py index 3dabca4cd1..dd2420dcfb 100644 --- a/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py +++ b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py @@ -205,12 +205,21 @@ class TDTestCase: time.sleep(self.walRetentionPeriod + 1) tdLog.info("secondely call to flash database") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # wait the consumer to complete one poll while (0 == self.retryPoll): time.sleep(1) continue - + + # write data again while the consumer is stopped to make sure some data is left unconsumed + pInsertDataAgainThread = tmqCom.asyncInsertDataByInterlace(paraDict) + pInsertDataAgainThread.join() + tdLog.info("first call to flush database when writing data the second time") + tdSql.query("flush database %s"%(paraDict['dbName'])) + time.sleep(self.walRetentionPeriod + 1) + tdLog.info("second call to flush database when writing data the second time") + tdSql.query("flush database %s"%(paraDict['dbName'])) + with self.lock: self.retryPoll = 0 currentTime = datetime.now() @@ -218,15 +227,14 @@ paraDict["startTs"] = 1640966400000 + paraDict["ctbNum"] * paraDict["rowsPerTbl"] pThread3 = tmqCom.asyncInsertDataByInterlace(paraDict) - - + tdLog.debug("wait sub-thread to end insert data") pThread3.join() - - totalInsertRows = paraDict["ctbNum"] * paraDict["rowsPerTbl"] * 2 + + totalInsertRows = paraDict["ctbNum"] *
paraDict["rowsPerTbl"] * 3 tdLog.debug("wait sub-thread to end consume data") pThread2.join() - + tdLog.info("act consume total rows: %d, act insert total rows: %d"%(self.actConsumeTotalRows, totalInsertRows)) if (self.actConsumeTotalRows >= totalInsertRows): diff --git a/tests/system-test/7-tmq/tmqSeekAndCommit.py b/tests/system-test/7-tmq/tmqSeekAndCommit.py index 2d837ef7a4..253edcd10d 100644 --- a/tests/system-test/7-tmq/tmqSeekAndCommit.py +++ b/tests/system-test/7-tmq/tmqSeekAndCommit.py @@ -21,6 +21,7 @@ class TDTestCase: self.db_name = "tmq_db" self.topic_name = "tmq_topic" self.stable_name = "tmqst" + self.prepareData() def prepareData(self): @@ -73,8 +74,9 @@ class TDTestCase: return consumer def test_seek_and_committed_position_with_autocommit(self): + """Check the position and committed offset of the topic for autocommit scenario + """ try: - self.prepareData() inputDict = { "topic_name": self.topic_name, "group_id": "1", @@ -95,18 +97,26 @@ class TDTestCase: partitions = consumer.assignment() position_partitions = consumer.position(partitions) - tdLog.info("position_partitions: %s"%(position_partitions)) for i in range(len(position_partitions)): tdLog.info("position_partitions[%s].offset: %s"%(i, position_partitions[i].offset)) committed_partitions = consumer.committed(partitions) - tdLog.info("committed_partitions: %s"%(committed_partitions)) + origin_committed_position = [] for i in range(len(committed_partitions)): tdLog.info("committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset)) + origin_committed_position.append(committed_partitions[i].offset) assert(len(position_partitions) == len(committed_partitions)) for i in range(len(position_partitions)): assert(position_partitions[i].offset == committed_partitions[i].offset) - # seek to the beginning of the topic - + # seek to the specified offset of the topic, then check position and committed offset + for partition in partitions: + partition.offset = 5 + consumer.seek(partition) + position_partitions = consumer.position(partitions) + for i in range(len(position_partitions)): + assert(position_partitions[i].offset == 5) + committed_partitions = consumer.committed(partitions) + for i in range(len(committed_partitions)): + assert(committed_partitions[i].offset != 5 and committed_partitions[i].offset == origin_committed_position[i]) except Exception as ex: raise Exception("Failed to test seek and committed position with autocommit with error: {}".format(str(ex))) finally: @@ -114,12 +124,81 @@ class TDTestCase: consumer.close() def test_commit_by_offset(self): - pass - + """Check the position and committed offset of the topic for commit by offset scenario + """ + try: + inputDict = { + "topic_name": self.topic_name, + "group_id": "1", + "auto_commit": "false", + "offset_reset": "earliest" + } + consumer = self.tmqSubscribe(inputDict) + origin_committed_position = [] + while(True): + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + partitions = consumer.assignment() + consumer.commit(offsets=partitions) + val = res.value() + for block in val: + tdLog.info("block.fetchall() number: %s"%(len(block.fetchall()))) + position_partitions = consumer.position(partitions) + committed_partitions = consumer.committed(partitions) + for i in range(len(position_partitions)): + assert(position_partitions[i].offset == committed_partitions[i].offset) + committed_partitions = consumer.committed(partitions) + for i in range(len(committed_partitions)): + 
origin_committed_position.append(committed_partitions[i].offset) + tdLog.info("original committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset)) + # seek to the specified offset of the topic, then check position and committed offset + for partition in partitions: + partition.offset = 2 + consumer.seek(partition) + position_partitions = consumer.position(partitions) + for i in range(len(position_partitions)): + assert(position_partitions[i].offset == 2) + committed_partitions = consumer.committed(partitions) + for i in range(len(committed_partitions)): + tdLog.info("after seek committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset)) + assert(committed_partitions[i].offset != 2 and committed_partitions[i].offset == origin_committed_position[i]) + # continue to consume data from seek offset + while(True): + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + partitions = consumer.assignment() + # commit by offset + consumer.commit(offsets=partitions) + val = res.value() + for block in val: + tdLog.info("block.fetchall() number: %s"%(len(block.fetchall()))) + partitions = consumer.assignment() + position_partitions = consumer.position(partitions) + committed_partitions = consumer.committed(partitions) + assert(len(position_partitions) == len(committed_partitions)) + for i in range(len(position_partitions)): + assert(position_partitions[i].offset == committed_partitions[i].offset) + except Exception as ex: + raise Exception("Failed to test commit by offset with error: {}".format(str(ex))) + finally: + consumer.unsubscribe() + consumer.close() + def run(self): self.test_seek_and_committed_position_with_autocommit() + self.test_commit_by_offset() def stop(self): + tdSql.execute("drop topic %s" % self.topic_name) + tdSql.execute("drop database %s"%(self.db_name)) tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/8-stream/at_once_interval.py b/tests/system-test/8-stream/at_once_interval.py new file mode 100644 index 0000000000..020b5f2a17 --- /dev/null +++ b/tests/system-test/8-stream/at_once_interval.py @@ -0,0 +1,220 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None): + tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if partition == "tbname": + if 
case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_col_alias + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or partition is None: + if case_when: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + if partition: + partition_elm = f'partition by {partition} {partition_elm_alias}' + else: + partition_elm = "" + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + if fill_value: + if "value" in 
fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value) + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if partition: + partition_elm = f'partition by {partition}' + else: + partition_elm = "" + + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from 
`{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + if fill_value: + end_date_time = self.tdCom.date_time + final_range_count = self.tdCom.range_count + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + # update + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + for i in range(self.tdCom.range_count): + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select 
wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if partition == "tbname": + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value) + + + def run(self): + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True) + 
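
A note on the `VALUE,...` fill strings rewritten throughout this case: the list must supply one constant per column in the fill select list, which is why the super-table branches carry 22 constants and the plain-table branches 11 (some branches also skip 4 and 5). A hypothetical helper, not part of the test framework, showing the convention:

```python
# Hypothetical helper (not in the test framework): build a VALUE fill clause
# with one constant per output column: 11 for the tb list, 22 for the stb list.
def make_fill_value(col_count):
    return "VALUE," + ",".join(str(i % 11 + 1) for i in range(col_count))

assert make_fill_value(11) == "VALUE,1,2,3,4,5,6,7,8,9,10,11"
assert make_fill_value(22).endswith("9,10,11,1,2,3,4,5,6,7,8,9,10,11")
```
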
self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) + self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") + # for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value) + self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/at_once_interval_ext.py b/tests/system-test/8-stream/at_once_interval_ext.py new file mode 100644 index 0000000000..8674e7f853 --- /dev/null +++ b/tests/system-test/8-stream/at_once_interval_ext.py @@ -0,0 +1,209 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False): + tdLog.info(f"*** testing stream at_once+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, delete: {delete}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***") + if use_except: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")): + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + else: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + + if stb_field_name_value is not None: + if len(stb_field_name_value) == 0: + stb_field_name_value = ",".join(self.tdCom.tb_filter_des_select_elm.split(",")[:5]) + # else: + # stb_field_name_value = self.tdCom.tb_filter_des_select_elm + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + defined_tag_count = len(tag_value.split()) if tag_value is not None else 0 + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + 
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_col_alias + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition == "tbname,t1,c1": + partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if subtable: + if partition == "tbname": + if case_when: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if subtable == "constant": + # stb_subtable_value = f'"{self.tdCom.ext_ctb_stream_des_table}"' + stb_subtable_value = f'"constant_{self.tdCom.ext_ctb_stream_des_table}"' + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(cast({subtable} as int unsigned) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = None + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.ext_tb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + if partition: + stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except) + else: + stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, 
stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except) + if stream_sql: + tdSql.error(stream_sql) + return + start_time = self.tdCom.date_time + if subtable == "constant": + range_count = 1 + else: + range_count = self.tdCom.range_count + + for i in range(range_count): + latency = 0 + tag_value_list = list() + ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value) + self.tdCom.date_time += 1 + if tag_value: + if subtable == "constant": + tdSql.query(f'select {tag_value} from constant_{self.tdCom.ext_ctb_stream_des_table}') + else: + tdSql.query(f'select {tag_value} from {self.stb_name}') + tag_value_list = tdSql.queryResult + if not fill_value: + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm: + self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + elif stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, cast(max(c2) as tinyint), cast(min(c1) as smallint) from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True) + else: + if partition: + if tag_value == self.tdCom.exchange_tag_filter_des_select_elm: + self.tdCom.check_query_data(f'select {self.tdCom.partitial_tag_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list) + elif tag_value == self.tdCom.cast_tag_filter_des_select_elm: + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart') + limit_row = tdSql.queryRows + self.tdCom.check_query_data(f'select {self.tdCom.cast_tag_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select cast(t1 as TINYINT UNSIGNED),cast(t2 as varchar(256)),cast(t3 as bool) from {self.stb_name} order by ts limit {limit_row}') + tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};') + while list(set(tdSql.queryResult)) != [(None, None, None, None, None, None, None, None, None, None)]: + tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 
from ext_{self.stb_name}{self.tdCom.des_table_suffix};') + if latency < self.tdCom.default_interval: + latency += 1 + time.sleep(1) + else: + return False + tdSql.checkEqual(list(set(tdSql.queryResult)), [(None, None, None, None, None, None, None, None, None, None)]) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list) + else: + if use_exist_stb and not tag_value: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, use_exist_stb=use_exist_stb) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, subtable=subtable) + + if subtable: + for tname in [self.stb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + else: + tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))') + subtable_value = tdSql.queryResult[0][0] + if subtable == "constant": + return + else: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;') + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + def run(self): + # self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + for delete in [True, False]: + for fill_history_value in [0, 1]: + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, 
use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.partitial_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.exchange_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + # self-define tag + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.partitial_tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.exchange_tag_filter_des_select_elm, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition="t1 as t5,t2 as t11,t3 as t13", subtable=None, stb_field_name_value=None, tag_value="t5,t11,t13", use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t1", use_exist_stb=True) + # error cases + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value="", tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), 
partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm.replace("c1","c19"), tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="ttt", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t15", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="c5", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as t14", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True) + self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as c13", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,c13", use_exist_stb=True, use_except=True) + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/at_once_session.py b/tests/system-test/8-stream/at_once_session.py new file mode 100644 
index 0000000000..9a253a187f --- /dev/null +++ b/tests/system-test/8-stream/at_once_session.py @@ -0,0 +1,223 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_session(self, session, ignore_expired=None, ignore_update=None, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True): + tdLog.info(f"*** testing stream at_once+interval: session: {session}, ignore_expired: {ignore_expired}, ignore_update: {ignore_update}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, case_when: {case_when}, subtable: {subtable} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if partition == "tbname": + if case_when: + stream_case_when_partition = case_when + else: + stream_case_when_partition = self.tdCom.partition_tbname_alias + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + if subtable: + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = "constant" + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or subtable is None: + if case_when: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if subtable: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if 'abs' in partition: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = 
f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + for i in range(self.tdCom.range_count): + ctb_name = self.tdCom.get_long_name() + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session) + if i == 0: + record_window_close_ts = window_close_ts + for ts_value in [self.tdCom.date_time, window_close_ts]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True) + if self.delete and i%2 != 0: + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt) + ts_value += 1 + + # check result + if partition != "tbname": + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True) + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} 
order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;') + else: + for tbname in [self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + + if self.tdCom.disorder: + if ignore_expired: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s') + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.checkEqual(res1, res2) + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s') + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.checkEqual(res1, res2) + else: + if ignore_update: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res2 = tdSql.queryResult + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + res1 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + 
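+                # Without ignore_update, re-inserting rows at the first
+                # recorded window-close timestamp must be reprocessed by the
+                # stream, so both destination tables are compared against
+                # fresh batch queries right after the update.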
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + if partition != "tbname": + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True) + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;') + else: + for tbname in [self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True) + + if fill_history_value: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.record_history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.record_history_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-")) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-")) + + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if c1_value[1] is not None: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + if subtable: + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if c1_value[1] is not None: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + if subtable: + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from 
`{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + + def run(self): + self.at_once_session(session=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, delete=True, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') + for subtable in [None, True]: + self.at_once_session(session=random.randint(10, 15), subtable=subtable, partition="abs(c1)") + for ignore_expired in [None, 0, 1]: + for fill_history_value in [None, 1]: + self.at_once_session(session=random.randint(10, 15), ignore_expired=ignore_expired, fill_history_value=fill_history_value) + for fill_history_value in [None, 1]: + self.at_once_session(session=random.randint(10, 15), partition="tbname", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="c1", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, subtable=None, fill_history_value=fill_history_value) + self.at_once_session(session=random.randint(10, 15), ignore_update=1, fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/at_once_state_window.py b/tests/system-test/8-stream/at_once_state_window.py new file mode 100644 index 0000000000..fa9f4ddd78 --- /dev/null +++ b/tests/system-test/8-stream/at_once_state_window.py @@ -0,0 +1,144 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def at_once_state_window(self, state_window, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True): + tdLog.info(f"*** testing stream at_once+interval: state_window: {state_window}, partition: {partition}, fill_history: {fill_history_value}, case_when: {case_when}***, delete: {delete}") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(state_window=state_window, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1" and subtable is not None: + partition_elm_alias = 
self.tdCom.partition_col_alias + elif partition == "c1" and subtable is None: + partition_elm_alias = 'constant' + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or subtable is None: + if partition == "tbname": + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + if 'abs' in partition: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + else: + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + state_window_col_name = self.tdCom.dataDict["state_window"] + if case_when: + stream_state_window = case_when + else: + stream_state_window = state_window_col_name + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + range_times = self.tdCom.range_count + state_window_max = self.tdCom.dataDict['state_window_max'] + for i in range(range_times): + state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times)) + for i in range(2, range_times+3): + tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.tdCom.update and i%2 == 0: + tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if 
self.delete and i%2 != 0: + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from {self.ctb_name} where ts = {dt}') + tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.tdCom.update and i%2 == 0: + tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})') + if self.delete and i%2 != 0: + tdSql.execute(f'delete from {self.tb_name} where ts = {dt}') + self.tdCom.date_time += 1 + + # for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True) + + if fill_history_value: + self.tdCom.update_delete_history_data(self.delete) + + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + if subtable: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + return + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + if subtable: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + else: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + return + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + def run(self): + self.at_once_state_window(state_window="c2", partition="tbname", case_when="case when c1 < 0 then c1 else c2 end") + self.at_once_state_window(state_window="c1", partition="tbname", case_when="case when c1 >= 0 then c1 else c2 end") + for fill_history_value in [None, 1]: + 
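+            # With and without history back-fill, each partition variant runs
+            # append-only and again with delete=True, so state-window
+            # recalculation is checked after rows are updated or removed.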
self.at_once_state_window(state_window="c1", partition="tbname", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="abs(c1)", fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="tbname", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="abs(c1)", delete=True, fill_history_value=fill_history_value) + self.at_once_state_window(state_window="c1", partition="c1", subtable=None, fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/max_delay_interval.py b/tests/system-test/8-stream/max_delay_interval.py new file mode 100644 index 0000000000..944c946554 --- /dev/null +++ b/tests/system-test/8-stream/max_delay_interval.py @@ -0,0 +1,161 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_interval(self, interval, max_delay, watermark=None, fill_value=None, delete=False): + tdLog.info(f"*** testing stream max_delay+interval: interval: {interval}, watermark: {watermark}, fill_value: {fill_value}, delete: {delete} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.date_time = 1658921623245 + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + fill_watermark_value = watermark_value + else: + watermark_value = None + fill_watermark_value = "0s" + + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, 
source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value) + init_num = 0 + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + # if not fill_value: + # for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + # if tbname != self.tb_stream_des_table: + # tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + # else: + # tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + # tdSql.checkEqual(tdSql.queryRows, init_num) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if i == 0: + init_num = 2 + i + if watermark is not None: + init_num += 1 + else: + init_num += 1 + time.sleep(int(max_delay.replace("s", ""))) + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)') + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, 
{self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)') + if fill_value: + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if self.tdCom.update: + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + time.sleep(int(max_delay.replace("s", ""))) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, 
{self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value) + + + def run(self): + for watermark in [None, random.randint(20, 25)]: + self.watermark_max_delay_interval(interval=random.choice([15]), watermark=watermark, max_delay=f"{random.randint(5, 6)}s") + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/max_delay_interval_ext.py b/tests/system-test/8-stream/max_delay_interval_ext.py new file mode 100644 index 0000000000..653fcd997c --- /dev/null +++ b/tests/system-test/8-stream/max_delay_interval_ext.py @@ -0,0 +1,101 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_interval_ext(self, interval, max_delay, watermark=None, fill_value=None, partition="tbname", delete=False, fill_history_value=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False): + tdLog.info(f"*** testing stream max_delay+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, max_delay: {max_delay}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***") + if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm: + partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str + else: + partitial_tb_source_str = self.tdCom.ext_tb_source_select_str + if not stb_field_name_value: + stb_field_name_value = self.tdCom.tb_filter_des_select_elm + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + defined_tag_count = len(tag_value.split()) + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + if subtable: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None + else: + stb_subtable_value = None + self.tdCom.date_time = 1658921623245 + 
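+        # A fixed start timestamp (2022-07-27 in epoch milliseconds) keeps
+        # the interval window boundaries, and therefore the expected result
+        # rows, deterministic across runs instead of depending on wall-clock
+        # time.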
if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb) + + init_num = 0 + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + if i == 0: + init_num = 2 + i + if watermark is not None: + init_num += 1 + else: + init_num += 1 + time.sleep(int(max_delay.replace("s", ""))) + if tag_value: + tdSql.query(f'select {tag_value} from {self.stb_name}') + tag_value_list = tdSql.queryResult + if not fill_value: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts;', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition) + + def run(self): + for delete in [True, False]: + for fill_history_value in [0, 1]: + self.watermark_max_delay_interval_ext(interval=random.choice([15]), watermark=random.randint(20, 25), max_delay=f"{random.randint(5, 6)}s", delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/max_delay_session.py 
b/tests/system-test/8-stream/max_delay_session.py new file mode 100644 index 0000000000..1a734e0e61 --- /dev/null +++ b/tests/system-test/8-stream/max_delay_session.py @@ -0,0 +1,100 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_max_delay_session(self, session, watermark, max_delay, fill_history_value=None): + tdLog.info(f"*** testing stream max_delay+session: session: {session}, watermark: {watermark}, max_delay: {max_delay}, fill_history_value: {fill_history_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + self.tdCom.date_time = self.tdCom.dataDict["start_ts"] + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s' + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value) + init_num = 0 + for i in range(self.tdCom.range_count): + if i == 0: + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + else: + self.tdCom.date_time = window_close_ts + 1 + window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session']) + + if watermark_value is not None: + for ts_value in [self.tdCom.date_time, window_close_ts-1]: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + 
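# the child-table destination is checked with the stb output column list; the plain table uses its own list + 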
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + if not fill_history_value: + tdSql.checkEqual(tdSql.queryRows, init_num) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if i == 0: + init_num = 2 + i + else: + init_num += 1 + if watermark_value is not None: + expected_value = init_num + else: + expected_value = i + 1 + + if not fill_history_value: + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay) + else: + self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay) + else: + self.tdCom.update_delete_history_data(delete=True) + for tbname in [self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + else: + self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)') + + def run(self): + for fill_history_value in [None, 1]: + for watermark in [None, random.randint(20, 30)]: + self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/partition_interval.py b/tests/system-test/8-stream/partition_interval.py new file mode 100644 index 0000000000..0424932bf8 --- /dev/null +++ b/tests/system-test/8-stream/partition_interval.py @@ -0,0 +1,105 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def partitionby_interval(self, interval=None, partition_by_elm="tbname", ignore_expired=None): + tdLog.info(f"*** testing stream 
partition+interval: interval: {interval}, partition_by: {partition_by_elm}, ignore_expired: {ignore_expired} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + ctb_name_list = list() + for i in range(1, self.tdCom.range_count): + ctb_name = self.tdCom.get_long_name() + ctb_name_list.append(ctb_name) + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + if interval is not None: + source_sql = f'select _wstart AS wstart, {self.tdCom.partition_by_stb_source_select_str} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s)' + else: + source_sql = f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm}' + + # create stb/ctb/tb stream + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=source_sql, ignore_expired=ignore_expired) + # insert data + count = 1 + step_count = 1 + for i in range(1, self.tdCom.range_count): + if i == 1: + record_window_close_ts = self.tdCom.date_time - 15 * self.tdCom.offset + ctb_name = self.tdCom.get_long_name() + self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name) + if i % 2 == 0: + step_count += i + for j in range(count, step_count): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s') + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s') + count += i + else: + step_count += 1 + for _ in range(2): + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s') + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s') + count += 1 + # check result + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + if interval is not None: + self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + if self.tdCom.disorder: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + for ctb_name in ctb_name_list: + self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=record_window_close_ts) + if ignore_expired: + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + if interval is not None: + tdSql.query(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;') + res1 = tdSql.queryResult + tdSql.query(f'select {colname} 
from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + else: + for colname in self.tdCom.partition_by_downsampling_function_list: + if "first" not in colname and "last" not in colname: + if interval is not None: + self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;') + else: + self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;') + + def run(self): + for interval in [None, 10]: + for ignore_expired in [0, 1]: + self.partitionby_interval(interval=interval, partition_by_elm="tbname", ignore_expired=ignore_expired) + self.partitionby_interval(interval=10, partition_by_elm="t1") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/pause_resume_test.py b/tests/system-test/8-stream/pause_resume_test.py new file mode 100644 index 0000000000..421f499a3d --- /dev/null +++ b/tests/system-test/8-stream/pause_resume_test.py @@ -0,0 +1,157 @@ +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + self.date_time = 1694054350870 + self.interval = 15 + + def pause_resume_test(self, interval, partition="tbname", delete=False, fill_history_value=None, pause=True, resume=True, ignore_untreated=False): + tdLog.info(f"*** testing stream pause+resume: interval: {interval}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, ignore_untreated: {ignore_untreated} ***") + date_time = self.date_time + if_exist_value_list = [None, True] + if_exist = random.choice(if_exist_value_list) + reverse_check = True if ignore_untreated else False + range_count = (self.tdCom.range_count + 3) * 3 + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + 
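# each supported partition expression maps to an alias that is later embedded in generated subtable names + 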
partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname" or partition is None: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + if partition: + partition_elm = f'partition by {partition} {partition_elm_alias}' + else: + partition_elm = "" + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value) + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value) + for i in range(range_count): + ts_value = str(date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s' + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.delete and i%2 != 0: 
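+ # on odd iterations, delete the row that was just written to exercise stream delete handling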
+ self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value) + date_time += 1 + if partition: + partition_elm = f'partition by {partition}' + else: + partition_elm = "" + # if i == int(range_count/2): + if i > 2 and i % 3 == 0: + for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']: + if if_exist is not None: + tdSql.execute(f'pause stream if exists {stream_name}_no_exist') + tdSql.error(f'pause stream if not exists {stream_name}') + tdSql.error(f'pause stream {stream_name}_no_exist') + self.tdCom.pause_stream(stream_name, if_exist) + if pause and not resume and range_count-i <= 3: + time.sleep(self.tdCom.default_interval) + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart') + res_after_pause = tdSql.queryResult + if resume: + if i > 2 and i % 3 != 0: + for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']: + if if_exist is not None: + tdSql.execute(f'resume stream if exists {stream_name}_no_exist') + tdSql.error(f'resume stream if not exists {stream_name}') + self.tdCom.resume_stream(stream_name, if_exist, None, ignore_untreated) + if pause and not resume: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart') + res_without_resume = tdSql.queryResult + tdSql.checkEqual(res_after_pause, res_without_resume) + else: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check) + else: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check) + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + tdSql.query(f'select * from {self.ctb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from 
`{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + elif partition is None: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + + def run(self): + for delete in [True, False]: + for fill_history_value in [0, 1]: + # pause/resume + self.pause_resume_test(interval=self.interval, partition="tbname", ignore_untreated=False, fill_history_value=fill_history_value, delete=delete) + self.pause_resume_test(interval=self.interval, partition="tbname", ignore_untreated=True, fill_history_value=fill_history_value, delete=delete) + # self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", resume=False, fill_history_value=fill_history_value, delete=delete) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/8-stream/scalar_function.py b/tests/system-test/8-stream/scalar_function.py new file mode 100644 index 0000000000..56537e2f54 --- /dev/null +++ b/tests/system-test/8-stream/scalar_function.py @@ -0,0 +1,177 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def scalar_function(self, partition="tbname", fill_history_value=None): + tdLog.info(f"*** testing stream scalar function partition: {partition}, fill_history_value: {fill_history_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + tdLog.info("preparing data ...") + self.tdCom.prepare_data(fill_history_value=fill_history_value) + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));') + if fill_history_value is None: + fill_history = "" + else: + tdLog.info("inserting fill_history data ...") + fill_history = f'fill_history {fill_history_value}' + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + + # self.tdCom.write_latency(self.case_name) + math_function_list = ["abs", "acos", "asin", "atan", "ceil", "cos", "floor", "log", "pow", "round", "sin", "sqrt", "tan"] + string_function_list = ["char_length", "concat", "concat_ws", "length", "lower", "ltrim", "rtrim", "substr", "upper"] + for math_function in 
math_function_list: + tdLog.info(f"testing function {math_function} ...") + tdLog.info(f"creating stream for function {math_function} ...") + if math_function in ["log", "pow"]: + tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_ct1;') + tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_tb;') + else: + tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_ct1;') + tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_tb;') + self.tdCom.check_stream_field_type(f"describe output_{math_function}_stb", math_function) + self.tdCom.check_stream_field_type(f"describe output_{math_function}_ctb", math_function) + self.tdCom.check_stream_field_type(f"describe output_{math_function}_tb", math_function) + for tbname in ["scalar_ct1", "scalar_tb"]: + tdLog.info(f"function {math_function}: inserting data for tb --- {tbname} ...") + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);') + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + if i%2 == 0: + tdLog.info(f"function {math_function}: update testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + else: + tdLog.info(f"function {math_function}: delete testing ...") + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + if fill_history_value: + tdLog.info(f"function {math_function}: disorder testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values 
({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + if math_function == "log" or math_function == "pow": + tdLog.info(f"function {math_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_stb partition by {partition} order by ts;') + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_ctb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_tb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_tb;') + else: + tdLog.info(f"function {math_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1), {math_function}(c2) from scalar_stb partition by {partition} order by ts;') + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_ctb;', f'select {math_function}(c1), {math_function}(c2) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_tb;', f'select {math_function}(c1), {math_function}(c2) from scalar_tb;') + tdSql.execute(f'drop stream if exists stb_{math_function}_stream') + tdSql.execute(f'drop stream if exists ctb_{math_function}_stream') + tdSql.execute(f'drop stream if exists tb_{math_function}_stream') + + for string_function in string_function_list: + tdLog.info(f"testing function {string_function} ...") + tdLog.info(f"creating stream for function {string_function} ...") + if string_function == "concat": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;') + elif string_function == "concat_ws": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream 
trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;') + elif string_function == "substr": + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;') + else: + tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb partition by {partition};') + tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;') + tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;') + self.tdCom.check_stream_field_type(f"describe output_{string_function}_stb", string_function) + self.tdCom.check_stream_field_type(f"describe output_{string_function}_ctb", string_function) + self.tdCom.check_stream_field_type(f"describe output_{string_function}_tb", string_function) + for tbname in ["scalar_ct1", "scalar_tb"]: + tdLog.info(f"function {string_function}: inserting data for tb --- {tbname} ...") + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);') + + + for i in range(self.tdCom.range_count): + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");') + if i%2 == 0: + tdLog.info(f"function {string_function}: update testing...") + tdSql.execute(f'insert into scalar_ct1 values 
({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");') + else: + tdLog.info(f"function {string_function}: delete testing ...") + dt = f'cast({self.tdCom.date_time-1} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + if fill_history_value: + tdLog.info(f"function {string_function}: disorder testing ...") + tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");') + dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)' + tdSql.execute(f'delete from scalar_ct1 where ts = {dt};') + tdSql.execute(f'delete from scalar_tb where ts = {dt};') + + + if string_function == "concat": + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;') + elif string_function == "concat_ws": + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;') + elif string_function == "substr": + tdLog.info(f"function {string_function}: confirming query result 
...") + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_ctb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_tb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;') + else: + tdLog.info(f"function {string_function}: confirming query result ...") + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb order by ts;') + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_ctb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;') + self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_tb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;') + + tdSql.execute(f'drop stream if exists stb_{string_function}_stream') + tdSql.execute(f'drop stream if exists ctb_{string_function}_stream') + tdSql.execute(f'drop stream if exists tb_{string_function}_stream') + + def run(self): + self.scalar_function(partition="tbname", fill_history_value=1) + self.scalar_function(partition="tbname,c1,t1", fill_history_value=1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_interval.py b/tests/system-test/8-stream/window_close_interval.py new file mode 100644 index 0000000000..31a566a0f8 --- /dev/null +++ b/tests/system-test/8-stream/window_close_interval.py @@ -0,0 +1,256 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def window_close_interval(self, interval, watermark=None, ignore_expired=None, partition="tbname", fill_value=None, delete=False): + tdLog.info(f"*** testing stream window_close+interval: interval: {interval}, watermark: {watermark}, ignore_expired: {ignore_expired}, partition: {partition}, fill: {fill_value}, delete: {delete} ***") + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + if 
watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}' + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname": + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=ctb_subtable_value, fill_value=fill_value) + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11' + self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, 
{self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=tb_subtable_value, fill_value=fill_value) + + start_time = self.tdCom.date_time + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + if i == 0: + record_window_close_ts = window_close_ts + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + + if self.delete and i%2 != 0: + dt = f'cast({ts_value-num*self.tdCom.offset} as timestamp)' + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt) + if not fill_value: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + tdSql.checkEqual(tdSql.queryRows, i) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + if not fill_value: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]: + if tbname != self.tb_stream_des_table: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}') + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}') + + tdSql.checkEqual(tdSql.queryRows, i) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.tdCom.update and i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + if not fill_value: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1) + else: + self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from 
{tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1) + if self.tdCom.disorder and not fill_value: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts) + if ignore_expired: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}') + res1 = tdSql.queryResult + tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}') + res2 = tdSql.queryResult + tdSql.checkNotEqual(res1, res2) + else: + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1) + else: + self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1) + if self.tdCom.subtable: + tdSql.query(f'select * from {self.ctb_name}') + for tname in [self.stb_name, self.ctb_name]: + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count) + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + tdSql.query(f'select * from {self.tb_name}') + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + elif partition == "tbname" and ptn_counter == 0: + tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + + if 
fill_value: + history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + + + if self.tdCom.update: + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + if i == 0: + record_window_close_ts = window_close_ts + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts) + if self.delete: + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts)) + self.tdCom.date_time = start_time + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value) + else: + if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()): + additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}" + else: + additional_options = 
f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11' + if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete: + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value) + else: + if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()): + additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}" + else: + additional_options = f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + + + def run(self): + for watermark in [None, random.randint(15, 20)]: + for ignore_expired in [0, 1]: + self.window_close_interval(interval=random.randint(10, 15), watermark=watermark, ignore_expired=ignore_expired) + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + for watermark in [None, random.randint(15, 20)]: + self.window_close_interval(interval=random.randint(10, 12), watermark=watermark, fill_value=fill_value) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/8-stream/window_close_session.py b/tests/system-test/8-stream/window_close_session.py new file mode 100644 index 0000000000..ddd366b45a --- /dev/null +++ b/tests/system-test/8-stream/window_close_session.py @@ -0,0 +1,98 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def watermark_window_close_session(self, session, watermark, fill_history_value=None, delete=True): + tdLog.info(f"*** testing stream window_close+session: session: {session}, watermark: {watermark}, fill_history: {fill_history_value}, delete: {delete} ***") + self.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value) + 
+        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
+        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
+        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
+        self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
+        if watermark is not None:
+            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
+        else:
+            watermark_value = None
+        # create stb/ctb/tb stream
+        # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value)
+        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
+        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
+        for i in range(self.tdCom.range_count):
+            if i == 0:
+                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
+            else:
+                self.tdCom.date_time = window_close_ts + 1
+                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
+            if watermark_value is not None:
+                expected_value = i + 1
+                for ts_value in [self.tdCom.date_time, window_close_ts-1]:
+                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
+                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
+                    if self.tdCom.update and i%2 == 0:
+                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
+                        self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
+                # for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
+                for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]:
+                    if tbname != self.tb_stream_des_table:
+                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
+                    else:
+                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
+                    if not fill_history_value:
+                        tdSql.checkEqual(tdSql.queryRows, i)
+            else:
+                expected_value = i
+            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
+            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
+            if self.tdCom.update and i%2 == 0:
+                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
+                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
+
+        if fill_history_value:
+            self.tdCom.update_delete_history_data(delete=delete)
+
+        # for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
+        if not fill_history_value:
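+            # without fill_history only windows that have actually closed are
+            # expected in the destination table, so the comparison against the
+            # source session query is limited to expected_value rows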
+            for tbname in [self.ctb_name, self.tb_name]:
+                if tbname != self.tb_name:
+                    self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
+                else:
+                    self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
+        else:
+            for tbname in [self.ctb_name, self.tb_name]:
+                if tbname != self.tb_name:
+                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')
+                else:
+                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')
+
+
+    def run(self):
+        for fill_history_value in [None, 1]:
+            for watermark in [None, random.randint(20, 25)]:
+                self.watermark_window_close_session(session=random.randint(10, 15), watermark=watermark, fill_history_value=fill_history_value)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/8-stream/window_close_session_ext.py b/tests/system-test/8-stream/window_close_session_ext.py
new file mode 100644
index 0000000000..0fc041e965
--- /dev/null
+++ b/tests/system-test/8-stream/window_close_session_ext.py
@@ -0,0 +1,83 @@
+import sys
+import threading
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.common import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+        self.tdCom = tdCom
+
+    def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, delete=False):
+        tdLog.info(f"*** testing stream window_close+session+exist_stb+custom_tag: session: {session}, partition: {partition}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
+        if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
+            partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
+        else:
+            partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
+        if not stb_field_name_value:
+            stb_field_name_value = self.tdCom.tb_filter_des_select_elm
+        self.tdCom.case_name = sys._getframe().f_code.co_name
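+        # tag_value is the projection of tags mapped into the existing super
+        # table (a single tag name in run() below); split() counts the
+        # whitespace-separated entries of that projection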
+        defined_tag_count = len(tag_value.split())
+        if watermark is not None:
+            self.case_name = "watermark" + sys._getframe().f_code.co_name
+        self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
+        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
+        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
+        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
+        self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
+        if subtable:
+            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None
+        else:
+            stb_subtable_value = None
+        if watermark is not None:
+            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
+        else:
+            watermark_value = None
+        # create stb/ctb/tb stream
+        self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, subtable_value=stb_subtable_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
+        for i in range(self.tdCom.range_count):
+            if i == 0:
+                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
+            else:
+                self.tdCom.date_time = window_close_ts + 1
+                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
+            if watermark_value is not None:
+                expected_value = i + 1
+                for ts_value in [self.tdCom.date_time, window_close_ts-1]:
+                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
+                    if self.tdCom.update and i%2 == 0:
+                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
+            else:
+                expected_value = i
+            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
+            if self.tdCom.update and i%2 == 0:
+                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
+
+        if fill_history_value:
+            self.tdCom.update_delete_history_data(delete=delete)
+        if tag_value:
+            tdSql.query(f'select {tag_value} from {self.stb_name}')
+            tag_value_list = tdSql.queryResult
+        self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart limit {expected_value};', sorted=True, defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition)
+
+    def run(self):
+        #! TD-25893
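+        # the fill_history variant below stays disabled until TD-25893 is
+        # resolved; only the use_exist_stb + delete=True path runs for now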
+        # self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=False, fill_history_value=1)
+        self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=True)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/8-stream/window_close_state_window.py b/tests/system-test/8-stream/window_close_state_window.py
new file mode 100644
index 0000000000..4c978cb860
--- /dev/null
+++ b/tests/system-test/8-stream/window_close_state_window.py
@@ -0,0 +1,73 @@
+import sys
+import threading
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.common import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+        self.tdCom = tdCom
+
+    def window_close_state_window(self, state_window, delete=True):
+        tdLog.info(f"*** testing stream window_close+state_window: state_window: {state_window}, delete: {delete} ***")
+        self.case_name = sys._getframe().f_code.co_name
+        self.delete = delete
+        self.tdCom.prepare_data(state_window=state_window)
+        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
+        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
+        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
+        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
+        state_window_col_name = self.tdCom.dataDict["state_window"]
+        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close")
+        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close")
+        state_window_max = self.tdCom.dataDict['state_window_max']
+        state_window_value_inmem = 0
+        sleep_step = 0
+        for i in range(self.tdCom.range_count):
+            state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
+            while state_window_value == state_window_value_inmem:
+                state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
+                if sleep_step < self.tdCom.default_interval:
+                    sleep_step += 1
+                    time.sleep(1)
+                else:
+                    return
+            for j in range(2, self.tdCom.range_count+3):
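+                # keep inserting rows that share the same state value: the state
+                # window stays open until a row with a different value arrives,
+                # which is what lets the window_close trigger fire on the next i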
+                tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
+                tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
+                if self.tdCom.update and i%2 == 0:
+                    tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
+                    tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
+                if self.delete and i%2 != 0:
+                    dt = f'cast({self.tdCom.date_time-1} as timestamp)'
+                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
+                    self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
+                self.tdCom.date_time += 1
+            for tbname in [self.ctb_name, self.tb_name]:
+                if tbname != self.tb_name:
+                    self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
+                else:
+                    self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
+            state_window_value_inmem = state_window_value
+
+
+    def run(self):
+        for delete in [True, False]:
+            self.window_close_state_window(state_window="c1", delete=delete)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 1c50e5bbbe..b1625997b4 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -177,7 +177,7 @@ if __name__ == "__main__":
             sys.exit(0)
 
         if key in ['-k', '--killValgrind']:
-            killValgrind = 0
+            killValgrind = 1
 
         if key in ['-e', '--execCmd']:
            try:
diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c
index fbfacd9eda..6b774b3eff 100644
--- a/utils/test/c/tmqSim.c
+++ b/utils/test/c/tmqSim.c
@@ -696,7 +696,7 @@ static int32_t g_once_commit_flag = 0;
 
 static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
   taosFprintfFile(g_fp, "tmq_commit_cb_print() commit %d\n", code);
-  if (0 == g_once_commit_flag) {
+  if (0 == g_once_commit_flag && code == 0) {
     g_once_commit_flag = 1;
     notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT);
   }
diff --git a/utils/test/c/varbinary_test.c b/utils/test/c/varbinary_test.c
index e29b94ad1f..389f409e1a 100644
--- a/utils/test/c/varbinary_test.c
+++ b/utils/test/c/varbinary_test.c
@@ -85,6 +85,7 @@ void varbinary_sql_test() {
 
   // test insert
   pRes = taos_query(taos, "insert into tb2 using stb tags (2, 'tb2_bin1', 093) values (now + 2s, 'nchar1', 892, 0.3)");
+  printf("error:%s", taos_errstr(pRes));
   ASSERT(taos_errno(pRes) != 0);
 
   pRes = taos_query(taos, "insert into tb3 using stb tags (3, 'tb3_bin1', 0x7f829) values (now + 3s, 'nchar1', 0x7f829, 0.3)");