Merge branch 'enh/dev3New' into enh/ipWhiteList
commit 7ce8f694dc
@@ -176,6 +176,14 @@ IF(APPLE)
     set(THREADS_PREFER_PTHREAD_FLAG ON)
 ENDIF()
 
+IF(TD_WINDOWS)
+    IF(MSVC)
+        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:8388608")
+    ELSEIF(MINGW)
+        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--stack,8388608")
+    ENDIF()
+ENDIF()
+
 MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
 
 set(TD_DEPS_DIR "x86")
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.1.1.0.alpha")
+  SET(TD_VER_NUMBER "3.2.0.0.alpha")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -1,7 +1,7 @@
 # cos
 ExternalProject_Add(mxml
     GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
-    GIT_TAG release-2.12
+    GIT_TAG v2.12
     SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
     #BINARY_DIR ""
     BUILD_IN_SOURCE TRUE
@@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf
 
 ### Step 1
 
-If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/*` assuming the `dataDir` is configured as `/var/lib/taos`.
+If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/*` assuming the `dataDir` is configured as `/var/lib/taos`.
 
 :::note
 FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/*` command.
@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDen
 
 There are two ways to install taosBenchmark:
 
-- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details.
+- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
 
 - Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
-There are two ways to install taosKeeper:
+Methods of installing taosKeeper:
 
-- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](/operation/pkg-install) for details.
+- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
 
 - You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
 ## Configuration and Launch
@@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
 1. Linux operating system
 2. Java 8 and Maven installed
 3. Git, curl, and vi installed
-4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
+4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
 
 ## Install Kafka
@@ -0,0 +1,440 @@
---
sidebar_label: Seeq
title: Seeq
description: How to use Seeq and TDengine to perform time series data analysis
---

# How to use Seeq and TDengine to perform time series data analysis

## Introduction

Seeq is advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.

With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.

### Install Seeq

Please download the Seeq Server and Seeq Data Lab software installation packages from the [Seeq official website](https://www.seeq.com/customer-download).

### Install and start Seeq Server

```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install

sudo seeq service enable
sudo seeq start
```

### Install and start Seeq Data Lab Server

Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).

```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server

# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)

# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>

# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```

### Install TDengine on-premise instance

See [Quick Install from Package](../../get-started).

### Or use TDengine Cloud

Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.

## Configure Seeq to access TDengine

1. Get the data storage location:

```
sudo seeq config get Folders/Data
```

2. Download the TDengine Java connector from maven.org (use the latest version; currently 3.2.5: https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar) and copy it into the `plugins\lib` directory under the data storage location from step 1.

3. Restart the Seeq server:

```
sudo seeq restart
```

4. Input the license: use a browser to access ip:34216 and input the license according to the guide.
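
Before wiring Seeq to TDengine, it can be worth verifying that the REST endpoint the JDBC URL will target (`jdbc:TAOS-RS://...:6041`) is actually reachable. The sketch below is illustrative only and assumes the `taospy` Python package is installed and TDengine is running locally with default credentials:

```Python
# Hypothetical connectivity check, not part of the Seeq setup itself.
# Assumes: pip install taospy; TDengine REST service listening on port 6041.
import taosrest

conn = taosrest.connect(url="http://127.0.0.1:6041", user="root", password="taosdata")
res = conn.query("SELECT server_version()")
print(res.data)  # e.g. [['3.x.x.x']] if the endpoint is reachable
```

If this fails, fix the TDengine side first; no Seeq data source configuration will work against an unreachable endpoint.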

## How to use Seeq to analyze time-series data stored in TDengine

This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.

### Scenario Overview

The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict how the electricity consumption will develop and purchase additional equipment to support it. The electricity consumption varies with monthly orders, and seasonal variations also affect the power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.

### Schema

```
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
```

![Seeq demo schema](./seeq/seeq-demo-schema.webp)

### Mock data

```
python mockdata.py
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
```

The source code is hosted at [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
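
For orientation, a minimal sketch of what a generator like `mockdata.py` could look like is shown below. This is an assumption-laden illustration, not the actual script from the repository above; it presumes the `taospy` package and a local TDengine instance with default credentials:

```Python
# Hypothetical mock-data generator; the real mockdata.py lives in the repository above.
import random
import taos  # taospy

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute("CREATE STABLE IF NOT EXISTS power.meters "
             "(ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20))")
conn.execute("CREATE TABLE IF NOT EXISTS power.goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT)")

start_ms = 1420070400000  # 2015-01-01 00:00:00 UTC, in milliseconds
for day in range(365 * 8):  # one reading per day, 2015 through 2022
    doy = day % 365
    season = 1.5 if 150 < doy < 240 else 1.0  # crude Northern Hemisphere summer peak
    num = int(random.uniform(100, 200) * season)
    temperature = round(random.uniform(10, 35), 1)
    goods = random.randint(1, 100)
    conn.execute(
        f"INSERT INTO power.d0 USING power.meters TAGS ('device0') "
        f"VALUES ({start_ms + day * 86400000}, {num}, {temperature}, {goods})"
    )
conn.close()
```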

### Using Seeq for data analysis

#### Data Source configuration

Please log in as a Seeq administrator and create the following data sources.

- Power

```
{
  "QueryDefinitions": [
    {
      "Name": "PowerNum",
      "Type": "SIGNAL",
      "Sql": "SELECT ts, num FROM meters",
      "Enabled": true,
      "TestMode": false,
      "TestQueriesDuringSync": true,
      "InProgressCapsulesEnabled": false,
      "Variables": null,
      "Properties": [
        {
          "Name": "Name",
          "Value": "Num",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Interpolation Method",
          "Value": "linear",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Maximum Interpolation",
          "Value": "2day",
          "Sql": null,
          "Uom": "string"
        }
      ],
      "CapsuleProperties": null
    }
  ],
  "Type": "GENERIC",
  "Hostname": null,
  "Port": 0,
  "DatabaseName": null,
  "Username": "root",
  "Password": "taosdata",
  "InitialSql": null,
  "TimeZone": null,
  "PrintRows": false,
  "UseWindowsAuth": false,
  "SqlFetchBatchSize": 100000,
  "UseSSL": false,
  "JdbcProperties": null,
  "GenericDatabaseConfig": {
    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
    "ResolutionInNanoseconds": 1000,
    "ZonedColumnTypes": []
  }
}
```

- Goods

```
{
  "QueryDefinitions": [
    {
      "Name": "PowerGoods",
      "Type": "CONDITION",
      "Sql": "SELECT ts1, ts2, goods FROM power.goods",
      "Enabled": true,
      "TestMode": false,
      "TestQueriesDuringSync": true,
      "InProgressCapsulesEnabled": false,
      "Variables": null,
      "Properties": [
        {
          "Name": "Name",
          "Value": "Goods",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Maximum Duration",
          "Value": "10days",
          "Sql": null,
          "Uom": "string"
        }
      ],
      "CapsuleProperties": [
        {
          "Name": "goods",
          "Value": "${columnResult}",
          "Column": "goods",
          "Uom": "string"
        }
      ]
    }
  ],
  "Type": "GENERIC",
  "Hostname": null,
  "Port": 0,
  "DatabaseName": null,
  "Username": "root",
  "Password": "taosdata",
  "InitialSql": null,
  "TimeZone": null,
  "PrintRows": false,
  "UseWindowsAuth": false,
  "SqlFetchBatchSize": 100000,
  "UseSSL": false,
  "JdbcProperties": null,
  "GenericDatabaseConfig": {
    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
    "ResolutionInNanoseconds": 1000,
    "ZonedColumnTypes": []
  }
}
```

- Temperature

```
{
  "QueryDefinitions": [
    {
      "Name": "PowerNum",
      "Type": "SIGNAL",
      "Sql": "SELECT ts, temperature FROM meters",
      "Enabled": true,
      "TestMode": false,
      "TestQueriesDuringSync": true,
      "InProgressCapsulesEnabled": false,
      "Variables": null,
      "Properties": [
        {
          "Name": "Name",
          "Value": "Temperature",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Interpolation Method",
          "Value": "linear",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Maximum Interpolation",
          "Value": "2day",
          "Sql": null,
          "Uom": "string"
        }
      ],
      "CapsuleProperties": null
    }
  ],
  "Type": "GENERIC",
  "Hostname": null,
  "Port": 0,
  "DatabaseName": null,
  "Username": "root",
  "Password": "taosdata",
  "InitialSql": null,
  "TimeZone": null,
  "PrintRows": false,
  "UseWindowsAuth": false,
  "SqlFetchBatchSize": 100000,
  "UseSSL": false,
  "JdbcProperties": null,
  "GenericDatabaseConfig": {
    "DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
    "ResolutionInNanoseconds": 1000,
    "ZonedColumnTypes": []
  }
}
```

#### Launch Seeq Workbench

Log in to the Seeq server at IP:port and create a new Seeq Workbench, then select data sources and choose the appropriate tools for data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for details.

![Seeq Workbench](./seeq/seeq-demo-workbench.webp)

#### Use Seeq Data Lab Server for advanced data analysis

Log in to the Seeq service at IP:port and create a new Seeq Data Lab. Then you can use advanced tools, including the Python environment and machine learning add-ons, for more complex analysis.

```Python
from seeq import spy
spy.options.compatibility = 189
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import mlforecast
import lightgbm as lgb
from mlforecast.target_transforms import Differences
from sklearn.linear_model import LinearRegression

ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
print(ds)

sig = ds.loc[ds['Name'].isin(['Num'])]
print(sig)

data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
print("data.info()")
data.info()
print(data)
#data.plot()

print("data[Num].info()")
data['Num'].info()
da = data['Num'].index.tolist()
#print(da)

li = data['Num'].tolist()
#print(li)

data2 = pd.DataFrame()
data2['ds'] = da
print('1st data2 ds info()')
data2['ds'].info()

#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
data2['y'] = li
print('2nd data2 ds info()')
data2['ds'].info()
print(data2)

data2.insert(0, column = "unique_id", value="unique_id")

print("Forecasting ...")

forecast = mlforecast.MLForecast(
    models = lgb.LGBMRegressor(),
    freq = 1,
    lags=[365],
    target_transforms=[Differences([365])],
)

forecast.fit(data2)
predicts = forecast.predict(365)

pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
plt.show()
```

Example output:

![Seeq forecast result](./seeq/seeq-forecast-result.webp)

### How to configure a Seeq data source to access TDengine Cloud

Configuring a Seeq data source to connect to TDengine Cloud is essentially the same as connecting to a local TDengine installation. After logging in to TDengine Cloud, select "Programming - Java" and copy the JDBC URL string with the token provided. Then, use this JDBC URL string as the DatabaseJdbcUrl value in the Seeq Data Source configuration.

Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.

#### Example TDengine Cloud data source

```
{
  "QueryDefinitions": [
    {
      "Name": "CloudVoltage",
      "Type": "SIGNAL",
      "Sql": "SELECT ts, voltage FROM test.meters",
      "Enabled": true,
      "TestMode": false,
      "TestQueriesDuringSync": true,
      "InProgressCapsulesEnabled": false,
      "Variables": null,
      "Properties": [
        {
          "Name": "Name",
          "Value": "Voltage",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Interpolation Method",
          "Value": "linear",
          "Sql": null,
          "Uom": "string"
        },
        {
          "Name": "Maximum Interpolation",
          "Value": "2day",
          "Sql": null,
          "Uom": "string"
        }
      ],
      "CapsuleProperties": null
    }
  ],
  "Type": "GENERIC",
  "Hostname": null,
  "Port": 0,
  "DatabaseName": null,
  "Username": "root",
  "Password": "taosdata",
  "InitialSql": null,
  "TimeZone": null,
  "PrintRows": false,
  "UseWindowsAuth": false,
  "SqlFetchBatchSize": 100000,
  "UseSSL": false,
  "JdbcProperties": null,
  "GenericDatabaseConfig": {
    "DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
    "SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
    "ResolutionInNanoseconds": 1000,
    "ZonedColumnTypes": []
  }
}
```

#### Seeq Workbench with TDengine Cloud data source example

![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)

## Conclusion

By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities.

This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.

Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements and enabling predictive and prescriptive analytics applications.
Binary files not shown (4 new images added: 13 KiB, 56 KiB, 26 KiB, 47 KiB).
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
 
 import Release from "/components/ReleaseV3";
 
+## 3.1.1.0
+
+<Release type="tdengine" version="3.1.1.0" />
+
 ## 3.1.0.3
 
 <Release type="tdengine" version="3.1.0.3" />
@@ -1004,7 +1004,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
 - httpConnectTimeout: connection creation timeout, in ms, default 5000 ms. Effective only for WebSocket connections.
 - messageWaitTimeout: data transfer timeout, in ms, default 10000 ms. Effective only for WebSocket connections.
 - httpPoolSize: the maximum number of parallel requests on one connection. Effective only for WebSocket connections.
-For other parameters, see: [Consumer parameter list](../../../develop/tmq#创建-consumer-以及consumer-group)
+For other parameters, see: [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group)
 
 #### Subscribe and consume data
|
|||
consumer.close()
|
||||
```
|
||||
|
||||
详情请参考:[数据订阅](../../../develop/tmq)
|
||||
详情请参考:[数据订阅](../../develop/tmq)
|
||||
|
||||
#### 完整示例
|
||||
|
||||
|
@@ -1373,7 +1373,7 @@ public static void main(String[] args) throws Exception {
 
 **Solution**: upgrade to taos-jdbcdriver 3.0.2 or later.
 
-For other questions, see the [FAQ](../../../train-faq/faq)
+For other questions, see the [FAQ](../../train-faq/faq)
 
 ## API reference
@@ -352,7 +352,7 @@ client.put(&sml_data)?
 
 ### Data subscription
 
-TDengine starts subscriptions through the message queue [TMQ](../../../taos-sql/tmq/).
+TDengine starts subscriptions through the message queue [TMQ](../../taos-sql/tmq/).
 
 #### Create a Topic
@@ -491,7 +491,7 @@ let taos = pool.get()?;
 
 ## FAQ
 
-See the [FAQ](../../../train-faq/faq)
+See the [FAQ](../../train-faq/faq)
 
 ## API reference
@@ -11,7 +11,11 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDeng
 
 ## Installation
 
-- Installing the official TDengine installer will automatically install taosBenchmark
+There are two ways to install taosBenchmark:
+
+- Installing the official TDengine installer will automatically install taosBenchmark. For details, see [TDengine installation](../../get-started/).
+
+- Compile taos-tools separately and install it. For details, see the [taos-tools](https://github.com/taosdata/taos-tools) repository.
 
 ## Run
@@ -13,7 +13,12 @@ taosKeeper is the tool that exports monitoring metrics of TDengine 3.0. With sim
 
 ## Installation
 
-- Installing the official TDengine installer will automatically install taosKeeper
-There are two ways to install taosKeeper:
+Methods of installing taosKeeper:
 
+- Installing the official TDengine installer will automatically install taosKeeper. For details, see [TDengine installation](../../get-started/).
 
+- Compile taosKeeper separately and install it. For details, see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository.
 
 ## Configuration and running
@@ -1,80 +0,0 @@
---
title: Cluster Maintenance
description: TDengine provides several maintenance features to keep a cluster healthier and more efficient
---

To make a cluster run in a healthier and more efficient way, TDengine Enterprise provides maintenance features that help system administrators operate the cluster better.

## Data compaction

TDengine serves a variety of write scenarios, and in some of them its storage layout can lead to data amplification or holes in data files, which reduces storage efficiency and hurts query performance. To address these problems, TDengine Enterprise provides a data compaction feature, DATA COMPACT, which reorganizes stored data files, removes file holes and invalid data, and improves the organization of the data, thereby improving both storage and query efficiency.

**Syntax**

```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
```

**Effects**

- Scans and compacts all data files of all VNODEs in all VGROUPs of the specified database
- COMPACT removes deleted data and the data of dropped tables
- COMPACT merges multiple STT files
- The start time of the data to compact can be specified with the start with keyword
- The end time of the data to compact can be specified with the end with keyword

**Additional notes**

- COMPACT is asynchronous: a COMPACT command returns without waiting for the compaction to finish. If a new COMPACT task is launched while a previous COMPACT has not finished, the command waits for the previous task to finish before returning.
- COMPACT may block writes, but does not block queries
- The progress of a COMPACT cannot be observed
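
For illustration, a hypothetical way to kick off a time-bounded compaction from the Python client, assuming `taospy` is installed and an Enterprise build that accepts this statement:

```Python
# Hypothetical example: compact only the H1 2023 data of database db1.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# Returns immediately; the compaction itself runs asynchronously (see notes above).
conn.execute("COMPACT DATABASE db1 start with '2023-01-01 00:00:00' end with '2023-06-30 23:59:59'")
conn.close()
```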

## Cluster load rebalancing

When one or more nodes of a multi-replica cluster restart after an upgrade or for other reasons, the load across the dnodes may become unbalanced; in the extreme case, the leaders of all vgroups end up on the same dnode. To fix this, use the following command

```sql
balance vgroup leader;
```

**Function**

Distributes the leaders of all vgroups evenly across their replica nodes. The command forces each vgroup to re-elect, changing its leader in the process, and thereby eventually spreads the leaders evenly.

**Note**

Raft elections are inherently random, so the even distribution produced by re-election is also probabilistic and will not be perfectly uniform. **A side effect of this command is that it affects queries and writes**: during a vgroup re-election, from the start of the election until the new leader is elected, that vgroup can neither be written to nor queried. The election usually completes within seconds. All vgroups are re-elected one by one in sequence.

## Restoring a data node

In a multi-node cluster with three replicas, if the disk of a dnode is damaged, that dnode exits automatically, while the other dnodes in the cluster continue to provide write and query services.

After replacing the damaged disk, the `restore dnode` command can bring a dnode that exited back into the cluster by restoring some or all of the logical nodes on it. The feature relies on data replication from the other replicas, so it only works when the cluster has at least 3 dnodes and the replica count is 3.

```sql
restore dnode <dnode_id>;           # restore the mnode, all vnodes, and the qnode on the dnode
restore mnode on dnode <dnode_id>;  # restore the mnode on the dnode
restore vnode on dnode <dnode_id>;  # restore all vnodes on the dnode
restore qnode on dnode <dnode_id>;  # restore the qnode on the dnode
```

**Limitations**
- This feature restores nodes through the existing replication mechanism; it is not disaster recovery or restore-from-backup. For an mnode or vnode to be restored, the precondition is that its other two replicas are still working normally.
- The command cannot repair damaged or missing individual files in the data directory. If individual files or data in an mnode or vnode are damaged, it is not possible to restore just that file or block of data; in that case you can choose to clear all data of that mnode/vnode and then restore it.

## Vgroup splitting (Scale Out)

When a vgroup is overloaded in CPU or disk usage because it has too many child tables, you can, after adding a dnode, split the vgroup into two with the `split vgroup` command. After the split, the two newly produced vgroups take over the read and write services previously provided by the single vgroup. This is also the scale-out capability TDengine provides to Enterprise users.

```sql
split vgroup <vgroup_id>
```

**Notes**
- For single-replica vgroups, the total disk space used by historical time-series data may double after the split. Before performing the operation, add dnodes so the cluster has enough CPU and disk resources, to avoid resource shortages.
- The command is a DB-level transaction; while it runs, other management transactions on the current DB are rejected. Other DBs in the cluster are not affected.
- Read and write services remain available during the split, but a brief, perceptible interruption of reads and writes may occur.
- Streams and subscriptions are not supported during the split. After the split finishes, the historical WAL is cleared.
- The split tolerates node crash/restart failures, but not node disk failures.
@@ -1,56 +0,0 @@
---
title: Multi-Tier Storage
---

## Multi-Tier Storage

Note: multi-tier storage is only supported in the Enterprise edition.

By default, TDengine stores all data under the /var/lib/taos directory, with the data files of each vnode in a distinct subdirectory. To expand storage capacity, reduce file-read bottlenecks, and increase data throughput, TDengine can use multiple mounted disks at the same time via the dataDir configuration parameter.

In addition, TDengine offers tiered data storage: data from different time ranges can be stored in directories on different storage media, so data of different "temperatures" lives on different media, making full use of storage and saving cost. For example, the most recently collected data, which is accessed frequently and needs high read performance, can be stored on SSDs, while data beyond a certain age, which is queried less often, can be stored on cheaper HDDs.

Multi-tier storage supports 3 tiers, with up to 16 mount points per tier.

TDengine multi-tier storage is configured as follows (in the /etc/taos/taos.cfg configuration file):

```
dataDir [path] <level> <primary>
```

- path: the directory path of the mount point
- level: the storage tier of the medium, one of 0, 1, 2.
  Tier 0 stores the newest data, tier 1 the next newest, and tier 2 the oldest; if omitted, the default is 0.
  Data flows between tiers as: tier 0 -> tier 1 -> tier 2.
  Multiple disks may be mounted at the same tier, and the data files at that tier are distributed across all of its disks.
  Note that the movement of data across storage tiers is performed automatically by the system; no user intervention is needed.
- primary: whether this is the primary mount point, 0 (no) or 1 (yes); if omitted, the default is 1.

Only one primary mount point (level=0, primary=1) is allowed in the configuration, for example:

```
dataDir /mnt/data1 0 1
dataDir /mnt/data2 0 0
dataDir /mnt/data3 1 0
dataDir /mnt/data4 1 0
dataDir /mnt/data5 2 0
dataDir /mnt/data6 2 0
```

:::note

1. Tier-skipping configurations are not allowed; the valid configurations are: tier 0 only, tier 0 + tier 1, and tier 0 + tier 1 + tier 2. Configuring level=0 and level=2 without level=1 is not allowed.
2. Do not manually remove a mounted disk that is in use; non-local network disks are currently not supported as mount points.
3. Multi-tier storage does not currently support removing a disk that has been mounted.

:::

## Tier-0 load balancing

In multi-tier storage there is exactly one primary mount point, which holds the most important metadata of the system; the main directories of all vnodes on a dnode also live on that dnode's primary mount point, so the dnode's write performance is limited by the IO throughput of a single disk.

Starting with TDengine 3.1.0.0, if a dnode is configured with multiple tier-0 mount points, the main directories of all vnodes on that dnode are distributed evenly across all tier-0 mount points, which then share the write load. With an optimized cluster configuration, and when network IO and other resources are not the bottleneck, test results show that the write capacity of the whole system scales linearly with the number of tier-0 mount points: as more tier-0 mount points are added, the write capacity of the whole system increases proportionally.

## Choosing among mount points within a tier

Normally, when TDengine picks a mount point within a tier for a new data file, it uses a round-robin strategy. In practice, however, disks may differ in capacity, or have the same capacity but different amounts of data written, which leads to unbalanced free space across disks, and the round-robin may then pick a disk with very little space left. To solve this, version 3.1.1.0 introduced a new configuration parameter, `minDiskFreeSize`: when the free space on a disk is less than or equal to this threshold, the disk is no longer selected for new data files. The unit of this parameter is bytes, and its value should be greater than 2GB, i.e. mount points with less than 2GB of free space are skipped.
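
For example, a hypothetical taos.cfg entry that skips any disk with 3 GiB or less of free space (value in bytes, per the description above):

```
minDiskFreeSize 3221225472
```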

@@ -23,7 +23,7 @@ TDengine Source Connector is used to read data out of TDengine in real time and
 1. A Linux operating system
 2. Java 8 and Maven installed
 3. Git, curl, and vi installed
-4. TDengine installed and started.
+4. TDengine installed and started. If not, see [Install and Uninstall](../../get-started/)
 
 ## Install Kafka
@@ -14,7 +14,7 @@ Seeq is advanced analytics software for manufacturing and the Industrial Interne
 
 ### How to install Seeq
 
-Download the relevant software, such as Seeq Server and Seeq Data Lab, from the (Seeq official website)[https://www.seeq.com/customer-download].
+Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq official website](https://www.seeq.com/customer-download).
 
 ### Install and start Seeq Server
@@ -29,7 +29,7 @@ sudo seeq start
 
 ### Install and start Seeq Data Lab Server
 
-Seeq Data Lab needs to be installed on a server separate from Seeq Server and connected to it through configuration. For detailed installation and configuration instructions, see (the Seeq official documentation)[https://support.seeq.com/space/KB/1034059842].
+Seeq Data Lab needs to be installed on a server separate from Seeq Server and connected to it through configuration. For detailed installation and configuration instructions, see [the Seeq official documentation](https://support.seeq.com/space/KB/1034059842).
 
 ```
 tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
@@ -51,7 +51,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve
 
 ## How to install a local TDengine instance
 
-See (the official documentation)[https://docs.taosdata.com/get-started/package/].
+See [the official documentation](../../get-started).
 
 ## How to access TDengine Cloud
 To connect Seeq to TDengine Cloud, apply for an account at https://cloud.taosdata.com and log in to see how to access TDengine Cloud.
@@ -64,7 +64,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve
 sudo seeq config get Folders/Data
 ```
 
-2. Download the TDengine Java connector package from maven.org; the latest version is currently (3.2.4)[https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar]. Copy it into the plugins\lib directory under the data storage location.
+2. Download the TDengine Java connector package from maven.org; the latest version is currently [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar). Copy it into the plugins\lib directory under the data storage location.
 
 3. Restart the seeq server
@@ -91,7 +91,7 @@ CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS
 CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
 ```
 
-!(Seeq demo schema)[./seeq/seeq-demo-schema.webp]
+![Seeq demo schema](./seeq/seeq-demo-schema.webp)
 
 ### How to mock data
@@ -99,7 +99,8 @@ CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
 python mockdata.py
 taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
 ```
-The source code is hosted at (the GitHub repository)[https://github.com/sangshuduo/td-forecasting].
+
+The source code is hosted at the [GitHub repository](https://github.com/sangshuduo/td-forecasting).
 
 ### How to use Seeq for data analysis
@@ -287,9 +288,9 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
 
 #### Use Seeq Workbench
 
-Log in to the Seeq service page and create a new Seeq Workbench. By selecting data source search results and choosing different tools as needed, you can visualize the data or make forecasts. For detailed usage, see (the official knowledge base)[https://support.seeq.com/space/KB/146440193/Seeq+Workbench].
+Log in to the Seeq service page and create a new Seeq Workbench. By selecting data source search results and choosing different tools as needed, you can visualize the data or make forecasts. For detailed usage, see [the official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench).
 
-!(Seeq Workbench)[./seeq/seeq-demo-workbench.webp]
+![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
 
 #### Use Seeq Data Lab Server for further data analysis
@@ -358,7 +359,7 @@ plt.show()
 
 Program output:
 
-!(Seeq forecast result)[./seeq/seeq-forecast-result.webp]
+![Seeq forecast result](./seeq/seeq-forecast-result.webp)
 
 ### Configure a Seeq data source to connect to TDengine Cloud
@@ -426,7 +427,7 @@ plt.show()
 
 #### Example Seeq Workbench UI with TDengine Cloud as the data source
 
-!(Seeq workbench with TDengine cloud)[./seeq/seeq-workbench-with-tdengine-cloud.webp]
+![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
 
 ## Summary
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
 
 import Release from "/components/ReleaseV3";
 
+## 3.1.1.0
+
+<Release type="tdengine" version="3.1.1.0" />
+
 ## 3.1.0.3
 
 <Release type="tdengine" version="3.1.0.3" />
@@ -268,6 +268,13 @@ typedef enum tmq_conf_res_t {
   TMQ_CONF_OK = 0,
 } tmq_conf_res_t;
 
+typedef enum tmq_res_t {
+  TMQ_RES_INVALID = -1,
+  TMQ_RES_DATA = 1,
+  TMQ_RES_TABLE_META = 2,
+  TMQ_RES_METADATA = 3,
+} tmq_res_t;
+
 typedef struct tmq_topic_assignment {
   int32_t vgId;
   int64_t currentOffset;
@@ -302,6 +309,8 @@ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t
+DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);  // The current offset is the offset of the last consumed message + 1
+DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
 
 DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
 DLL_EXPORT tmq_res_t   tmq_get_res_type(TAOS_RES *res);
 DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
 DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
 DLL_EXPORT int32_t     tmq_get_vgroup_id(TAOS_RES *res);
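For context, the subscription workflow these C APIs implement is mirrored by the higher-level connectors. Below is a rough sketch using the Python client's Kafka-style wrapper, assuming a recent `taospy` release that wraps these TMQ calls; the topic and connection values are illustrative:

```Python
# Hypothetical TMQ consumer sketch; mirrors tmq_subscribe / tmq_consumer_poll / commit usage.
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "grp1",
    "td.connect.ip": "localhost",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
})
consumer.subscribe(["topic1"])
try:
    while True:
        msg = consumer.poll(1)  # wait up to 1 second for a message
        if msg is None:
            continue
        if msg.error():
            raise msg.error()
        for block in msg.value():  # data blocks of one poll result
            print(block.fetchall())
        consumer.commit(msg)  # advance the committed offset
finally:
    consumer.close()
```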
@@ -309,34 +318,22 @@ DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
 DLL_EXPORT const char *tmq_err2str(int32_t code);
 
 /* ------------------------------ TAOSX -----------------------------------*/
 // note: following apis are unstable
-enum tmq_res_t {
-  TMQ_RES_INVALID = -1,
-  TMQ_RES_DATA = 1,
-  TMQ_RES_TABLE_META = 2,
-  TMQ_RES_METADATA = 3,
-};
-
 typedef struct tmq_raw_data {
   void    *raw;
   uint32_t raw_len;
   uint16_t raw_type;
 } tmq_raw_data;
 
-typedef enum tmq_res_t tmq_res_t;
-
-DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
-DLL_EXPORT tmq_res_t   tmq_get_res_type(TAOS_RES *res);
 DLL_EXPORT int32_t     tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
 DLL_EXPORT int32_t     tmq_write_raw(TAOS *taos, tmq_raw_data raw);
 DLL_EXPORT int         taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
 DLL_EXPORT int         taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname,
                                                         TAOS_FIELD *fields, int numFields);
 DLL_EXPORT void        tmq_free_raw(tmq_raw_data raw);
 
 // Returning null means error. Returned result need to be freed by tmq_free_json_meta
 DLL_EXPORT char  *tmq_get_json_meta(TAOS_RES *res);
 DLL_EXPORT void   tmq_free_json_meta(char *jsonMeta);
 
 /* ---------------------------- TAOSX END -------------------------------- */
 
 typedef enum {
@@ -29,54 +29,6 @@ extern "C" {
 
 #include "storageapi.h"
 
-// void* streamBackendInit(const char* path);
-// void streamBackendCleanup(void* arg);
-// SListNode* streamBackendAddCompare(void* backend, void* arg);
-// void streamBackendDelCompare(void* backend, void* arg);
-
-// <<<<<<< HEAD
-// typedef struct STdbState {
-//   rocksdb_t* rocksdb;
-//   rocksdb_column_family_handle_t** pHandle;
-//   rocksdb_writeoptions_t* writeOpts;
-//   rocksdb_readoptions_t* readOpts;
-//   rocksdb_options_t** cfOpts;
-//   rocksdb_options_t* dbOpt;
-//   struct SStreamTask* pOwner;
-//   void* param;
-//   void* env;
-//   SListNode* pComparNode;
-//   void* pBackend;
-//   char idstr[64];
-//   void* compactFactory;
-//   TdThreadRwlock rwLock;
-// =======
-// typedef struct STdbState {
-//   rocksdb_t* rocksdb;
-//   rocksdb_column_family_handle_t** pHandle;
-//   rocksdb_writeoptions_t* writeOpts;
-//   rocksdb_readoptions_t* readOpts;
-//   rocksdb_options_t** cfOpts;
-//   rocksdb_options_t* dbOpt;
-//   struct SStreamTask* pOwner;
-//   void* param;
-//   void* env;
-//   SListNode* pComparNode;
-//   void* pBackendHandle;
-//   char idstr[64];
-//   void* compactFactory;
-//
-//   TDB* db;
-//   TTB* pStateDb;
-//   TTB* pFuncStateDb;
-//   TTB* pFillStateDb;  // todo refactor
-//   TTB* pSessionStateDb;
-//   TTB* pParNameDb;
-//   TTB* pParTagDb;
-//   TXN* txn;
-//} STdbState;
-//>>>>>>> enh/dev3.0
 
 SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
 void          streamStateClose(SStreamState* pState, bool remove);
 int32_t       streamStateBegin(SStreamState* pState);
@@ -41,16 +41,16 @@ enum {
   STREAM_STATUS__PAUSE,
 };
 
-enum {
+typedef enum ETaskStatus {
   TASK_STATUS__NORMAL = 0,
   TASK_STATUS__DROPPING,
   TASK_STATUS__FAIL,
   TASK_STATUS__UNINIT,  // not used, a placeholder
   TASK_STATUS__STOP,
   TASK_STATUS__SCAN_HISTORY,  // stream task scan history data by using tsdbread in the stream scanner
   TASK_STATUS__HALT,          // pause, but not be manipulated by user command
   TASK_STATUS__PAUSE,         // pause
   TASK_STATUS__CK,  // stream task is in checkpoint status, no data are allowed to put into inputQ anymore
-};
+} ETaskStatus;
 
 enum {
   TASK_SCHED_STATUS__INACTIVE = 1,
@@ -39,7 +39,7 @@ int64_t taosGetOsUptime();
 int32_t taosGetEmail(char *email, int32_t maxLen);
 int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t maxLen);
 int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores);
-int32_t taosGetCpuCores(float *numOfCores);
+int32_t taosGetCpuCores(float *numOfCores, bool physical);
 void    taosGetCpuUsage(double *cpu_system, double *cpu_engine);
 int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma);
 int32_t taosGetTotalMemory(int64_t *totalKB);
@@ -152,7 +152,6 @@ typedef struct {
   int32_t vgId;
   int32_t vgStatus;
   int32_t vgSkipCnt;            // here used to mark the slow vgroups
-  // bool    receivedInfoFromVnode; // has already received info from vnode
   int64_t emptyBlockReceiveTs;  // once empty block is received, idle for ignoreCnt then start to poll data
   bool    seekUpdated;          // offset is updated by seek operator, therefore, not update by vnode rsp.
   SEpSet  epSet;
@@ -190,17 +189,13 @@ typedef struct {
 
 typedef struct {
   int64_t          refId;
-  int32_t          epoch;
   void*            pParam;
   __tmq_askep_fn_t pUserFn;
 } SMqAskEpCbParam;
 
 typedef struct {
   int64_t  refId;
-  int32_t  epoch;
   char     topicName[TSDB_TOPIC_FNAME_LEN];
-  // SMqClientVg* pVg;
-  // SMqClientTopic* pTopic;
   int32_t  vgId;
   uint64_t requestId;  // request id for debug purpose
 } SMqPollCbParam;
@@ -237,7 +232,6 @@ typedef struct {
   int64_t        refId;
-  int32_t        epoch;
   int32_t        waitingRspNum;
   int32_t        totalRspNum;
   int32_t        code;
   tmq_commit_cb* callbackFn;
   /*SArray* successfulOffsets;*/
@@ -445,7 +439,6 @@ static int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
   SMqCommitCbParam*    pParam = (SMqCommitCbParam*)param;
   SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
 
-  // taosMemoryFree(pParam->pOffset);
   taosMemoryFree(pBuf->pData);
   taosMemoryFree(pBuf->pEpSet);
 
@@ -516,14 +509,14 @@ static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffse
   pMsgSendInfo->fp = tmqCommitCb;
   pMsgSendInfo->msgType = TDMT_VND_TMQ_COMMIT_OFFSET;
 
-  atomic_add_fetch_32(&pParamSet->waitingRspNum, 1);
   atomic_add_fetch_32(&pParamSet->totalRspNum, 1);
 
   SEp* pEp = GET_ACTIVE_EP(epSet);
 
   int64_t transporterId = 0;
-  return asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, pMsgSendInfo);
+  atomic_add_fetch_32(&pParamSet->waitingRspNum, 1);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, pMsgSendInfo);
+  if (code != 0) {
+    atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
+    return code;
+  }
+  return code;
 }
 
 static SMqClientTopic* getTopicByName(tmq_t* tmq, const char* pTopicName) {
@@ -556,8 +549,6 @@ static SMqCommitCbParamSet* prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* p
   return pParamSet;
 }
 
-
-
 static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClientVg** pVg) {
   SMqClientTopic* pTopic = getTopicByName(tmq, pTopicName);
   if (pTopic == NULL) {
@@ -578,11 +569,10 @@ static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClient
 }
 
 static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STqOffsetVal* offsetVal, tmq_commit_cb* pCommitFp, void* userParam) {
-  int32_t code = 0;
   tscInfo("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId);
   taosRLockLatch(&tmq->lock);
   SMqClientVg* pVg = NULL;
-  code = getClientVg(tmq, pTopicName, vgId, &pVg);
+  int32_t code = getClientVg(tmq, pTopicName, vgId, &pVg);
   if (code != 0) {
     goto end;
   }
@@ -711,7 +701,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
   tscInfo("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1, numOfTopics);
 
   // request is sent
-  if (pParamSet->totalRspNum != 0) {
+  if (pParamSet->waitingRspNum != 1) {
     // count down since waiting rsp num init as 1
     commitRspCountDown(pParamSet, tmq->consumerId, "", 0);
     return;
@@ -750,20 +740,6 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
   taosMemoryFree(param);
 }
 
-//void tmqAssignDelayedReportTask(void* param, void* tmrId) {
-//  int64_t refId = *(int64_t*)param;
-//  tmq_t*  tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
-//  if (tmq != NULL) {
-//    int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
-//    *pTaskType = TMQ_DELAYED_TASK__REPORT;
-//    taosWriteQitem(tmq->delayedTask, pTaskType);
-//    tsem_post(&tmq->rspSem);
-//  }
-//
-//  taosReleaseRef(tmqMgmt.rsetId, refId);
-//  taosMemoryFree(param);
-//}
-
 int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
   if (pMsg) {
     taosMemoryFree(pMsg->pData);
@@ -1092,7 +1068,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
   pTmq->status = TMQ_CONSUMER_STATUS__INIT;
   pTmq->pollCnt = 0;
   pTmq->epoch = 0;
-  // pTmq->needReportOffsetRows = true;
 
   // set conf
   strcpy(pTmq->clientId, conf->clientId);
@@ -1153,7 +1128,7 @@ _failed:
 }
 
 int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
-  if(tmq == NULL) return TSDB_CODE_INVALID_PARA;
+  if(tmq == NULL || topic_list == NULL) return TSDB_CODE_INVALID_PARA;
   const int32_t MAX_RETRY_COUNT = 120 * 2;  // let's wait for 2 mins at most
   const SArray* container = &topic_list->container;
   int32_t       sz = taosArrayGetSize(container);
@@ -1229,7 +1204,10 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
   SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);
 
   int64_t transporterId = 0;
-  asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
+  code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
+  if (code != 0) {
+    goto FAIL;
+  }
 
   // avoid double free if msg is sent
   buf = NULL;
@@ -1246,7 +1224,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
   int32_t retryCnt = 0;
   while (TSDB_CODE_MND_CONSUMER_NOT_READY == doAskEp(tmq)) {
     if (retryCnt++ > MAX_RETRY_COUNT) {
-      tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
+      tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry more than 2 minutes", tmq->consumerId);
       code = TSDB_CODE_MND_CONSUMER_NOT_READY;
       goto FAIL;
     }
@@ -1311,54 +1289,50 @@ static SMqClientTopic* getTopicInfo(tmq_t* tmq, char* topicName){
   return NULL;
 }
 
+static void setVgIdle(tmq_t* tmq, char* topicName, int32_t vgId) {
+  taosWLockLatch(&tmq->lock);
+  SMqClientVg* pVg = getVgInfo(tmq, topicName, vgId);
+  if (pVg) {
+    atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+  }
+  taosWUnLockLatch(&tmq->lock);
+}
+
 int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
   SMqPollCbParam* pParam = (SMqPollCbParam*)param;
 
   int64_t refId = pParam->refId;
-  // SMqClientVg* pVg = pParam->pVg;
-  // SMqClientTopic* pTopic = pParam->pTopic;
-
+  int32_t  vgId = pParam->vgId;
+  uint64_t requestId = pParam->requestId;
   tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
   if (tmq == NULL) {
     taosMemoryFree(pParam);
-    taosMemoryFree(pMsg->pData);
-    taosMemoryFree(pMsg->pEpSet);
+    taosMemoryFreeClear(pMsg->pData);
+    taosMemoryFreeClear(pMsg->pEpSet);
     terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
     return -1;
   }
 
-  int32_t epoch = pParam->epoch;
-  int32_t vgId = pParam->vgId;
-  uint64_t requestId = pParam->requestId;
-
   if (code != 0) {
     if (pMsg->pData) taosMemoryFree(pMsg->pData);
     if (pMsg->pEpSet) taosMemoryFree(pMsg->pEpSet);
 
     // in case of consumer mismatch, wait for 500ms and retry
     if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
-      // taosMsleep(500);
       atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER);
-      tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, wait for 500ms and set status to be RECOVER",
+      tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, set status to be RECOVER",
               tmq->consumerId);
    } else if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
      SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0);
      if (pRspWrapper == NULL) {
-        tscWarn("consumer:0x%" PRIx64 " msg from vgId:%d discarded, epoch %d since out of memory, reqId:0x%" PRIx64,
-                tmq->consumerId, vgId, epoch, requestId);
-        goto CREATE_MSG_FAIL;
+        tscWarn("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since out of memory, reqId:0x%" PRIx64,
+                tmq->consumerId, vgId, requestId);
+        goto END;
      }
 
      pRspWrapper->tmqRspType = TMQ_MSG_TYPE__END_RSP;
      taosWriteQitem(tmq->mqueue, pRspWrapper);
      // } else if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {  // poll data while insert
      //   taosMsleep(5);
    } else {
-      tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, epoch %d, since %s, reqId:0x%" PRIx64, tmq->consumerId,
-               vgId, epoch, tstrerror(code), requestId);
+      tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since %s, reqId:0x%" PRIx64, tmq->consumerId,
+               vgId, tstrerror(code), requestId);
    }
 
-    goto CREATE_MSG_FAIL;
+    goto END;
  }
 
  int32_t msgEpoch = ((SMqRspHead*)pMsg->pData)->epoch;
@@ -1368,43 +1342,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
     tscWarn("consumer:0x%" PRIx64
             " msg discard from vgId:%d since from earlier epoch, rsp epoch %d, current epoch %d, reqId:0x%" PRIx64,
             tmq->consumerId, vgId, msgEpoch, clientEpoch, requestId);
 
-    tsem_post(&tmq->rspSem);
-    taosReleaseRef(tmqMgmt.rsetId, refId);
-
-    taosMemoryFree(pMsg->pData);
-    taosMemoryFree(pMsg->pEpSet);
-    taosMemoryFree(pParam);
-
-    return 0;
+    code = -1;
+    goto END;
   }
 
-  if (msgEpoch != clientEpoch) {
-    tscWarn("consumer:0x%" PRIx64 " mismatch rsp from vgId:%d, epoch %d, current epoch %d, reqId:0x%" PRIx64,
-            tmq->consumerId, vgId, msgEpoch, clientEpoch, requestId);
-  }
+  ASSERT(msgEpoch == clientEpoch);
 
   // handle meta rsp
   int8_t rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType;
 
   SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0);
   if (pRspWrapper == NULL) {
-    taosMemoryFree(pMsg->pData);
-    taosMemoryFree(pMsg->pEpSet);
-    tscWarn("consumer:0x%" PRIx64 " msg discard from vgId:%d, epoch %d since out of memory", tmq->consumerId, vgId,
-            epoch);
-    goto CREATE_MSG_FAIL;
+    tscWarn("consumer:0x%" PRIx64 " msg discard from vgId:%d, since out of memory", tmq->consumerId, vgId);
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto END;
   }
 
   pRspWrapper->tmqRspType = rspType;
-  // pRspWrapper->vgHandle = pVg;
-  // pRspWrapper->topicHandle = pTopic;
   pRspWrapper->reqId = requestId;
   pRspWrapper->pEpset = pMsg->pEpSet;
-  pMsg->pEpSet = NULL;
   pRspWrapper->vgId = vgId;
   strcpy(pRspWrapper->topicName, pParam->topicName);
 
+  pMsg->pEpSet = NULL;
   if (rspType == TMQ_MSG_TYPE__POLL_DATA_RSP) {
     SDecoder decoder;
     tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
@@ -1432,34 +1392,24 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
     tscError("consumer:0x%" PRIx64 " invalid rsp msg received, type:%d ignored", tmq->consumerId, rspType);
   }
 
-  taosMemoryFree(pMsg->pData);
   taosWriteQitem(tmq->mqueue, pRspWrapper);
 
   int32_t total = taosQueueItemSize(tmq->mqueue);
   tscDebug("consumer:0x%" PRIx64 " put poll res into mqueue, type:%d, vgId:%d, total in queue:%d, reqId:0x%" PRIx64,
            tmq->consumerId, rspType, vgId, total, requestId);
 
-  tsem_post(&tmq->rspSem);
-  taosReleaseRef(tmqMgmt.rsetId, refId);
-  taosMemoryFree(pParam);
-
-  return 0;
-
-CREATE_MSG_FAIL:
-  if (epoch == tmq->epoch) {
-    taosWLockLatch(&tmq->lock);
-    SMqClientVg* pVg = getVgInfo(tmq, pParam->topicName, vgId);
-    if (pVg) {
-      atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
-    }
-    taosWUnLockLatch(&tmq->lock);
+END:
+  if (code != 0) {
+    setVgIdle(tmq, pParam->topicName, vgId);
   }
 
   tsem_post(&tmq->rspSem);
   taosReleaseRef(tmqMgmt.rsetId, refId);
   taosMemoryFree(pParam);
+  taosMemoryFreeClear(pMsg->pData);
+  taosMemoryFreeClear(pMsg->pEpSet);
 
-  return -1;
+  return code;
 }
 
 typedef struct SVgroupSaveInfo {
@@ -1467,6 +1417,7 @@ typedef struct SVgroupSaveInfo {
   STqOffsetVal commitOffset;
   STqOffsetVal seekOffset;
   int64_t      numOfRows;
+  int32_t      vgStatus;
 } SVgroupSaveInfo;
 
 static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopicEp, SHashObj* pVgOffsetHashMap,
@@ -1475,7 +1426,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
   pTopicEp->schema.nCols = 0;
   pTopicEp->schema.pSchema = NULL;
 
-  char    vgKey[TSDB_TOPIC_FNAME_LEN + 22];
+  char    vgKey[TSDB_TOPIC_FNAME_LEN + 22] = {0};
   int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs);
 
   tstrncpy(pTopic->topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
@@ -1497,7 +1448,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
         .pollCnt = 0,
         .vgId = pVgEp->vgId,
         .epSet = pVgEp->epSet,
-        .vgStatus = TMQ_VG_STATUS__IDLE,
+        .vgStatus = pInfo ? pInfo->vgStatus : TMQ_VG_STATUS__IDLE,
         .vgSkipCnt = 0,
         .emptyBlockReceiveTs = 0,
         .numOfRows = pInfo ? pInfo->numOfRows : 0,
@@ -1509,7 +1460,6 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
     clientVg.offsetInfo.walVerBegin = -1;
     clientVg.offsetInfo.walVerEnd = -1;
     clientVg.seekUpdated = false;
-    // clientVg.receivedInfoFromVnode = false;
 
     taosArrayPush(pTopic->vgs, &clientVg);
   }
@@ -1546,10 +1496,9 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
   taosWLockLatch(&tmq->lock);
   int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
 
-  char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
+  char vgKey[TSDB_TOPIC_FNAME_LEN + 22] = {0};
   tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
           tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur);
-  // todo extract method
   for (int32_t i = 0; i < topicNumCur; i++) {
     // find old topic
     SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
@@ -1565,7 +1514,9 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
       tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
               vgKey, buf);
 
-      SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset, .seekOffset = pVgCur->offsetInfo.beginOffset, .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows};
+      SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset, .seekOffset = pVgCur->offsetInfo.beginOffset,
+                              .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows,
+                              .vgStatus = pVgCur->vgStatus};
       taosHashPut(pVgOffsetHashMap, vgKey, strlen(vgKey), &info, sizeof(SVgroupSaveInfo));
     }
   }
@@ -1598,32 +1549,17 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
 int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
   SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
   tmq_t*           tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
 
   if (tmq == NULL) {
-    terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
-    // pParam->pUserFn(tmq, terrno, NULL, pParam->pParam);
-
-    taosMemoryFree(pMsg->pData);
-    taosMemoryFree(pMsg->pEpSet);
-    taosMemoryFree(pParam);
-    return terrno;
+    code = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+    goto END;
   }
 
   if (code != TSDB_CODE_SUCCESS) {
     tscError("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code));
-    pParam->pUserFn(tmq, code, NULL, pParam->pParam);
-
-    taosReleaseRef(tmqMgmt.rsetId, pParam->refId);
-
-    taosMemoryFree(pMsg->pData);
-    taosMemoryFree(pMsg->pEpSet);
-    taosMemoryFree(pParam);
-    return code;
+    goto END;
  }
 
  // tmq's epoch is monotonically increase,
  // so it's safe to discard any old epoch msg.
  // Epoch will only increase when received newer epoch ep msg
  SMqRspHead* head = pMsg->pData;
  int32_t     epoch = atomic_load_32(&tmq->epoch);
  if (head->epoch <= epoch) {
@@ -1642,10 +1578,10 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
     tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
             head->epoch, epoch);
   }
 
-  pParam->pUserFn(tmq, code, pMsg, pParam->pParam);
-  taosReleaseRef(tmqMgmt.rsetId, pParam->refId);
 
+END:
+  pParam->pUserFn(tmq, code, pMsg, pParam->pParam);
   taosMemoryFree(pMsg->pEpSet);
   taosMemoryFree(pMsg->pData);
   taosMemoryFree(pParam);
@@ -1734,50 +1670,46 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClie
   return pRspObj;
 }
 
-static int32_t handleErrorBeforePoll(SMqClientVg* pVg, tmq_t* pTmq) {
-  atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
-  tsem_post(&pTmq->rspSem);
-  return -1;
-}
-
 static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* pVg, int64_t timeout) {
   SMqPollReq      req = {0};
+  char*           msg = NULL;
+  SMqPollCbParam* pParam = NULL;
+  SMsgSendInfo*   sendInfo = NULL;
+  int             code = 0;
   tmqBuildConsumeReqImpl(&req, pTmq, timeout, pTopic, pVg);
 
   int32_t msgSize = tSerializeSMqPollReq(NULL, 0, &req);
-  if (msgSize < 0) {
-    return handleErrorBeforePoll(pVg, pTmq);
+  if (msgSize < 0) {
+    code = TSDB_CODE_INVALID_MSG;
+    goto FAIL;
   }
 
-  char* msg = taosMemoryCalloc(1, msgSize);
+  msg = taosMemoryCalloc(1, msgSize);
   if (NULL == msg) {
-    return handleErrorBeforePoll(pVg, pTmq);
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto FAIL;
   }
 
   if (tSerializeSMqPollReq(msg, msgSize, &req) < 0) {
-    taosMemoryFree(msg);
-    return handleErrorBeforePoll(pVg, pTmq);
+    code = TSDB_CODE_INVALID_MSG;
+    goto FAIL;
   }
 
-  SMqPollCbParam* pParam = taosMemoryMalloc(sizeof(SMqPollCbParam));
+  pParam = taosMemoryMalloc(sizeof(SMqPollCbParam));
   if (pParam == NULL) {
-    taosMemoryFree(msg);
-    return handleErrorBeforePoll(pVg, pTmq);
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto FAIL;
  }
 
  pParam->refId = pTmq->refId;
-  pParam->epoch = pTmq->epoch;
-  // pParam->pVg = pVg;  // pVg may be released,fix it
-  // pParam->pTopic = pTopic;
  strcpy(pParam->topicName, pTopic->topicName);
  pParam->vgId = pVg->vgId;
  pParam->requestId = req.reqId;
 
-  SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
+  sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
  if (sendInfo == NULL) {
-    taosMemoryFree(pParam);
-    taosMemoryFree(msg);
-    return handleErrorBeforePoll(pVg, pTmq);
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto FAIL;
  }
 
  sendInfo->msgInfo = (SDataBuf){.pData = msg, .len = msgSize, .handle = NULL};
@ -1793,13 +1725,21 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
|
|||
|
||||
tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId,
|
||||
pTopic->topicName, pVg->vgId, pTmq->epoch, offsetFormatBuf, req.reqId);
|
||||
asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
|
||||
code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
|
||||
if(code != 0){
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
pVg->pollCnt++;
|
||||
pVg->seekUpdated = false; // reset this flag.
|
||||
pTmq->pollCnt++;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
return 0;
|
||||
|
||||
FAIL:
|
||||
taosMemoryFreeClear(pParam);
|
||||
taosMemoryFreeClear(msg);
|
||||
return code;
|
||||
}
|
||||
|
||||
// broadcast the poll request to all related vnodes
|
||||
|
@ -1836,6 +1776,8 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
|
|||
atomic_store_32(&pVg->vgSkipCnt, 0);
|
||||
code = doTmqPollImpl(tmq, pTopic, pVg, timeout);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
tsem_post(&tmq->rspSem);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
@ -1847,19 +1789,15 @@ end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset) {
|
||||
static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper) {
|
||||
if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__EP_RSP) {
|
||||
/*printf("ep %d %d\n", rspMsg->head.epoch, tmq->epoch);*/
|
||||
if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) {
|
||||
SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
|
||||
SMqAskEpRsp* rspMsg = &pEpRspWrapper->msg;
|
||||
doUpdateLocalEp(tmq, rspWrapper->epoch, rspMsg);
|
||||
/*tmqClearUnhandleMsg(tmq);*/
|
||||
tDeleteSMqAskEpRsp(rspMsg);
|
||||
*pReset = true;
|
||||
} else {
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
*pReset = false;
|
||||
}
|
||||
} else {
|
||||
return -1;
|
||||
|
@ -1882,10 +1820,9 @@ static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal
|
|||
// update the valid wal version range
|
||||
pVg->offsetInfo.walVerBegin = sver;
|
||||
pVg->offsetInfo.walVerEnd = ever + 1;
|
||||
// pVg->receivedInfoFromVnode = true;
|
||||
}
|
||||
|
||||
static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
||||
static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
|
||||
tscDebug("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, tmq->qall->numOfItems);
|
||||
|
||||
while (1) {
|
||||
|
@ -1961,6 +1898,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
} else {
|
||||
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch);
|
||||
setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
|
||||
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
|
@ -1992,6 +1930,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
} else {
|
||||
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
|
||||
setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
|
||||
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
|
@ -2049,19 +1988,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
} else {
|
||||
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
|
||||
setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
|
||||
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
} else {
|
||||
tscDebug("consumer:0x%" PRIx64 " not data msg received", tmq->consumerId);
|
||||
|
||||
bool reset = false;
|
||||
tmqHandleNoPollRsp(tmq, pRspWrapper, &reset);
|
||||
tmqHandleNoPollRsp(tmq, pRspWrapper);
|
||||
taosFreeQitem(pRspWrapper);
|
||||
if (pollIfReset && reset) {
|
||||
tscDebug("consumer:0x%" PRIx64 ", reset and repoll", tmq->consumerId);
|
||||
tmqPollImpl(tmq, timeout);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2069,7 +2003,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
||||
if(tmq == NULL) return NULL;
|
||||
|
||||
void* rspObj;
|
||||
void* rspObj = NULL;
|
||||
int64_t startTime = taosGetTimestampMs();
|
||||
|
||||
tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
|
||||
|
@ -2101,7 +2035,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
tscError("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
|
||||
}
|
||||
|
||||
rspObj = tmqHandleAllRsp(tmq, timeout, false);
|
||||
rspObj = tmqHandleAllRsp(tmq, timeout);
|
||||
if (rspObj) {
|
||||
tscDebug("consumer:0x%" PRIx64 " return rsp %p", tmq->consumerId, rspObj);
|
||||
return (TAOS_RES*)rspObj;
|
||||
|
@ -2473,23 +2407,29 @@ end:
|
|||
}
|
||||
}
|
||||
|
||||
void updateEpCallbackFn(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) {
|
||||
void defaultAskEpCb(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) {
|
||||
SAskEpInfo* pInfo = param;
|
||||
pInfo->code = code;
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
SMqRspHead* head = pDataBuf->pData;
|
||||
if (pTmq == NULL || code != TSDB_CODE_SUCCESS){
|
||||
goto END;
|
||||
}
|
||||
|
||||
SMqRspHead* head = pDataBuf->pData;
|
||||
SMqAskEpRsp rsp;
|
||||
tDecodeSMqAskEpRsp(POINTER_SHIFT(pDataBuf->pData, sizeof(SMqRspHead)), &rsp);
|
||||
doUpdateLocalEp(pTmq, head->epoch, &rsp);
|
||||
tDeleteSMqAskEpRsp(&rsp);
|
||||
}
|
||||
|
||||
END:
|
||||
tsem_post(&pInfo->sem);
|
||||
}
|
||||
|
||||
void addToQueueCallbackFn(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* param) {
|
||||
if (pTmq == NULL){
|
||||
terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
|
||||
return;
|
||||
}
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
return;
|
||||
|
@ -2515,7 +2455,7 @@ int32_t doAskEp(tmq_t* pTmq) {
|
|||
SAskEpInfo* pInfo = taosMemoryMalloc(sizeof(SAskEpInfo));
|
||||
tsem_init(&pInfo->sem, 0, 0);
|
||||
|
||||
asyncAskEp(pTmq, updateEpCallbackFn, pInfo);
|
||||
asyncAskEp(pTmq, defaultAskEpCb, pInfo);
|
||||
tsem_wait(&pInfo->sem);
|
||||
|
||||
int32_t code = pInfo->code;
|
||||
|
@ -2529,49 +2469,45 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) {
|
|||
req.consumerId = pTmq->consumerId;
|
||||
req.epoch = pTmq->epoch;
|
||||
strcpy(req.cgroup, pTmq->groupId);
|
||||
int code = 0;
|
||||
SMqAskEpCbParam* pParam = NULL;
|
||||
void* pReq = NULL;
|
||||
|
||||
int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req);
|
||||
if (tlen < 0) {
|
||||
tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq failed", pTmq->consumerId);
|
||||
askEpFn(pTmq, TSDB_CODE_INVALID_PARA, NULL, param);
|
||||
return;
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
void* pReq = taosMemoryCalloc(1, tlen);
|
||||
pReq = taosMemoryCalloc(1, tlen);
|
||||
if (pReq == NULL) {
|
||||
tscError("consumer:0x%" PRIx64 ", failed to malloc askEpReq msg, size:%d", pTmq->consumerId, tlen);
|
||||
askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param);
|
||||
return;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) {
|
||||
tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq %d failed", pTmq->consumerId, tlen);
|
||||
taosMemoryFree(pReq);
|
||||
|
||||
askEpFn(pTmq, TSDB_CODE_INVALID_PARA, NULL, param);
|
||||
return;
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam));
|
||||
pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam));
|
||||
if (pParam == NULL) {
|
||||
tscError("consumer:0x%" PRIx64 ", failed to malloc subscribe param", pTmq->consumerId);
|
||||
taosMemoryFree(pReq);
|
||||
|
||||
askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param);
|
||||
return;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
pParam->refId = pTmq->refId;
|
||||
pParam->epoch = pTmq->epoch;
|
||||
pParam->pUserFn = askEpFn;
|
||||
pParam->pParam = param;
|
||||
|
||||
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
|
||||
if (sendInfo == NULL) {
|
||||
taosMemoryFree(pParam);
|
||||
taosMemoryFree(pReq);
|
||||
askEpFn(pTmq, TSDB_CODE_OUT_OF_MEMORY, NULL, param);
|
||||
return;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = tlen, .handle = NULL};
|
||||
|
@ -2586,7 +2522,15 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) {
|
|||
tscInfo("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
|
||||
|
||||
int64_t transporterId = 0;
|
||||
asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
if(code == 0){
|
||||
return;
|
||||
}
|
||||
|
||||
FAIL:
|
||||
taosMemoryFreeClear(pParam);
|
||||
taosMemoryFreeClear(pReq);
|
||||
askEpFn(pTmq, code, NULL, param);
|
||||
}
|
||||
|
||||
int32_t makeTopicVgroupKey(char* dst, const char* topicName, int32_t vg) {
|
||||
|
@ -2609,8 +2553,6 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
|
|||
}
|
||||
|
||||
taosMemoryFree(pParamSet);
|
||||
// tmq->needReportOffsetRows = true;
|
||||
|
||||
taosReleaseRef(tmqMgmt.rsetId, refId);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2783,7 +2725,14 @@ int64_t getCommittedFromServer(tmq_t *tmq, char* tname, int32_t vgId, SEpSet* ep
|
|||
sendInfo->msgType = TDMT_VND_TMQ_VG_COMMITTEDINFO;
|
||||
|
||||
int64_t transporterId = 0;
|
||||
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, sendInfo);
|
||||
code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, epSet, &transporterId, sendInfo);
|
||||
if(code != 0){
|
||||
taosMemoryFree(buf);
|
||||
taosMemoryFree(sendInfo);
|
||||
tsem_destroy(&pParam->sem);
|
||||
taosMemoryFree(pParam);
|
||||
return code;
|
||||
}
|
||||
|
||||
tsem_wait(&pParam->sem);
|
||||
code = pParam->code;
|
||||
|
@ -3042,7 +2991,12 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
|
|||
|
||||
tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
|
||||
tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
|
||||
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo);
|
||||
code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo);
|
||||
if(code != 0){
|
||||
taosMemoryFree(pParam);
|
||||
taosMemoryFree(msg);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
tsem_wait(&pCommon->rsp);
|
||||
|
@ -3192,7 +3146,14 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
|
|||
sendInfo->msgType = TDMT_VND_TMQ_SEEK;
|
||||
|
||||
int64_t transporterId = 0;
|
||||
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
if(code != 0){
|
||||
taosMemoryFree(msg);
|
||||
taosMemoryFree(sendInfo);
|
||||
tsem_destroy(&pParam->sem);
|
||||
taosMemoryFree(pParam);
|
||||
return code;
|
||||
}
|
||||
|
||||
tsem_wait(&pParam->sem);
|
||||
code = pParam->code;
|
||||
|
|
|
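Several of the tmq changes above converge on one error-handling shape: declare every resource up front as NULL, jump to a single FAIL label on any failure, and release with a NULL-tolerant free so each exit path stays identical. A minimal standalone sketch of that shape, using plain calloc/free and hypothetical CODE_* constants in place of the real TSDB_CODE_* values:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical error codes, standing in for TSDB_CODE_* values. */
    #define CODE_OK            0
    #define CODE_OUT_OF_MEMORY 1
    #define CODE_INVALID_MSG   2

    static int buildAndSend(size_t msgSize) {
      int   code = CODE_OK;
      char *msg = NULL;     /* all resources start NULL ... */
      char *param = NULL;   /* ... so FAIL can free them unconditionally */

      msg = calloc(1, msgSize);
      if (msg == NULL) {
        code = CODE_OUT_OF_MEMORY;
        goto FAIL;
      }

      param = malloc(64);
      if (param == NULL) {
        code = CODE_OUT_OF_MEMORY;
        goto FAIL;
      }

      if (msgSize == 0) {   /* stand-in for a serialization failure */
        code = CODE_INVALID_MSG;
        goto FAIL;
      }

      /* ... hand msg/param to an async sender; on success it owns them ... */
      free(param);
      free(msg);
      return CODE_OK;

    FAIL:
      free(param);          /* free(NULL) is a no-op, so this is safe */
      free(msg);
      return code;
    }

    int main(void) {
      printf("code=%d\n", buildAndSend(128));
      return 0;
    }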
@@ -2460,10 +2460,10 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt
      code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
      if (code) goto _exit;
    } else {
      if (ASSERT(varDataTLen(data + offset) <= bytes)) {
      if (varDataTLen(data + offset) > bytes) {
        uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset),
               bytes);
        code = TSDB_CODE_INVALID_PARA;
        code = TSDB_CODE_PAR_VALUE_TOO_LONG;
        goto _exit;
      }
      code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](pColData, (uint8_t *)varDataVal(data + offset),
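The hunk above turns a debug-time ASSERT into a recoverable check that logs and returns TSDB_CODE_PAR_VALUE_TOO_LONG. A sketch of the same idea, validating a var-length cell's total length against the column's declared width; the 2-byte little-endian length header here is an assumption standing in for the real varDataTLen() layout:

    #include <stdint.h>
    #include <stdio.h>

    #define CODE_OK             0
    #define CODE_VALUE_TOO_LONG 1  /* stand-in for TSDB_CODE_PAR_VALUE_TOO_LONG */

    /* A var-length cell: 2-byte length header followed by the payload. */
    static int32_t appendVarValue(const uint8_t *cell, int32_t declaredBytes) {
      int32_t totalLen = 2 + (int32_t)(cell[0] | (cell[1] << 8));
      if (totalLen > declaredBytes) {
        /* log and reject instead of asserting: bad input must not abort */
        fprintf(stderr, "var data length invalid, total:%d > bytes:%d\n", totalLen, declaredBytes);
        return CODE_VALUE_TOO_LONG;
      }
      /* ... append payload to the column ... */
      return CODE_OK;
    }

    int main(void) {
      uint8_t cell[6] = {4, 0, 'a', 'b', 'c', 'd'};  /* length header = 4 */
      printf("%d\n", appendVarValue(cell, 6));       /* fits: CODE_OK */
      printf("%d\n", appendVarValue(cell, 4));       /* too long */
      return 0;
    }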
@@ -56,7 +56,7 @@ void *dmSetMgmtHandle(SArray *pArray, tmsg_t msgType, void *nodeMsgFp, bool need

void dmGetMonitorSystemInfo(SMonSysInfo *pInfo) {
  taosGetCpuUsage(&pInfo->cpu_system, &pInfo->cpu_engine);
  taosGetCpuCores(&pInfo->cpu_cores);
  taosGetCpuCores(&pInfo->cpu_cores, false);
  taosGetProcMemory(&pInfo->mem_engine);
  taosGetSysMemory(&pInfo->mem_system);
  pInfo->mem_total = tsTotalMemoryKB;
@@ -401,7 +401,6 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {

    SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
    if(pSub == NULL){
      ASSERT(0);
      continue;
    }
    taosWLockLatch(&pSub->lock);
@@ -1017,7 +1017,10 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
  }

  code = mndSetDbCfgFromAlterDbReq(&dbObj, &alterReq);
  if (code != 0) goto _OVER;
  if (code != 0) {
    if (code == TSDB_CODE_MND_DB_OPTION_UNCHANGED) code = 0;
    goto _OVER;
  }

  code = mndCheckInChangeDbCfg(pMnode, &dbObj.cfg);
  if (code != 0) goto _OVER;
@@ -868,6 +868,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
    mndTransDrop(pTrans);

  taosThreadMutexLock(&execNodeList.lock);
  mDebug("register to stream task node list");
  keepStreamTasksInBuf(&streamObj, &execNodeList);
  taosThreadMutexUnlock(&execNodeList.lock);

@@ -876,13 +877,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
  char detail[2000] = {0};
  sprintf(detail,
          "checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64
          ", "
          "fillHistory:%d, igExists:%d, "
          "igExpired:%d, igUpdate:%d, lastTs:%" PRId64
          ", "
          "maxDelay:%" PRId64
          ", numOfTags:%d, sourceDB:%s, "
          "targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
          ", fillHistory:%d, igExists:%d, igExpired:%d, igUpdate:%d, lastTs:%" PRId64 ", maxDelay:%" PRId64
          ", numOfTags:%d, sourceDB:%s, targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
          createStreamReq.checkpointFreq, createStreamReq.createStb, createStreamReq.deleteMark,
          createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate,
          createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB,

@@ -1579,8 +1575,8 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
    } else if (taskStatus == TASK_STATUS__DROPPING) {
      memcpy(varDataVal(status), "dropping", 8);
      varDataSetLen(status, 8);
    } else if (taskStatus == TASK_STATUS__FAIL) {
      memcpy(varDataVal(status), "fail", 4);
    } else if (taskStatus == TASK_STATUS__UNINIT) {
      memcpy(varDataVal(status), "uninit", 6);
      varDataSetLen(status, 4);
    } else if (taskStatus == TASK_STATUS__STOP) {
      memcpy(varDataVal(status), "stop", 4);

@@ -2021,14 +2017,11 @@ static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgr

static bool isNodeEpsetChanged(const SEpSet *pPrevEpset, const SEpSet *pCurrent) {
  const SEp *pEp = GET_ACTIVE_EP(pPrevEpset);
  const SEp* p = GET_ACTIVE_EP(pCurrent);

  for (int32_t i = 0; i < pCurrent->numOfEps; ++i) {
    const SEp *p = &(pCurrent->eps[i]);
    if (pEp->port == p->port && strncmp(pEp->fqdn, p->fqdn, TSDB_FQDN_LEN) == 0) {
      return false;
    }
  }

  return true;
}

@@ -2125,6 +2118,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
      mDebug("stream:0x%" PRIx64 " involved node changed, create update trans", pStream->uid);
      int32_t code = createStreamUpdateTrans(pMnode, pStream, pChangeInfo);
      if (code != TSDB_CODE_SUCCESS) {
        sdbCancelFetch(pSdb, pIter);
        return code;
      }
    }

@@ -2228,17 +2222,21 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
    SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
    if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
      code = mndProcessVgroupChange(pMnode, &changeInfo);
    }

    taosArrayDestroy(changeInfo.pUpdateNodeList);
    taosHashCleanup(changeInfo.pDBMap);

    // keep the new vnode snapshot
    if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) {
      mDebug("create trans successfully, update cached node list");
      taosArrayDestroy(execNodeList.pNodeEntryList);
      execNodeList.pNodeEntryList = pNodeSnapshot;
      execNodeList.ts = ts;
    }
  } else {
    mDebug("no update found in nodeList");
    taosArrayDestroy(pNodeSnapshot);
  }

  taosArrayDestroy(changeInfo.pUpdateNodeList);
  taosHashCleanup(changeInfo.pDBMap);

  mDebug("end to do stream task node change checking");
  atomic_store_32(&mndNodeCheckSentinel, 0);

@@ -2289,7 +2287,6 @@ static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *p
// todo: this process should be executed by the write queue worker of the mnode
int32_t mndProcessStreamHb(SRpcMsg *pReq) {
  SMnode *pMnode = pReq->info.node;

  SStreamHbMsg req = {0};
  int32_t code = TSDB_CODE_SUCCESS;

@@ -2315,9 +2312,12 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
  for (int32_t i = 0; i < req.numOfTasks; ++i) {
    STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
    int64_t k[2] = {p->streamId, p->taskId};
    int32_t index = *(int32_t *)taosHashGet(execNodeList.pTaskMap, &k, sizeof(k));
    int32_t *index = taosHashGet(execNodeList.pTaskMap, &k, sizeof(k));
    if (index == NULL) {
      continue;
    }

    STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, index);
    STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, *index);
    pStatusEntry->status = p->status;
    if (p->status != TASK_STATUS__NORMAL) {
      mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status));
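The isNodeEpsetChanged fix above stops comparing just the two active endpoints and instead looks for the previously active endpoint anywhere in the current set, so a mere change of the active index is not reported as a node change. A self-contained sketch of that comparison with simplified Ep/EpSet types (the real SEp/SEpSet carry more fields):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define FQDN_LEN 128

    typedef struct { char fqdn[FQDN_LEN]; unsigned short port; } Ep;
    typedef struct { int numOfEps; int inUse; Ep eps[3]; } EpSet;

    /* Changed only if the previously active endpoint appears nowhere in
     * the current set; scanning the whole set avoids false positives when
     * the active index moved but the node itself is still present. */
    static bool epsetChanged(const EpSet *prev, const EpSet *cur) {
      const Ep *active = &prev->eps[prev->inUse];
      for (int i = 0; i < cur->numOfEps; ++i) {
        const Ep *p = &cur->eps[i];
        if (active->port == p->port && strncmp(active->fqdn, p->fqdn, FQDN_LEN) == 0) {
          return false;
        }
      }
      return true;
    }

    int main(void) {
      EpSet a = {.numOfEps = 1, .inUse = 0, .eps = {{"node1", 6030}}};
      EpSet b = {.numOfEps = 2, .inUse = 1, .eps = {{"node2", 6030}, {"node1", 6030}}};
      printf("%s\n", epsetChanged(&a, &b) ? "changed" : "unchanged");
      return 0;
    }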
@@ -288,6 +288,7 @@ typedef struct {
  int64_t numOfSTables;
  int64_t numOfCTables;
  int64_t numOfNTables;
  int64_t numOfCmprTables;
  int64_t numOfNTimeSeries;
  int64_t numOfTimeSeries;
  int64_t itvTimeSeries;
@@ -165,6 +165,7 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
int32_t tqScanWal(STQ* pTq);
int32_t tqCheckAndRunStreamTask(STQ* pTq);
int32_t tqStartStreamTasks(STQ* pTq);
int32_t tqStopStreamTasks(STQ* pTq);

// tq util
@@ -61,5 +61,12 @@ int metaPrepareAsyncCommit(SMeta *pMeta) {
// abort the meta txn
int metaAbort(SMeta *pMeta) {
  if (!pMeta->txn) return 0;
  return tdbAbort(pMeta->pEnv, pMeta->txn);
  int code = tdbAbort(pMeta->pEnv, pMeta->txn);
  if (code) {
    metaError("vgId:%d, failed to abort meta since %s", TD_VID(pMeta->pVnode), tstrerror(terrno));
  } else {
    pMeta->txn = NULL;
  }

  return code;
}
@@ -699,13 +699,16 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
  // sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
  if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
  int64_t nTables = metaGetTbNum(pMeta);
  if (nTables - pMeta->pVnode->config.vndStats.numOfCmprTables > 100 ||
      pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
      ++pMeta->pVnode->config.vndStats.itvTimeSeries % (60 * 5) == 0) {
    int64_t num = 0;
    vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
    pMeta->pVnode->config.vndStats.numOfTimeSeries = num;

    pMeta->pVnode->config.vndStats.itvTimeSeries = (TD_VID(pMeta->pVnode) % 100) * 2;
    pMeta->pVnode->config.vndStats.numOfCmprTables = nTables;
  }

  return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
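metaGetTimeSeriesNum above only triggers the expensive recount when the cached value is missing, the table count has drifted by more than 100, or the periodic interval fires. A toy version of that refresh policy; the names and the recount function are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t cachedSeries = -1;  /* cached expensive aggregate */
    static int64_t cachedTables = 0;   /* table count at last refresh */
    static int64_t tick = 0;           /* call counter, ~once per second */

    static int64_t expensiveRecount(int64_t nTables) {
      return nTables * 10;             /* pretend: full metadata scan */
    }

    static int64_t getSeriesNum(int64_t nTables) {
      /* Refresh when the cache is cold, the table count drifted by more
       * than 100, or the periodic interval (5 minutes here) elapsed. */
      if (cachedSeries < 0 || nTables - cachedTables > 100 || ++tick % (60 * 5) == 0) {
        cachedSeries = expensiveRecount(nTables);
        cachedTables = nTables;
      }
      return cachedSeries;
    }

    int main(void) {
      printf("%lld\n", (long long)getSeriesNum(1000));  /* cold: recount */
      printf("%lld\n", (long long)getSeriesNum(1050));  /* drift 50: cached */
      printf("%lld\n", (long long)getSeriesNum(1200));  /* drift 200: recount */
      return 0;
    }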
@@ -269,19 +269,21 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) {
  }

  tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey);
  taosWLockLatch(&pTq->lock);

  STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
  if (pHandle == NULL) {
    tqWarn("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", req.consumerId, vgId, req.subKey);
    code = 0;
    taosWUnLockLatch(&pTq->lock);
    goto end;
  }

  // 2. check consumer-vg assignment status
  taosRLockLatch(&pTq->lock);
  if (pHandle->consumerId != req.consumerId) {
    tqError("ERROR tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64,
            req.consumerId, vgId, req.subKey, pHandle->consumerId);
    taosRUnLockLatch(&pTq->lock);
    taosWUnLockLatch(&pTq->lock);
    code = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
    goto end;
  }

@@ -289,7 +291,7 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) {
  // if consumer register to push manager, push empty to consumer to change vg status from TMQ_VG_STATUS__WAIT to
  // TMQ_VG_STATUS__IDLE, otherwise poll data failed after seek.
  tqUnregisterPushHandle(pTq, pHandle);
  taosRUnLockLatch(&pTq->lock);
  taosWUnLockLatch(&pTq->lock);

end:
  rsp.code = code;

@@ -496,15 +498,16 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
  int32_t vgId = TD_VID(pTq->pVnode);

  // 1. find handle
  taosRLockLatch(&pTq->lock);
  STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
  if (pHandle == NULL) {
    tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s not found", consumerId, vgId, req.subKey);
    terrno = TSDB_CODE_INVALID_MSG;
    taosRUnLockLatch(&pTq->lock);
    return -1;
  }

  // 2. check re-balance status
  taosRLockLatch(&pTq->lock);
  if (pHandle->consumerId != consumerId) {
    tqDebug("ERROR consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64,
            consumerId, vgId, req.subKey, pHandle->consumerId);

@@ -580,7 +583,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
    bool exec = tqIsHandleExec(pHandle);

    if(exec){
      tqInfo("vgId:%d, topic:%s, subscription is executing, wait for 10ms and retry, pHandle:%p", vgId,
      tqInfo("vgId:%d, topic:%s, subscription is executing, delete wait for 10ms and retry, pHandle:%p", vgId,
             pHandle->subKey, pHandle);
      taosWUnLockLatch(&pTq->lock);
      taosMsleep(10);

@@ -689,8 +692,16 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
    }
    ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
  } else {
    while(1){
      taosWLockLatch(&pTq->lock);

      bool exec = tqIsHandleExec(pHandle);
      if(exec){
        tqInfo("vgId:%d, topic:%s, subscription is executing, sub wait for 10ms and retry, pHandle:%p", pTq->pVnode->config.vgId,
               pHandle->subKey, pHandle);
        taosWUnLockLatch(&pTq->lock);
        taosMsleep(10);
        continue;
      }
      if (pHandle->consumerId == req.newConsumerId) {  // do nothing
        tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
      } else {

@@ -702,6 +713,8 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
        ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
      }
      taosWUnLockLatch(&pTq->lock);
      break;
    }
  }

end:

@@ -1416,7 +1429,7 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
  }

  int8_t status = pTask->status.taskStatus;
  if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY) {
  if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) {
    // no lock needs to secure the access of the version
    if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) {
      // discard all the data when the stream task is suspended.

@@ -1670,6 +1683,7 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
  char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);
  SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};
  bool allStopped = false;

  SStreamTaskNodeUpdateMsg req = {0};

@@ -1700,22 +1714,49 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {

  tqDebug("s-task:%s receive task nodeEp update msg from mnode", pTask->id.idStr);
  streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
  streamSetStatusNormal(pTask);

  SStreamTask** ppHTask = NULL;
  if (pTask->historyTaskId.taskId != 0) {
    keys[0] = pTask->historyTaskId.streamId;
    keys[1] = pTask->historyTaskId.taskId;

    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys));
    if (ppHTask == NULL || *ppHTask == NULL) {
      tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
              pMeta->vgId, req.taskId);
    } else {
      tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
      streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
    }
  }

  {
    streamSetStatusNormal(pTask);
    streamMetaSaveTask(pMeta, pTask);
    if (ppHTask != NULL) {
      streamMetaSaveTask(pMeta, *ppHTask);
    }

    if (streamMetaCommit(pMeta) < 0) {
      // persist to disk
    }
  }

  streamTaskStop(pTask);
  if (ppHTask != NULL) {
    streamTaskStop(*ppHTask);
  }

  tqDebug("s-task:%s task nodeEp update completed", pTask->id.idStr);

  pMeta->closedTask += 1;
  if (ppHTask != NULL) {
    pMeta->closedTask += 1;
  }

  // possibly only handle the stream task.
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  bool allStopped = (pMeta->closedTask == numOfTasks);
  allStopped = (pMeta->closedTask == numOfTasks);
  if (allStopped) {
    pMeta->closedTask = 0;
  } else {

@@ -1752,6 +1793,7 @@ _end:
  taosWUnLockLatch(&pMeta->lock);
  if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
    vInfo("vgId:%d, restart all stream tasks", vgId);
    tqStartStreamTasks(pTq);
    tqCheckAndRunStreamTaskAsync(pTq);
  }
}
@@ -366,7 +366,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
bool tqNextBlockInWal(STqReader* pReader, const char* id) {
  SWalReader* pWalReader = pReader->pWalReader;

  // uint64_t st = taosGetTimestampMs();
  uint64_t st = taosGetTimestampMs();
  while (1) {
    SArray* pBlockList = pReader->submit.aSubmitTbData;
    if (pBlockList == NULL || pReader->nextBlk >= taosArrayGetSize(pBlockList)) {

@@ -441,9 +441,9 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) {

    pReader->msg.msgStr = NULL;

    // if(taosGetTimestampMs() - st > 5){
    //   return false;
    // }
    if(taosGetTimestampMs() - st > 1000){
      return false;
    }
  }
}
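Both WAL-reading loops in this commit replace a commented-out 5 ms experiment with a hard 1000 ms budget, so a single poll cannot monopolize the thread while the WAL keeps supplying data. The general shape of such a time-budgeted loop, using CLOCK_MONOTONIC where the real code uses taosGetTimestampMs():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t nowMs(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Drain work items, but yield after ~1000 ms so one reader cannot
     * monopolize the thread; the caller re-enters on the next poll. */
    static int drainWithBudget(void) {
      int64_t st = nowMs();
      for (;;) {
        /* ... fetch and process the next WAL entry here ... */
        if (nowMs() - st > 1000) {
          return 0;  /* budget exhausted: hand control back */
        }
      }
    }

    int main(void) {
      drainWithBudget();
      puts("yielded after time budget");
      return 0;
    }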
@@ -224,6 +224,35 @@ int32_t tqStopStreamTasks(STQ* pTq) {
  return 0;
}

int32_t tqStartStreamTasks(STQ* pTq) {
  SStreamMeta* pMeta = pTq->pStreamMeta;
  int32_t vgId = TD_VID(pTq->pVnode);
  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);

  tqDebug("vgId:%d start to stop all %d stream task(s)", vgId, numOfTasks);

  if (numOfTasks == 0) {
    return TSDB_CODE_SUCCESS;
  }

  taosWLockLatch(&pMeta->lock);

  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);

    int64_t key[2] = {pTaskId->streamId, pTaskId->taskId};
    SStreamTask** pTask = taosHashGet(pMeta->pTasks, key, sizeof(key));

    int8_t status = (*pTask)->status.taskStatus;
    if (status == TASK_STATUS__STOP) {
      streamSetStatusNormal(*pTask);
    }
  }

  taosWUnLockLatch(&pMeta->lock);
  return 0;
}

int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
  // seek the stored version and extract data from WAL
  int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);

@@ -383,6 +412,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
      taosThreadMutexUnlock(&pTask->lock);
      streamMetaReleaseTask(pStreamMeta, pTask);
      if (pItem != NULL) {
        streamFreeQitem(pItem);
      }
      continue;
    }
@@ -213,11 +213,11 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
  walReaderVerifyOffset(pHandle->pWalReader, offset);
  int64_t fetchVer = offset->version;

  // uint64_t st = taosGetTimestampMs();
  uint64_t st = taosGetTimestampMs();
  int totalRows = 0;
  while (1) {
    // int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
    // ASSERT (savedEpoch <= pRequest->epoch);
    int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
    ASSERT(savedEpoch <= pRequest->epoch);

    if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) {
      tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);

@@ -260,8 +260,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
      goto end;
    }

    // if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 5)) {
    if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
    if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 1000)) {
      tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1);
      code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
      goto end;
@@ -857,6 +857,9 @@ static int32_t tsdbSnapWriteTombRecord(STsdbSnapWriter* writer, const STombRecor
    } else {
      break;
    }

    code = tsdbIterMergerNext(writer->ctx->tombIterMerger);
    TSDB_CHECK_CODE(code, lino, _exit);
  }

  if (record->suid == INT64_MAX) {
@@ -545,13 +545,10 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
}

static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
  STSchema *pTSchema = metaGetTbTSchema(pVnode->pMeta, suid, -1, 1);
  // metaGetTbTSchemaEx(pVnode->pMeta, suid, suid, -1, &pTSchema);

  if (pTSchema) {
    *num = pTSchema->numOfCols;

    taosMemoryFree(pTSchema);
  SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 1);
  if (pSW) {
    *num = pSW->nCols;
    tDeleteSchemaWrapper(pSW);
  } else {
    *num = 2;
  }
@@ -425,6 +425,20 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
    if (code) goto _exit;
  }

  if (pWriter->pStreamTaskWriter) {
    code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback);
    if (code) goto _exit;
  }

  if (pWriter->pStreamStateWriter) {
    code = streamStateSnapWriterClose(pWriter->pStreamStateWriter, rollback);
    if (code) goto _exit;

    code = streamStateRebuildFromSnap(pWriter->pStreamStateWriter, 0);
    pWriter->pStreamStateWriter = NULL;
    if (code) goto _exit;
  }

  if (pWriter->pRsmaSnapWriter) {
    code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
    if (code) goto _exit;
@@ -560,6 +560,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
      vInfo("vgId:%d, not launch stream tasks, since stream tasks are disabled", vgId);
    } else {
      vInfo("vgId:%d start to launch stream tasks", pVnode->config.vgId);
      tqStartStreamTasks(pVnode->pTq);
      tqCheckAndRunStreamTaskAsync(pVnode->pTq);
    }
  } else {
@@ -786,6 +786,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
      if (startPos < 0) {
        break;
      }
      qDebug("===stream===ignore expired data, window end ts:%" PRId64 ", maxts - wartermak:%" PRId64, nextWin.ekey,
             pInfo->twAggSup.maxTs - pInfo->twAggSup.waterMark);
      continue;
    }

@@ -1552,6 +1554,7 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
  tSimpleHashCleanup(pInfo->pStUpdated);
  tSimpleHashCleanup(pInfo->pStDeleted);
  pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
  cleanupGroupResInfo(&pInfo->groupResInfo);

  taosArrayDestroy(pInfo->historyWins);
  blockDataDestroy(pInfo->pCheckpointRes);

@@ -2197,6 +2200,7 @@ void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
  pGroupResInfo->pRows = pArrayList;
  pGroupResInfo->index = 0;
  pGroupResInfo->pBuf = NULL;
  pGroupResInfo->freeItem = false;
}

void doBuildSessionResult(SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, SSDataBlock* pBlock) {

@@ -2874,10 +2878,12 @@ void destroyStreamStateOperatorInfo(void* param) {
  }
  colDataDestroy(&pInfo->twAggSup.timeWindowData);
  blockDataDestroy(pInfo->pDelRes);
  taosArrayDestroy(pInfo->historyWins);
  tSimpleHashCleanup(pInfo->pSeUpdated);
  tSimpleHashCleanup(pInfo->pSeDeleted);
  pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
  cleanupGroupResInfo(&pInfo->groupResInfo);

  taosArrayDestroy(pInfo->historyWins);
  blockDataDestroy(pInfo->pCheckpointRes);

  taosMemoryFreeClear(param);
@@ -121,7 +121,8 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
  snprintf(dnodeIdEnvItem, 32, "%s=%d", "DNODE_ID", pData->dnodeId);

  float numCpuCores = 4;
  taosGetCpuCores(&numCpuCores);
  taosGetCpuCores(&numCpuCores, false);
  numCpuCores = TMAX(numCpuCores, 2);
  snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);

  char pathTaosdLdLib[512] = {0};
@@ -683,8 +683,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
      pStart += BitmapLen(numOfRows);
    }
    char* pData = pStart;

    tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
    ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
    if(ret != 0){
      goto end;
    }
    fields += sizeof(int8_t) + sizeof(int32_t);
    if (needChangeLength) {
      pStart += htonl(colLength[j]);

@@ -712,7 +714,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
    char* pData = pStart;

    SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j);
    tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
    ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData);
    if(ret != 0){
      goto end;
    }
    fields += sizeof(int8_t) + sizeof(int32_t);
    if (needChangeLength) {
      pStart += htonl(colLength[i]);

@@ -729,7 +734,10 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
  for (int c = 0; c < boundInfo->numOfBound; ++c) {
    if( boundInfo->pColIndex[c] != -1){
      SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, c);
      tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL);
      ret = tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL);
      if(ret != 0){
        goto end;
      }
    }else{
      boundInfo->pColIndex[c] = c;  // restore for next block
    }
@@ -3754,6 +3754,10 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec
  pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
  int32_t code = TSDB_CODE_SUCCESS;

  if (pSelect->pPartitionByList) {
    code = removeConstantValueFromList(&pSelect->pPartitionByList);
  }

  if (TSDB_CODE_SUCCESS == code && pSelect->pPartitionByList) {
    int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable);
    SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0);
@@ -37,6 +37,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
      return "Column ambiguously defined: %s";
    case TSDB_CODE_PAR_WRONG_VALUE_TYPE:
      return "Invalid value type: %s";
    case TSDB_CODE_PAR_INVALID_VARBINARY:
      return "Invalid varbinary value: %s";
    case TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION:
      return "There mustn't be aggregation";
    case TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT:
@@ -373,6 +373,11 @@ static bool tagScanNodeHasTbname(SNode* pKeys) {
static int32_t tagScanSetExecutionMode(SScanLogicNode* pScan) {
  pScan->onlyMetaCtbIdx = false;

  if (pScan->tableType == TSDB_CHILD_TABLE) {
    pScan->onlyMetaCtbIdx = false;
    return TSDB_CODE_SUCCESS;
  }

  if (tagScanNodeListHasTbname(pScan->pScanPseudoCols)) {
    pScan->onlyMetaCtbIdx = false;
    return TSDB_CODE_SUCCESS;
@@ -391,8 +391,8 @@ static void doRetryDispatchData(void* param, void* tmrId) {
  SStreamTask* pTask = param;

  if (streamTaskShouldStop(&pTask->status)) {
    atomic_sub_fetch_8(&pTask->status.timerActive, 1);
    qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
    int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
    qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
    return;
  }

@@ -409,17 +409,22 @@ static void doRetryDispatchData(void* param, void* tmrId) {
        streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
      }
    } else {
      atomic_sub_fetch_8(&pTask->status.timerActive, 1);
      qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
      int32_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
      qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
    }
  } else {
    atomic_sub_fetch_8(&pTask->status.timerActive, 1);
    int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
    qDebug("s-task:%s send success, jump out of timer, ref:%d", pTask->id.idStr, ref);
  }
}

void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration) {
  qError("s-task:%s dispatch data in %" PRId64 "ms", pTask->id.idStr, waitDuration);
  qWarn("s-task:%s dispatch data in %" PRId64 "ms, in timer", pTask->id.idStr, waitDuration);
  if (pTask->launchTaskTimer != NULL) {
    taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer);
  } else {
    pTask->launchTaskTimer = taosTmrStart(doRetryDispatchData, waitDuration, pTask, streamEnv.timer);
  }
}

int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,

@@ -540,8 +545,10 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
    }

    if (++retryCount > MAX_CONTINUE_RETRY_COUNT) {  // add to timer to retry
      qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms",
             pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS);
      int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);

      qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
             pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
      streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
      break;
    }

@@ -982,8 +989,6 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
    if (code != TSDB_CODE_SUCCESS) {  // todo: do nothing if error happens
    }

    streamFreeQitem(pTask->msgInfo.pData);
    pTask->msgInfo.pData = NULL;
    return TSDB_CODE_SUCCESS;
  }

@@ -997,8 +1002,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
  if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
    pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED;  // block the input of current task, to push pressure to upstream
    pTask->msgInfo.blockingTs = taosGetTimestampMs();  // record the blocking start time
    qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data",
           id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS);
    int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
    qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data, ref:%d",
           id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, ref);
    streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
  } else {  // pipeline send data in output queue
    // this message has been sent successfully, let's try next one.
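The dispatch-retry changes above log the post-operation value of timerActive every time a timer takes or drops a reference on the task, which makes a leaked reference (a counter that never returns to zero) visible in the debug log. A compact C11-atomics sketch of that convention; Task and the log text are illustrative, not the real SStreamTask:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Each armed timer takes one reference on the task; the timer callback
     * drops it when it decides not to re-arm. */
    typedef struct {
      atomic_schar timerActive;
      const char  *id;
    } Task;

    static void armRetryTimer(Task *t) {
      /* fetch_add returns the old value; +1 gives the post-op value,
       * matching the atomic_add_fetch_8 style used in the diff */
      signed char ref = atomic_fetch_add(&t->timerActive, 1) + 1;
      printf("s-task:%s retry timer armed, ref:%d\n", t->id, ref);
      /* ... start the timer ... */
    }

    static void timerFired(Task *t, int shouldStop) {
      if (shouldStop) {
        signed char ref = atomic_fetch_sub(&t->timerActive, 1) - 1;
        printf("s-task:%s abort from timer, ref:%d\n", t->id, ref);
        return;
      }
      /* ... re-dispatch and keep the reference ... */
    }

    int main(void) {
      Task t = {.id = "0x1"};
      atomic_init(&t.timerActive, 0);
      armRetryTimer(&t);
      timerFired(&t, 1);
      return 0;
    }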
@@ -521,6 +521,13 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
    ASSERT(pTask->status.timerActive == 0);
    doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);

    if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
      qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
      taosTmrStop(pTask->schedInfo.pTimer);
      pTask->info.triggerParam = 0;
      streamMetaReleaseTask(pMeta, pTask);
    }

    streamMetaRemoveTask(pMeta, keys);
    streamMetaReleaseTask(pMeta, pTask);
  } else {

@@ -659,6 +666,8 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
    int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId};
    void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
    if (p == NULL) {
      // pTask->chkInfo.checkpointVer may be 0, when a follower is become a leader
      // In this case, we try not to start fill-history task anymore.
      if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.checkpointVer) < 0) {
        doClear(pKey, pVal, pCur, pRecycleList);
        tFreeStreamTask(pTask);

@@ -757,7 +766,6 @@ void metaHbToMnode(void* param, void* tmrId) {
  SStreamHbMsg hbMsg = {0};
  SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid);
  if (pMeta == NULL) {
    // taosMemoryFree(param);
    return;
  }

@@ -779,6 +787,7 @@ void metaHbToMnode(void* param, void* tmrId) {
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);

  SEpSet epset = {0};
  bool hasValEpset = false;

  hbMsg.vgId = pMeta->vgId;
  hbMsg.pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry));

@@ -797,12 +806,14 @@ void metaHbToMnode(void* param, void* tmrId) {

    if (i == 0) {
      epsetAssign(&epset, &(*pTask)->info.mnodeEpset);
      hasValEpset = true;
    }
  }

  hbMsg.numOfTasks = taosArrayGetSize(hbMsg.pTaskStatus);
  taosRUnLockLatch(&pMeta->lock);

  if (hasValEpset) {
    int32_t code = 0;
    int32_t tlen = 0;

@@ -833,15 +844,15 @@ void metaHbToMnode(void* param, void* tmrId) {
    }
    tEncoderClear(&encoder);

    taosArrayDestroy(hbMsg.pTaskStatus);

    SRpcMsg msg = {0};
    initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
    msg.info.noResp = 1;

    qDebug("vgId:%d, build and send hb to mnode", pMeta->vgId);

    tmsgSendReq(&epset, &msg);
  }

  taosArrayDestroy(hbMsg.pTaskStatus);
  taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamEnv.timer, &pMeta->hbInfo.hbTmr);
  taosReleaseRef(streamMetaId, rid);
}
@@ -234,9 +234,15 @@ static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
  if (status == TASK_STATUS__SCAN_HISTORY) {
    qDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
    streamTaskLaunchScanHistory(pTask);
  } else {
    if (pTask->info.fillHistory == 1) {
      qDebug("s-task:%s fill-history is set normal when start it, try to remove it,set it task to be dropping", id);
      pTask->status.taskStatus = TASK_STATUS__DROPPING;
      ASSERT(pTask->historyTaskId.taskId == 0);
    } else {
      qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
    }
  }

  // when current stream task is ready, check the related fill history task.
  streamLaunchFillHistoryTask(pTask);

@@ -579,19 +585,17 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
// todo fix the bug: 2. race condition
// an fill history task needs to be started.
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
  int32_t tId = pTask->historyTaskId.taskId;
  if (tId == 0) {
  SStreamMeta* pMeta = pTask->pMeta;
  int32_t hTaskId = pTask->historyTaskId.taskId;
  if (hTaskId == 0) {
    return TSDB_CODE_SUCCESS;
  }

  ASSERT(pTask->status.downstreamReady == 1);
  qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
         pTask->historyTaskId.streamId, tId);
         pTask->historyTaskId.streamId, hTaskId);

  SStreamMeta* pMeta = pTask->pMeta;
  int32_t hTaskId = pTask->historyTaskId.taskId;

  int64_t keys[2] = {pTask->historyTaskId.streamId, pTask->historyTaskId.taskId};
  int64_t keys[2] = {pTask->historyTaskId.streamId, hTaskId};

  // Set the execute conditions, including the query time window and the version range
  SStreamTask** pHTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys));

@@ -610,11 +614,12 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
      // todo failed to create timer
      taosMemoryFree(pInfo);
    } else {
      atomic_add_fetch_8(&pTask->status.timerActive, 1);  // timer is active
      int32_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);  // timer is active
      ASSERT(ref == 1);
      qDebug("s-task:%s set timer active flag", pTask->id.idStr);
    }
  } else {  // timer exists
    ASSERT(pTask->status.timerActive > 0);
    ASSERT(pTask->status.timerActive == 1);
    qDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
    taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
  }

@@ -918,6 +923,13 @@ void streamTaskHalt(SStreamTask* pTask) {
    return;
  }

  // wait for checkpoint completed
  while(pTask->status.taskStatus == TASK_STATUS__CK) {
    qDebug("s-task:%s status:%s during generating checkpoint, wait for 1sec and retry set status:halt", pTask->id.idStr,
           streamGetTaskStatusStr(TASK_STATUS__CK));
    taosMsleep(1000);
  }

  // upgrade to halt status
  if (status == TASK_STATUS__PAUSE) {
    qDebug("s-task:%s upgrade status to %s from %s", pTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT),
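streamTaskHalt above now refuses to change state while a checkpoint is in flight, sleeping one second between re-checks. The skeleton of that wait loop, with a simulated status flip so the sample terminates:

    #include <stdio.h>
    #include <unistd.h>

    enum { STATUS_NORMAL, STATUS_CK, STATUS_HALT };
    static volatile int taskStatus = STATUS_CK;

    /* Halting must not interrupt an in-flight checkpoint, so the caller
     * sleeps and re-checks until the checkpoint state clears. */
    static void haltTask(void) {
      while (taskStatus == STATUS_CK) {
        puts("checkpoint in progress, wait 1s and retry halt");
        sleep(1);
        taskStatus = STATUS_NORMAL;  /* simulated: checkpoint finishes */
      }
      taskStatus = STATUS_HALT;
    }

    int main(void) { haltTask(); return 0; }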
@@ -345,7 +345,7 @@ bool streamStateCheck(SStreamState* pState, const SWinKey* key) {
  return hasRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey));
#else
  SStateKey sKey = {.key = *key, .opNum = pState->number};
  return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
  return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), NULL, NULL);
#endif
}
@@ -25,7 +25,8 @@

#define FLUSH_RATIO 0.5
#define FLUSH_NUM 4
#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024);
#define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024)
#define MIN_NUM_OF_ROW_BUFF 10240

struct SStreamFileState {
  SList* usedBuffs;

@@ -67,7 +68,7 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_
  pFileState->usedBuffs = tdListNew(POINTER_BYTES);
  pFileState->freeBuffs = tdListNew(POINTER_BYTES);
  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
  int32_t cap = TMIN(10240, pFileState->maxRowCount);
  int32_t cap = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount);
  pFileState->rowBuffMap = tSimpleHashInit(cap, hashFn);
  if (!pFileState->usedBuffs || !pFileState->freeBuffs || !pFileState->rowBuffMap) {
    goto _error;

@@ -272,10 +273,12 @@ int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, voi
    *pVLen = pFileState->rowSize;
    *pVal = *pos;
    (*pos)->beUsed = true;
    (*pos)->beFlushed = false;
    return TSDB_CODE_SUCCESS;
  }
  SRowBuffPos* pNewPos = getNewRowPos(pFileState);
  pNewPos->beUsed = true;
  pNewPos->beFlushed = false;
  ASSERT(pNewPos->pRowBuff);
  memcpy(pNewPos->pKey, pKey, keyLen);

@@ -375,6 +378,10 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
  while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
    SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
    ASSERT(pPos->pRowBuff && pFileState->rowSize > 0);
    if (pPos->beFlushed) {
      continue;
    }
    pPos->beFlushed = true;

    if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) {
      streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);

@@ -513,6 +520,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
    ASSERT(pVLen == pFileState->rowSize);
    memcpy(pNewPos->pRowBuff, pVal, pVLen);
    taosMemoryFreeClear(pVal);
    pNewPos->beFlushed = true;
    code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES);
    if (code != TSDB_CODE_SUCCESS) {
      destroyRowBuffPos(pNewPos);
@@ -2562,7 +2562,11 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
             ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex);
  }

  SSyncLogReplMgr oldLogReplMgrs[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA] = {0};
  SSyncLogReplMgr* oldLogReplMgrs = NULL;
  int64_t length = sizeof(SSyncLogReplMgr) * (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA);
  oldLogReplMgrs = taosMemoryMalloc(length);
  if (NULL == oldLogReplMgrs) return -1;
  memset(oldLogReplMgrs, 0, length);

  for(int i = 0; i < oldtotalReplicaNum; i++){
    oldLogReplMgrs[i] = *(ths->logReplMgrs[i]);

@@ -2643,6 +2647,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
    }
  }

  taosMemoryFree(oldLogReplMgrs);

  return 0;
}
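syncNodeRebuildAndCopyIfExist above moves the replica-manager snapshot from the stack to the heap, since sizeof(SSyncLogReplMgr) times the replica maximum can be a sizeable stack frame. The mechanical shape of that change, with made-up type sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-ins for the real sizes; the point is that sizeof(Mgr) * N can
     * be large enough to threaten the thread stack, so it moves to the heap. */
    typedef struct { char state[4096]; } Mgr;
    enum { MAX_REPLICA = 5, MAX_LEARNER = 10 };

    static int rebuildMgrs(void) {
      size_t len = sizeof(Mgr) * (MAX_REPLICA + MAX_LEARNER);
      Mgr   *old = malloc(len);  /* was: Mgr old[MAX_REPLICA + MAX_LEARNER] on the stack */
      if (old == NULL) return -1;
      memset(old, 0, len);       /* replaces the = {0} initializer */

      /* ... snapshot current managers into old[], rebuild, copy back ... */

      free(old);                 /* every exit path must now free it */
      return 0;
    }

    int main(void) { return rebuildMgrs(); }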
@@ -229,7 +229,15 @@ void tdbPCacheInvalidatePage(SPCache *pCache, SPager *pPager, SPgno pgno) {
  }

  if (pPage) {
    bool moveToFreeList = false;
    if (pPage->pLruNext) {
      tdbPCachePinPage(pCache, pPage);
      moveToFreeList = true;
    }
    tdbPCacheRemovePageFromHash(pCache, pPage);
    if (moveToFreeList) {
      tdbPCacheFreePage(pCache, pPage);
    }
  }
}
@@ -296,7 +296,7 @@ int32_t tfsMkdirRecurAt(STfs *pTfs, const char *rname, SDiskID diskId) {
    // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/dirname.3.html
    char *dir = taosStrdup(taosDirName(s));

    if (tfsMkdirRecurAt(pTfs, dir, diskId) < 0) {
    if (strlen(dir) >= strlen(rname) || tfsMkdirRecurAt(pTfs, dir, diskId) < 0) {
      taosMemoryFree(s);
      taosMemoryFree(dir);
      return -1;

@@ -116,7 +116,11 @@ int32_t tfsAllocDiskOnTier(STfsTier *pTier) {

    if (pDisk == NULL) continue;

    if (pDisk->size.avail < tsMinDiskFreeSize) continue;
    if (pDisk->size.avail < tsMinDiskFreeSize) {
      uInfo("disk %s is full and skip it, level:%d id:%d free size:%" PRId64 " min free size:%" PRId64, pDisk->path,
            pDisk->level, pDisk->id, pDisk->size.avail, tsMinDiskFreeSize);
      continue;
    }

    retId = diskId;
    terrno = 0;
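The tfsMkdirRecurAt guard above (strlen(dir) >= strlen(rname)) stops the recursion once dirname() no longer yields a strictly shorter path, since dirname() eventually returns "." or "/" forever. A POSIX sketch of the same termination rule; error handling is reduced to the essentials:

    #include <errno.h>
    #include <libgen.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>

    /* Create path and its parents. dirname() has a fixed point ("." or
     * "/") that is not shorter than its input; the length check below
     * turns that fixed point into a clean failure, not infinite recursion. */
    static int mkdirRecur(const char *path) {
      if (mkdir(path, 0755) == 0 || errno == EEXIST) return 0;
      if (errno != ENOENT) return -1;

      char *s = strdup(path);          /* dirname() may modify its argument */
      char *dir = strdup(dirname(s));
      int   rc = -1;
      if (strlen(dir) < strlen(path) && mkdirRecur(dir) == 0) {
        rc = (mkdir(path, 0755) == 0 || errno == EEXIST) ? 0 : -1;
      }
      free(dir);
      free(s);
      return rc;
    }

    int main(void) { return mkdirRecur("a/b/c") == 0 ? 0 : 1; }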
@ -2156,12 +2156,10 @@ static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) {
|
|||
|
||||
if (rpcDebugFlag & DEBUG_DEBUG) {
|
||||
STraceId* trace = &pMsg->msg.info.traceId;
|
||||
char* tbuf = taosMemoryCalloc(1, TSDB_FQDN_LEN * 5);
|
||||
|
||||
char tbuf[512] = {0};
|
||||
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
||||
tGDebug("%s retry on next node,use:%s, step: %d,timeout:%" PRId64 "", transLabel(pThrd->pTransInst), tbuf,
|
||||
pCtx->retryStep, pCtx->retryNextInterval);
|
||||
taosMemoryFree(tbuf);
|
||||
}
|
||||
|
||||
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
|
||||
|
@ -2387,7 +2385,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
|
|||
bool hasEpSet = cliTryExtractEpSet(pResp, &pCtx->epSet);
|
||||
if (hasEpSet) {
|
||||
if (rpcDebugFlag & DEBUG_TRACE) {
|
||||
char tbuf[256] = {0};
|
||||
char tbuf[512] = {0};
|
||||
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
||||
tGTrace("%s conn %p extract epset from msg", CONN_GET_INST_LABEL(pConn), pConn);
|
||||
}
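
Editor's note: both transport hunks above settle on a 512-byte stack buffer for the textual endpoint set. One way to make such a bound self-documenting is to derive it from the limits instead of hard-coding it; the constants below are illustrative, not TDengine's:

```c
#include <stdio.h>

#define MAX_EPS       3    /* illustrative endpoint limit */
#define MAX_FQDN_LEN  128  /* illustrative hostname limit */
#define MAX_EP_STR    (MAX_EPS * (MAX_FQDN_LEN + sizeof(":65535,")) + 16)

static void epset_to_str(char buf[MAX_EP_STR], const char (*fqdn)[MAX_FQDN_LEN],
                         const unsigned short *port, int n) {
  size_t off = 0;
  for (int i = 0; i < n && i < MAX_EPS; i++) {
    int w = snprintf(buf + off, MAX_EP_STR - off, "%s:%u,", fqdn[i], port[i]);
    if (w < 0 || (size_t)w >= MAX_EP_STR - off) break;  /* truncated */
    off += (size_t)w;
  }
}
```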

@ -43,7 +43,7 @@ target_link_libraries(
)
if(TD_WINDOWS)
target_link_libraries(
os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp version
os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp version KtmW32
)
elseif(TD_DARWIN_64)
find_library(CORE_FOUNDATION_FRAMEWORK CoreFoundation)

@ -19,6 +19,8 @@

#ifdef WINDOWS
#include <io.h>
#include <WinBase.h>
#include <ktmw32.h>
#define F_OK 0
#define W_OK 2
#define R_OK 4

@ -175,12 +177,32 @@ int32_t taosRemoveFile(const char *path) { return remove(path); }

int32_t taosRenameFile(const char *oldName, const char *newName) {
#ifdef WINDOWS
bool code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED);
if (!code) {
printf("failed to rename file %s to %s, reason:%s\n", oldName, newName, strerror(errno));
bool finished = false;

HANDLE transactionHandle = CreateTransaction(NULL, NULL, 0, 0, 0, INFINITE, NULL);
if (transactionHandle == INVALID_HANDLE_VALUE) {
printf("failed to rename file %s to %s, reason: CreateTransaction failed.\n", oldName, newName);
return -1;
}

return code ? 0 : -1;
BOOL result = MoveFileTransacted(oldName, newName, NULL, NULL, MOVEFILE_REPLACE_EXISTING, transactionHandle);

if (result) {
finished = CommitTransaction(transactionHandle);
if (!finished) {
DWORD error = GetLastError();
printf("failed to rename file %s to %s, reason: CommitTransaction errcode %d.\n", oldName, newName, error);
}
} else {
RollbackTransaction(transactionHandle);
DWORD error = GetLastError();
finished = false;
printf("failed to rename file %s to %s, reason: MoveFileTransacted errcode %d.\n", oldName, newName, error);
}

CloseHandle(transactionHandle);

return finished ? 0 : -1;
#else
int32_t code = rename(oldName, newName);
if (code < 0) {
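
Editor's note: the Windows branch above swaps a plain MoveFileEx for a Kernel Transaction Manager transaction, so the replace-rename either commits whole or rolls back; the KtmW32 link library and <ktmw32.h> include added in the earlier hunks support exactly these calls. A compact sketch using the same Win32 APIs:

```c
/* Minimal transacted replace-rename on Windows; requires linking KtmW32. */
#ifdef _WIN32
#include <windows.h>
#include <ktmw32.h>

int rename_transacted(const char *from, const char *to) {
  HANDLE txn = CreateTransaction(NULL, NULL, 0, 0, 0, INFINITE, NULL);
  if (txn == INVALID_HANDLE_VALUE) return -1;
  int ok = 0;
  if (MoveFileTransactedA(from, to, NULL, NULL, MOVEFILE_REPLACE_EXISTING, txn)) {
    ok = CommitTransaction(txn) ? 1 : 0;  /* nothing visible until commit */
  } else {
    RollbackTransaction(txn);
  }
  CloseHandle(txn);
  return ok ? 0 : -1;
}
#endif
```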

@ -904,10 +926,16 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
goto cmp_end;
}

dstFp = gzdopen(pFile->fd, "wb6f");
// Both gzclose() and fclose() will close the associated fd, so they need to have different fds.
FileFd gzFd = dup(pFile->fd);
if (gzFd < 0) {
ret = -4;
goto cmp_end;
}
dstFp = gzdopen(gzFd, "wb6f");
if (dstFp == NULL) {
ret = -3;
taosCloseFile(&pFile);
close(gzFd);
goto cmp_end;
}
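
Editor's note: the fix above exists because gzclose() closes the descriptor it was given, and the file wrapper's own close does the same -- handing zlib a dup() gives each side its own fd to close. A self-contained sketch of the ownership rule:

```c
#include <unistd.h>
#include <zlib.h>

/* Give zlib a duplicated fd so gzclose() and the original owner's close()
 * each release their own descriptor, never the same one twice. */
int attach_gzip_writer(int fd, gzFile *out) {
  int gzfd = dup(fd);
  if (gzfd < 0) return -1;
  gzFile gz = gzdopen(gzfd, "wb6f");  /* level 6, filtered strategy */
  if (gz == NULL) {
    close(gzfd);  /* gzdopen failed: the dup is still ours to close */
    return -1;
  }
  *out = gz;
  return 0;
}
```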

@ -121,6 +121,8 @@ LONG WINAPI exceptionHandler(LPEXCEPTION_POINTERS exception);
static pid_t tsProcId;
static char tsSysNetFile[] = "/proc/net/dev";
static char tsSysCpuFile[] = "/proc/stat";
static char tsCpuPeriodFile[] = "/sys/fs/cgroup/cpu/cpu.cfs_period_us";
static char tsCpuQuotaFile[] = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us";
static char tsProcCpuFile[25] = {0};
static char tsProcMemFile[25] = {0};
static char tsProcIOFile[25] = {0};

@ -234,7 +236,7 @@ bool taosCheckSystemIsLittleEnd() {

void taosGetSystemInfo() {
#ifdef WINDOWS
taosGetCpuCores(&tsNumOfCores);
taosGetCpuCores(&tsNumOfCores, false);
taosGetTotalMemory(&tsTotalMemoryKB);
taosGetCpuUsage(NULL, NULL);
#elif defined(_TD_DARWIN_64)

@ -245,7 +247,7 @@ void taosGetSystemInfo() {
tsNumOfCores = sysconf(_SC_NPROCESSORS_ONLN);
#else
taosGetProcIOnfos();
taosGetCpuCores(&tsNumOfCores);
taosGetCpuCores(&tsNumOfCores, false);
taosGetTotalMemory(&tsTotalMemoryKB);
taosGetCpuUsage(NULL, NULL);
taosGetCpuInstructions(&tsSSE42Enable, &tsAVXEnable, &tsAVX2Enable, &tsFMAEnable);

@ -493,7 +495,55 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
#endif
}

int32_t taosGetCpuCores(float *numOfCores) {
// Returns the container's CPU quota if successful, otherwise returns the physical CPU cores
static int32_t taosCntrGetCpuCores(float *numOfCores) {
#ifdef WINDOWS
return -1;
#elif defined(_TD_DARWIN_64)
return -1;
#else
TdFilePtr pFile = NULL;
if (!(pFile = taosOpenFile(tsCpuQuotaFile, TD_FILE_READ | TD_FILE_STREAM))) {
goto _sys;
}
char qline[32] = {0};
if (taosGetsFile(pFile, sizeof(qline), qline) < 0) {
taosCloseFile(&pFile);
goto _sys;
}
taosCloseFile(&pFile);
float quota = taosStr2Float(qline, NULL);
if (quota < 0) {
goto _sys;
}

if (!(pFile = taosOpenFile(tsCpuPeriodFile, TD_FILE_READ | TD_FILE_STREAM))) {
goto _sys;
}
char pline[32] = {0};
if (taosGetsFile(pFile, sizeof(pline), pline) < 0) {
taosCloseFile(&pFile);
goto _sys;
}
taosCloseFile(&pFile);

float period = taosStr2Float(pline, NULL);
float quotaCores = quota / period;
float sysCores = sysconf(_SC_NPROCESSORS_ONLN);
if (quotaCores < sysCores && quotaCores > 0) {
*numOfCores = quotaCores;
} else {
*numOfCores = sysCores;
}
goto _end;
_sys:
*numOfCores = sysconf(_SC_NPROCESSORS_ONLN);
_end:
return 0;
#endif
}
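
Editor's note: the new helper above computes the container's effective core count as quota/period from the cgroup v1 CPU controller, capped by the host's online CPUs. A standalone sketch of the same arithmetic (cgroup v2 exposes the pair in a single cpu.max file instead):

```c
#include <stdio.h>
#include <unistd.h>

/* Effective cores under a cgroup v1 CPU quota; -1 quota means unlimited. */
static float effective_cores(void) {
  float sys = (float)sysconf(_SC_NPROCESSORS_ONLN);
  long quota = -1, period = 0;
  FILE *f = fopen("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", "r");
  if (f == NULL || fscanf(f, "%ld", &quota) != 1) { if (f) fclose(f); return sys; }
  fclose(f);
  if (quota < 0) return sys;  /* no limit configured */
  f = fopen("/sys/fs/cgroup/cpu/cpu.cfs_period_us", "r");
  if (f == NULL || fscanf(f, "%ld", &period) != 1 || period <= 0) { if (f) fclose(f); return sys; }
  fclose(f);
  float q = (float)quota / (float)period;
  return (q > 0 && q < sys) ? q : sys;  /* cap by physical cores */
}
```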

int32_t taosGetCpuCores(float *numOfCores, bool physical) {
#ifdef WINDOWS
SYSTEM_INFO info;
GetSystemInfo(&info);

@ -503,7 +553,11 @@ int32_t taosGetCpuCores(float *numOfCores) {
*numOfCores = sysconf(_SC_NPROCESSORS_ONLN);
return 0;
#else
if (physical) {
*numOfCores = sysconf(_SC_NPROCESSORS_ONLN);
} else {
taosCntrGetCpuCores(numOfCores);
}
return 0;
#endif
}

@ -538,7 +538,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ROW_LENGTH, "Row length exceeds
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of columns")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TOO_MANY_COLUMNS, "Too many columns")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FIRST_COLUMN, "First column must be timestamp")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN, "Invalid binary/nchar column/tag length")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN, "Invalid varbinary/binary/nchar column/tag length")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TAGS_NUM, "Invalid number of tag columns")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Invalid stream query")

@ -573,7 +573,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalidate varbinary type")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalidate varbinary value")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_IP_RANGE, "Invalid IPV4 address ranges")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")

@ -86,7 +86,7 @@ pip3 install kafka-python
python3 kafka_example_consumer.py

# 21
pip3 install taos-ws-py
pip3 install taos-ws-py==0.2.6
python3 conn_websocket_pandas.py

# 22

@ -6,6 +6,21 @@
,,y,unit-test,bash test.sh

#system test
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_state_window.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_state_window.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/partition_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py

,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3

@ -79,10 +94,10 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 4

,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3 -i False

,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/create_wrong_topic.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3

@ -135,6 +150,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py
,,n,system-test,python3 ./test.py -f 7-tmq/tmq_offset.py
,,n,system-test,python3 ./test.py -f 7-tmq/tmqDataPrecisionUnit.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/raw_block_interface_test.py

@ -171,6 +187,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py
,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py

@ -467,7 +485,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3
,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3
,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1
#,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
@ -79,7 +79,7 @@ md5sum /home/TDinternal/debug/build/lib/libtaos.so
#define taospy 2.7.10
pip3 list|grep taospy
pip3 uninstall taospy -y
pip3 install --default-timeout=120 taospy==2.7.10
pip3 install --default-timeout=120 taospy==2.7.12

$TIMEOUT_CMD $cmd
RET=$?

@ -219,11 +219,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):

if valgrind :

crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments)

else:

crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012'%(crash_gen_path ,arguments)

return crash_gen_cmd

@ -220,11 +220,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):

if valgrind :

crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments)

else:

crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012'%(crash_gen_path ,arguments)

return crash_gen_cmd

@ -220,11 +220,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):

if valgrind :

crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203 '%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203,0x4012 '%(crash_gen_path ,arguments)

else:

crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203'%(crash_gen_path ,arguments)
crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203,0x4012'%(crash_gen_path ,arguments)

return crash_gen_cmd

@ -52,8 +52,8 @@ class ConfigureyCluster:
dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")

# configure dnode of independent mnodes
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == "True" :
tdLog.info("set mnode supportVnodes 0")
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
tdLog.info(f"set mnode:{num} supportVnodes 0")
dnode.addExtraCfg("supportVnodes", 0)
# print(dnode)
self.dnodes.append(dnode)

File diff suppressed because it is too large
@ -668,7 +668,7 @@ class TDDnodes:
self.testCluster = False
self.valgrind = 0
self.asan = False
self.killValgrind = 1
self.killValgrind = 0

def init(self, path, remoteIP = ""):
binPath = self.dnodes[0].getPath() + "/../../../"

@ -775,9 +775,41 @@ class TDDnodes:
tdLog.info("execute finished")
return

def killProcesser(self, processerName):
if platform.system().lower() == 'windows':
killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName)
psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName))
else:
killCmd = (
"ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
% processerName
)
psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName)

processID = ""

try:
processID = subprocess.check_output(psCmd, shell=True)
while processID:
os.system(killCmd)
time.sleep(1)
try:
processID = subprocess.check_output(psCmd, shell=True)
except Exception as err:
processID = ""
tdLog.debug(f'**** kill pid warn: {err}')
except Exception as err:
processID = ""
tdLog.debug(f'**** find pid warn: {err}')

def stopAll(self):
tdLog.info("stop all dnodes, asan:%d" % self.asan)
if platform.system().lower() != 'windows':
distro_id = distro.id()
else:
distro_id = "not alpine"
if self.asan and distro_id != "alpine":
tdLog.info("execute script: %s" % self.stopDnodesPath)
os.system(self.stopDnodesPath)

@ -792,7 +824,6 @@ class TDDnodes:

if (distro_id == "alpine"):
print(distro_id)
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):

@ -803,36 +834,9 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()
elif platform.system().lower() == 'windows':
psCmd = "for /f %a in ('wmic process where \"name='taosd.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of taosd.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

psCmd = "for /f %a in ('wmic process where \"name='tmq_sim.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of tmq_sim.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

psCmd = "for /f %a in ('wmic process where \"name='taosBenchmark.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of taosBenchmark.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

self.killProcesser("taosd")
self.killProcesser("tmq_sim")
self.killProcesser("taosBenchmark")
else:
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()

@ -849,7 +853,6 @@ class TDDnodes:
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

if self.killValgrind == 1:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()

@ -111,7 +111,7 @@ class TDSql:
return self.error_info

def query(self, sql, row_tag=None,queryTimes=10):
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
self.sql = sql
i=1
while i <= queryTimes:

@ -120,6 +120,17 @@ class TDSql:
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)

if count_expected_res is not None:
counter = 0
while count_expected_res != self.queryResult[0][0]:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
if counter < queryTimes:
counter += 0.5
time.sleep(0.5)
else:
return False
if row_tag:
return self.queryResult
return self.queryRows
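
Editor's note: the count_expected_res branch above re-runs the query every half second until the first result cell matches the expected value, giving up once the retry budget is spent. The same poll-until-expected shape, sketched in C with the fetch step passed in as a callback:

```c
#include <stdbool.h>
#include <unistd.h>

/* Re-evaluate a counter until it reaches the expected value or the retry
 * budget is exhausted; fetch() stands in for re-running the query. */
static bool wait_for_count(long expected, long (*fetch)(void), int max_tries) {
  for (int i = 0; i < max_tries; i++) {
    if (fetch() == expected) return true;
    usleep(500 * 1000);  /* 0.5 s between retries */
  }
  return false;
}
```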

@ -501,7 +512,8 @@ class TDSql:

caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
# tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)

def checkNotEqual(self, elm, expect_elm):
if elm != expect_elm:

@ -509,7 +521,8 @@ class TDSql:
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
raise Exception

def get_times(self, time_str, precision="ms"):
caller = inspect.getframeinfo(inspect.stack()[1][0])

@ -35,7 +35,7 @@ sql_error alter database $db keep 20.0,20.0,20.0
sql_error alter database $db keep 0,0,0
sql_error alter database $db keep 3
sql_error alter database $db keep -1,-1,-1
sql_error alter database $db keep 20,20
sql alter database $db keep 20,20
sql_error alter database $db keep 9,9,9
sql_error alter database $db keep 20,20,19
sql_error alter database $db keep 20,19,20

@ -44,7 +44,7 @@ sql_error alter database $db keep 20,19,18
sql_error alter database $db keep 20,20,20,20
sql_error alter database $db keep 365001,365001,365001
sql_error alter database $db keep 365001
sql_error alter database $db keep 20
sql alter database $db keep 20
sql select * from information_schema.ins_databases
if $rows != 3 then
return -1

@ -127,4 +127,17 @@ if $data01 != @1aa@ then
return -1
endi

sql select tags t,b from ctt11
print $rows
print $data00 $data01
if $rows != 1 then
return -1
endi
if $data00 != @1@ then
return -1
endi
if $data01 != @1aa@ then
return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@ -198,7 +198,7 @@ class TDTestCase:

# init
def init(self, conn, logSql, replicaVar=1):
seed = time.clock_gettime(time.CLOCK_REALTIME)
seed = time.time() % 10000
random.seed(seed)
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")

@ -210,7 +210,7 @@ class TDTestCase:

# init
def init(self, conn, logSql, replicaVar=1):
seed = time.clock_gettime(time.CLOCK_REALTIME)
seed = time.time() % 10000
random.seed(seed)
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
@ -0,0 +1,37 @@
import time
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


class TDTestCase:
updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 1, "ttlChangeOnWrite": 0}

def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), True)
self.ttl = 5
self.dbname = "test"

def check_ttl_result(self):
tdSql.execute(f'create database {self.dbname}')
tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
tdSql.query(f'show {self.dbname}.tables')
tdSql.checkRows(2)

time.sleep(self.ttl + 2)
tdSql.query(f'show {self.dbname}.tables')
tdSql.checkRows(1)

def run(self):
self.check_ttl_result()

def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,59 @@
import time
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


class TDTestCase:
updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360,
"ttlFlushThreshold": 100, "ttlBatchDropNum": 10}

def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), True)
self.ttl = 5
self.tables = 100
self.dbname = "test"

def check_batch_drop_num(self):
tdSql.execute(f'create database {self.dbname} vgroups 1')
tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table stb(ts timestamp, c1 int) tags(t1 int)')
for i in range(self.tables):
tdSql.execute(f'create table t{i} using stb tags({i}) ttl {self.ttl}')

time.sleep(self.ttl * 2)
tdSql.query('show tables')
tdSql.checkRows(90)

def check_ttl_result(self):
tdSql.execute(f'drop database if exists {self.dbname}')
tdSql.execute(f'create database {self.dbname}')
tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
tdSql.query(f'show {self.dbname}.tables')
tdSql.checkRows(2)

time.sleep(self.ttl)
tdSql.execute(f'insert into {self.dbname}.t2 values(now, 1)')

time.sleep(self.ttl)
tdSql.query(f'show {self.dbname}.tables')
tdSql.checkRows(2)

time.sleep(self.ttl * 2)
tdSql.query(f'show {self.dbname}.tables')
tdSql.checkRows(1)

def run(self):
self.check_batch_drop_num()
self.check_ttl_result()

def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -18,7 +18,7 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)
self.buffer_boundary = [3, 4097, 8193, 12289, 16384]
self.buffer_error = [self.buffer_boundary[0] -
1, self.buffer_boundary[-1]+1, 256]
1, self.buffer_boundary[-1]+1]
# pages_boundary >= 64
self.pages_boundary = [64, 128, 512]
self.pages_error = [self.pages_boundary[0]-1]

@ -47,15 +47,40 @@ class TDTestCase:
tdSql.execute('create database db')
tdSql.query(
'select * from information_schema.ins_databases where name = "db"')
self.pages_error.append(tdSql.queryResult[0][10])
# self.pages_error.append(tdSql.queryResult[0][10])
for pages in self.pages_error:
tdSql.error(f'alter database db pages {pages}')
tdSql.execute('drop database db')

def alter_same_options(self):
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.query('select * from information_schema.ins_databases where name = "db"')

db_options_items = ["replica","keep","buffer","pages","minrows","cachemodel","cachesize","wal_level","wal_fsync_period",
"wal_retention_period","wal_retention_size","stt_trigger"]
db_options_result_idx = [4,7,8,10,11,18,19,20,21,22,23,24]

self.option_result = []
for idx in db_options_result_idx:
self.option_result.append(tdSql.queryResult[0][idx])

index = 0
for option in db_options_items:
if option == "cachemodel":
option_sql = "alter database db %s '%s'" % (option, self.option_result[index] )
else:
option_sql = "alter database db %s %s" % (option, self.option_result[index] )
tdLog.debug(option_sql)
tdSql.query(option_sql)
index += 1
tdSql.execute('drop database db')

def run(self):

self.alter_buffer()
self.alter_pages()
self.alter_same_options()

def stop(self):
tdSql.close()
@ -220,7 +220,7 @@ class TDTestCase:

# init
def init(self, conn, logSql, replicaVar=1):
seed = time.clock_gettime(time.CLOCK_REALTIME)
seed = time.time() % 10000
random.seed(seed)
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")

@ -99,7 +99,7 @@ class TDTestCase:
tdSql.query(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 0);")
tdSql.checkEqual(tdSql.queryResult[0][0], 0)
tdSql.query(f"select to_unixtimestamp('1970-01-01 00:00:00', 1);")
tdSql.checkData(0, 0, '1970-01-01 00:00:00')
tdSql.checkData(0, 0, datetime.datetime(1970, 1, 1, 0, 0, 0))
tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 2);")
tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 1.5);")
tdSql.error(f"select to_unixtimestamp('1970-01-01 08:00:00+08:00', 'abc');")

@ -220,7 +220,7 @@ class TDTestCase:

# init
def init(self, conn, logSql, replicaVar=1):
seed = time.clock_gettime(time.CLOCK_REALTIME)
seed = time.time() % 10000
random.seed(seed)
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")

@ -269,7 +269,7 @@ class TDTestCase:

# init
def init(self, conn, logSql, replicaVar=1):
seed = time.clock_gettime(time.CLOCK_REALTIME)
seed = time.time() % 10000
random.seed(seed)
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
@ -88,15 +88,15 @@ class TDTestCase:
# restart all taosd
tdDnodes=cluster.dnodes

tdDnodes[1].stoptaosd()
# tdDnodes[1].stoptaosd()
tdDnodes[2].stoptaosd()

tdLog.info("check whether 2 mnode status is offline")
clusterComCheck.check3mnode2off()
tdLog.info("check whether 1 mnode status is offline")
clusterComCheck.check3mnodeoff(3)
# tdSql.error("create user user1 pass '123';")

tdLog.info("start two follower")
tdDnodes[1].starttaosd()
tdLog.info("start follower")
# tdDnodes[1].starttaosd()
tdDnodes[2].starttaosd()

clusterComCheck.checkMnodeStatus(mnodeNums)

@ -245,8 +245,11 @@ class ClusterComCheck:
tdLog.exit(f"vgroup number of {db_name} is not correct")
if self.db_replica == 1 :
if tdSql.queryResult[0][4] == 'leader' and tdSql.queryResult[1][4] == 'leader' and tdSql.queryResult[last_number][4] == 'leader':
tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';")
print("db replica :",tdSql.queryResult[0][0])
if tdSql.queryResult[0][0] == db_replica:
ready_time= (count + 1)
tdLog.success(f"all vgroups of {db_name} are leaders in {count + 1} s")
tdLog.success(f"all vgroups with replica {self.db_replica} of {db_name} are leaders in {count + 1} s")
return True
count+=1
elif self.db_replica == 3 :

@ -255,13 +258,16 @@ class ClusterComCheck:
vgroup_status_last=[tdSql.queryResult[last_number][4],tdSql.queryResult[last_number][6],tdSql.queryResult[last_number][8]]
if vgroup_status_first.count('leader') == 1 and vgroup_status_first.count('follower') == 2:
if vgroup_status_last.count('leader') == 1 and vgroup_status_last.count('follower') == 2:
tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';")
print("db replica :",tdSql.queryResult[0][0])
if tdSql.queryResult[0][0] == db_replica:
ready_time= (count + 1)
tdLog.success(f"elections of {db_name}.vgroups are ready in {ready_time} s")
tdLog.success(f"elections of {db_name}.vgroups with replica {self.db_replica} are ready in {ready_time} s")
return True
count+=1
else:
tdLog.debug(tdSql.queryResult)
tdLog.notice(f"elections of {db_name} all vgroups are failed in {count} s ")
tdLog.notice(f"elections of {db_name} all vgroups with replica {self.db_replica} are failed in {count} s ")
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno)
tdLog.exit("%s(%d) failed " % args)

@ -177,7 +177,7 @@ class TDTestCase:
tdSql.query("select count(*) from %s"%stableName)
tdSql.checkData(0,0,rowsPerStb)

clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica1,db_name=paraDict["dbName"],count_number=40)
clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica1,db_name=paraDict["dbName"],count_number=100)
sleep(5)
tdLog.info(f"show transactions;alter database db0_0 replica {replica3};")
TdSqlEx.execute(f'show transactions;')
@ -94,7 +94,7 @@ class TDTestCase:
resultList=[]
while 1:
tdSql.query("select * from %s.consumeresult"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
# tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)))
if tdSql.getRows() == expectRows:
break
else:

@ -336,7 +336,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]

if totalConsumeRows > expectrowcnt or totalConsumeRows <= 0:
if totalConsumeRows > expectrowcnt or totalConsumeRows < 0:
tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")

@ -218,7 +218,7 @@ class TDTestCase:

actConsumeTotalRows = resultList[0]

if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)

@ -216,7 +216,7 @@ class TDTestCase:

actConsumeTotalRows = resultList[0]
tdLog.info("act consume rows: %d, expect rows range (0, %d)"%(actConsumeTotalRows, totalRowsInserted))
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)

@ -218,7 +218,7 @@ class TDTestCase:

tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
if not (actConsumeTotalRows > 0 and actConsumeTotalRows <= totalRowsInserted):
if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
tdLog.exit("%d tmq consume rows error!"%consumerId)

time.sleep(10)

@ -216,7 +216,7 @@ class TDTestCase:

actConsumeTotalRows = resultList[0]

if not (actConsumeTotalRows > 0 and actConsumeTotalRows <= totalRowsInserted):
if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)

@ -217,7 +217,7 @@ class TDTestCase:

tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
if not ((actConsumeTotalRows > 0) and (actConsumeTotalRows <= totalRowsInserted)):
if not ((actConsumeTotalRows >= 0) and (actConsumeTotalRows <= totalRowsInserted)):
tdLog.exit("%d tmq consume rows error!"%consumerId)

time.sleep(10)
@ -211,6 +211,15 @@ class TDTestCase:
time.sleep(1)
continue

# write data again when consumer stopped to make sure some data aren't consumed
pInsertDataAgainThread = tmqCom.asyncInsertDataByInterlace(paraDict)
pInsertDataAgainThread.join()
tdLog.info("first call to flush database when writing data second time")
tdSql.query("flush database %s"%(paraDict['dbName']))
time.sleep(self.walRetentionPeriod + 1)
tdLog.info("second call to flush database when writing data second time")
tdSql.query("flush database %s"%(paraDict['dbName']))

with self.lock:
self.retryPoll = 0
currentTime = datetime.now()

@ -219,11 +228,10 @@ class TDTestCase:
paraDict["startTs"] = 1640966400000 + paraDict["ctbNum"] * paraDict["rowsPerTbl"]
pThread3 = tmqCom.asyncInsertDataByInterlace(paraDict)

tdLog.debug("wait sub-thread to end insert data")
pThread3.join()

totalInsertRows = paraDict["ctbNum"] * paraDict["rowsPerTbl"] * 2
totalInsertRows = paraDict["ctbNum"] * paraDict["rowsPerTbl"] * 3
tdLog.debug("wait sub-thread to end consume data")
pThread2.join()
@ -21,6 +21,7 @@ class TDTestCase:
self.db_name = "tmq_db"
self.topic_name = "tmq_topic"
self.stable_name = "tmqst"
self.prepareData()

def prepareData(self):

@ -73,8 +74,9 @@ class TDTestCase:
return consumer

def test_seek_and_committed_position_with_autocommit(self):
"""Check the position and committed offset of the topic for autocommit scenario
"""
try:
self.prepareData()
inputDict = {
"topic_name": self.topic_name,
"group_id": "1",

@ -95,18 +97,26 @@ class TDTestCase:

partitions = consumer.assignment()
position_partitions = consumer.position(partitions)
tdLog.info("position_partitions: %s"%(position_partitions))
for i in range(len(position_partitions)):
tdLog.info("position_partitions[%s].offset: %s"%(i, position_partitions[i].offset))
committed_partitions = consumer.committed(partitions)
tdLog.info("committed_partitions: %s"%(committed_partitions))
origin_committed_position = []
for i in range(len(committed_partitions)):
tdLog.info("committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset))
origin_committed_position.append(committed_partitions[i].offset)
assert(len(position_partitions) == len(committed_partitions))
for i in range(len(position_partitions)):
assert(position_partitions[i].offset == committed_partitions[i].offset)
# seek to the beginning of the topic

# seek to the specified offset of the topic, then check position and committed offset
for partition in partitions:
partition.offset = 5
consumer.seek(partition)
position_partitions = consumer.position(partitions)
for i in range(len(position_partitions)):
assert(position_partitions[i].offset == 5)
committed_partitions = consumer.committed(partitions)
for i in range(len(committed_partitions)):
assert(committed_partitions[i].offset != 5 and committed_partitions[i].offset == origin_committed_position[i])
except Exception as ex:
raise Exception("Failed to test seek and committed position with autocommit with error: {}".format(str(ex)))
finally:

@ -114,12 +124,81 @@ class TDTestCase:
consumer.close()

def test_commit_by_offset(self):
pass
"""Check the position and committed offset of the topic for commit by offset scenario
"""
try:
inputDict = {
"topic_name": self.topic_name,
"group_id": "1",
"auto_commit": "false",
"offset_reset": "earliest"
}
consumer = self.tmqSubscribe(inputDict)
origin_committed_position = []
while(True):
res = consumer.poll(1)
if not res:
break
err = res.error()
if err is not None:
raise err
partitions = consumer.assignment()
consumer.commit(offsets=partitions)
val = res.value()
for block in val:
tdLog.info("block.fetchall() number: %s"%(len(block.fetchall())))
position_partitions = consumer.position(partitions)
committed_partitions = consumer.committed(partitions)
for i in range(len(position_partitions)):
assert(position_partitions[i].offset == committed_partitions[i].offset)
committed_partitions = consumer.committed(partitions)
for i in range(len(committed_partitions)):
origin_committed_position.append(committed_partitions[i].offset)
tdLog.info("original committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset))
# seek to the specified offset of the topic, then check position and committed offset
for partition in partitions:
partition.offset = 2
consumer.seek(partition)
position_partitions = consumer.position(partitions)
for i in range(len(position_partitions)):
assert(position_partitions[i].offset == 2)
committed_partitions = consumer.committed(partitions)
for i in range(len(committed_partitions)):
tdLog.info("after seek committed_partitions[%s].offset: %s"%(i, committed_partitions[i].offset))
assert(committed_partitions[i].offset != 2 and committed_partitions[i].offset == origin_committed_position[i])
# continue to consume data from seek offset
while(True):
res = consumer.poll(1)
if not res:
break
err = res.error()
if err is not None:
raise err
partitions = consumer.assignment()
# commit by offset
consumer.commit(offsets=partitions)
val = res.value()
for block in val:
tdLog.info("block.fetchall() number: %s"%(len(block.fetchall())))
partitions = consumer.assignment()
position_partitions = consumer.position(partitions)
committed_partitions = consumer.committed(partitions)
assert(len(position_partitions) == len(committed_partitions))
for i in range(len(position_partitions)):
assert(position_partitions[i].offset == committed_partitions[i].offset)
except Exception as ex:
raise Exception("Failed to test commit by offset with error: {}".format(str(ex)))
finally:
consumer.unsubscribe()
consumer.close()

def run(self):
self.test_seek_and_committed_position_with_autocommit()
self.test_commit_by_offset()

def stop(self):
tdSql.execute("drop topic %s" % self.topic_name)
tdSql.execute("drop database %s"%(self.db_name))
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
@ -0,0 +1,220 @@
|
|||
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None):
|
||||
tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
if partition == "tbname":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_col_alias
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
elif partition is None:
|
||||
partition_elm_alias = '"no_partition"'
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname" or partition is None:
|
||||
if case_when:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition} {partition_elm_alias}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
start_time = self.tdCom.date_time
|
||||
for i in range(self.tdCom.range_count):
|
||||
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||
if i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||
if i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
|
||||
if self.tdCom.subtable:
|
||||
for tname in [self.stb_name, self.ctb_name]:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
        if fill_value:
            end_date_time = self.tdCom.date_time
            final_range_count = self.tdCom.range_count
            history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            start_ts = self.tdCom.time_cast(history_ts, "-")
            future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            end_ts = self.tdCom.time_cast(future_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
            self.tdCom.date_time = start_time
            # update: write the same out-of-range rows again to exercise update handling
            history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            start_ts = self.tdCom.time_cast(history_ts, "-")
            future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            end_ts = self.tdCom.time_cast(future_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
            self.tdCom.date_time = start_time
            for i in range(self.tdCom.range_count):
                ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
                ts_cast_delete_value = self.tdCom.time_cast(ts_value)
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                self.tdCom.date_time += 1
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                self.tdCom.date_time += 1
            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
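            # every destination table must now match the corresponding fill() query over the widened time range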
            for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    if "value" in fill_value.lower():
                        fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                    if partition == "tbname":
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                    else:
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
                else:
                    if "value" in fill_value.lower():
                        fill_value='VALUE,1,2,3,6,7,8,9,10,11'
                    if partition == "tbname":
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                    else:
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)

            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
                for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        if "value" in fill_value.lower():
                            fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                        if partition == "tbname":
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                        else:
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)

                    else:
                        if "value" in fill_value.lower():
                            fill_value='VALUE,1,2,3,6,7,8,9,10,11'
                        if partition == "tbname":
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                        else:
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)

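    # exercise at_once + interval streams over each partition scheme, with deletes and every fill mode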
    def run(self):
        self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
        self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
        # for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
        for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
            self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
            self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,209 @@

import sys
import random
import threading
import time
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False):
        tdLog.info(f"*** testing stream at_once+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, delete: {delete}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
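        # pick the source column list: use the partial select string when only a subset of the destination columns is mapped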
        if use_except:
            if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")):
                partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
            else:
                partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
        else:
            if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
                partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
            else:
                partitial_tb_source_str = self.tdCom.ext_tb_source_select_str

        if stb_field_name_value is not None:
            if len(stb_field_name_value) == 0:
                stb_field_name_value = ",".join(self.tdCom.tb_filter_des_select_elm.split(",")[:5])
            # else:
            #     stb_field_name_value = self.tdCom.tb_filter_des_select_elm
        self.delete = delete
        self.tdCom.case_name = sys._getframe().f_code.co_name
        defined_tag_count = len(tag_value.split()) if tag_value is not None else 0
        self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'

        if partition == "tbname":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_tbname_alias

            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_col_alias
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "abs(c1)":
            partition_elm_alias = self.tdCom.partition_expression_alias
        elif partition == "tbname,t1,c1":
            partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1'
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        if subtable:
            if partition == "tbname":
                if case_when:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                else:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            else:
                if subtable == "constant":
                    # stb_subtable_value = f'"{self.tdCom.ext_ctb_stream_des_table}"'
                    stb_subtable_value = f'"constant_{self.tdCom.ext_ctb_stream_des_table}"'
                else:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(cast({subtable} as int unsigned) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = None
        if fill_value:
            if "value" in fill_value.lower():
                fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
        # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.ext_tb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
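        # create the stream into the pre-existing super table; with use_except the generated statement is expected to be rejected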
        if partition:
            stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
        else:
            stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
        if stream_sql:
            tdSql.error(stream_sql)
            return
        start_time = self.tdCom.date_time
        if subtable == "constant":
            range_count = 1
        else:
            range_count = self.tdCom.range_count

        for i in range(range_count):
            latency = 0
            tag_value_list = list()
            ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
            ts_cast_delete_value = self.tdCom.time_cast(ts_value)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
            if self.delete and i%2 != 0:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
            if self.delete and i%2 != 0:
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
            if tag_value:
                if subtable == "constant":
                    tdSql.query(f'select {tag_value} from constant_{self.tdCom.ext_ctb_stream_des_table}')
                else:
                    tdSql.query(f'select {tag_value} from {self.stb_name}')
                tag_value_list = tdSql.queryResult
            if not fill_value:
                if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm:
                    self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
                elif stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
                    self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, cast(max(c2) as tinyint), cast(min(c1) as smallint) from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
                else:
                    if partition:
                        if tag_value == self.tdCom.exchange_tag_filter_des_select_elm:
                            self.tdCom.check_query_data(f'select {self.tdCom.partitial_tag_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
                        elif tag_value == self.tdCom.cast_tag_filter_des_select_elm:
                            tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart')
                            limit_row = tdSql.queryRows
                            self.tdCom.check_query_data(f'select {self.tdCom.cast_tag_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select cast(t1 as TINYINT UNSIGNED),cast(t2 as varchar(256)),cast(t3 as bool) from {self.stb_name} order by ts limit {limit_row}')
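                            # tags not covered by the cast list must stay NULL; poll until the stream catches up or the default interval elapses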
                            tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
                            while list(set(tdSql.queryResult)) != [(None, None, None, None, None, None, None, None, None, None)]:
                                tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
                                if latency < self.tdCom.default_interval:
                                    latency += 1
                                    time.sleep(1)
                                else:
                                    return False
                            tdSql.checkEqual(list(set(tdSql.queryResult)), [(None, None, None, None, None, None, None, None, None, None)])
                        else:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
                    else:
                        if use_exist_stb and not tag_value:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, use_exist_stb=use_exist_stb)
                        else:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, subtable=subtable)

        if subtable:
            for tname in [self.stb_name]:
                tdSql.query(f'select * from {self.ctb_name}')
                ptn_counter = 0
                for c1_value in tdSql.queryResult:
                    if partition == "c1":
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
                    elif partition == "abs(c1)":
                        abs_c1_value = abs(c1_value[1])
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                    elif partition == "tbname" and ptn_counter == 0:
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
                        ptn_counter += 1
                    else:
                        tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))')
                        subtable_value = tdSql.queryResult[0][0]
                        if subtable == "constant":
                            return
                        else:
                            tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;')
                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

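    # sweep delete/fill_history combinations; the use_except calls must all fail at create-stream time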
    def run(self):
        # self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
        for delete in [True, False]:
            for fill_history_value in [0, 1]:
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.partitial_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.exchange_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                # self-defined tags
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.partitial_tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.exchange_tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition="t1 as t5,t2 as t11,t3 as t13", subtable=None, stb_field_name_value=None, tag_value="t5,t11,t13", use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t1", use_exist_stb=True)
        # error cases
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value="", tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm.replace("c1","c19"), tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="ttt", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t15", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="c5", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as t14", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as c13", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,c13", use_exist_stb=True, use_except=True)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,223 @@

import sys
import random
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def at_once_session(self, session, ignore_expired=None, ignore_update=None, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
        tdLog.info(f"*** testing stream at_once+session: session: {session}, ignore_expired: {ignore_expired}, ignore_update: {ignore_update}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, case_when: {case_when}, subtable: {subtable} ***")
        self.delete = delete
        self.tdCom.case_name = sys._getframe().f_code.co_name
        self.tdCom.prepare_data(session=session, fill_history_value=fill_history_value)
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        if partition == "tbname":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_tbname_alias
            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1":
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "abs(c1)":
            if subtable:
                partition_elm_alias = self.tdCom.partition_expression_alias
            else:
                partition_elm_alias = "constant"
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        if partition == "tbname" or subtable is None:
            if case_when:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            else:
                if subtable:
                    ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                    tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                else:
                    ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                    tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            if 'abs' in partition:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

            else:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

        # create stb/ctb/tb stream
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
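        # the statement tdCom.create_stream builds has roughly this shape (a sketch; the exact clauses depend on the helper):
        #   CREATE STREAM <tb>_stream TRIGGER AT_ONCE [IGNORE EXPIRED 0|1] [IGNORE UPDATE 0|1] [FILL_HISTORY 1]
        #   INTO <tb>_dest [SUBTABLE(<concat expr>)] AS
        #   SELECT _wstart AS wstart, _wend AS wend, <aggregates> FROM <tb>
        #   PARTITION BY <partition> <alias> SESSION(ts, <session>s)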
        for i in range(self.tdCom.range_count):
            ctb_name = self.tdCom.get_long_name()
            self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)

            if i == 0:
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
            else:
                self.tdCom.date_time = window_close_ts + 1
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
            if i == 0:
                record_window_close_ts = window_close_ts
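            # write one row at the session start and one at its computed close point; alternate rounds also update or delete rows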
            for ts_value in [self.tdCom.date_time, window_close_ts]:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
                if self.tdCom.update and i%2 == 0:
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
                if self.delete and i%2 != 0:
                    dt = f'cast({self.tdCom.date_time-1} as timestamp)'
                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
                    self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
                ts_value += 1

        # check result
        if partition != "tbname":
            for colname in self.tdCom.partition_by_downsampling_function_list:
                if "first" not in colname and "last" not in colname:
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
        else:
            for tbname in [self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
                else:
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)

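        # disorder cases: with ignore expired/update set, late or duplicate rows change the source query result but must leave the stream output unchanged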
        if self.tdCom.disorder:
            if ignore_expired:
                for tbname in [self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res2 = tdSql.queryResult
                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
                        tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
                        res1 = tdSql.queryResult
                        tdSql.checkNotEqual(res1, res2)
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res1 = tdSql.queryResult
                        tdSql.checkEqual(res1, res2)
                    else:
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res2 = tdSql.queryResult
                        self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
                        tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
                        res1 = tdSql.queryResult
                        tdSql.checkNotEqual(res1, res2)
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res1 = tdSql.queryResult
                        tdSql.checkEqual(res1, res2)
            else:
                if ignore_update:
                    for tbname in [self.ctb_name, self.tb_name]:
                        if tbname != self.tb_name:
                            tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                            res2 = tdSql.queryResult
                            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
                            tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
                            res1 = tdSql.queryResult
                            tdSql.checkNotEqual(res1, res2)
                        else:
                            tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                            res2 = tdSql.queryResult
                            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
                            tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
                            res1 = tdSql.queryResult
                            tdSql.checkNotEqual(res1, res2)
                else:
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
                    if partition != "tbname":
                        for colname in self.tdCom.partition_by_downsampling_function_list:
                            if "first" not in colname and "last" not in colname:
                                self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
                                self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
                    else:
                        for tbname in [self.tb_name]:
                            if tbname != self.tb_name:
                                self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
                            else:
                                self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)

        if fill_history_value:
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.record_history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.record_history_ts)
            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))

        if self.tdCom.subtable:
            tdSql.query(f'select * from {self.ctb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if c1_value[1] is not None:
                    if partition == "c1":
                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
                    elif partition == "abs(c1)":
                        if subtable:
                            abs_c1_value = abs(c1_value[1])
                            tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                        else:
                            tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
                    elif partition == "tbname" and ptn_counter == 0:
                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
                        ptn_counter += 1
                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

            tdSql.query(f'select * from {self.tb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if c1_value[1] is not None:
                    if partition == "c1":
                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
                    elif partition == "abs(c1)":
                        if subtable:
                            abs_c1_value = abs(c1_value[1])
                            tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                        else:
                            tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
                    elif partition == "tbname" and ptn_counter == 0:
                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
                        ptn_counter += 1

            tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

    def run(self):
        self.at_once_session(session=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, delete=True, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
        for subtable in [None, True]:
            self.at_once_session(session=random.randint(10, 15), subtable=subtable, partition="abs(c1)")
        for ignore_expired in [None, 0, 1]:
            for fill_history_value in [None, 1]:
                self.at_once_session(session=random.randint(10, 15), ignore_expired=ignore_expired, fill_history_value=fill_history_value)
        for fill_history_value in [None, 1]:
            self.at_once_session(session=random.randint(10, 15), partition="tbname", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="c1", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, subtable=None, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), ignore_update=1, fill_history_value=fill_history_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,144 @@

import sys
import random
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def at_once_state_window(self, state_window, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
        tdLog.info(f"*** testing stream at_once+state_window: state_window: {state_window}, partition: {partition}, fill_history: {fill_history_value}, case_when: {case_when}, delete: {delete} ***")
        self.delete = delete
        self.tdCom.case_name = sys._getframe().f_code.co_name
        self.tdCom.prepare_data(state_window=state_window, fill_history_value=fill_history_value)
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'

        if partition == "tbname":
            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1" and subtable is not None:
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "c1" and subtable is None:
            partition_elm_alias = 'constant'
        elif partition == "abs(c1)":
            partition_elm_alias = self.tdCom.partition_expression_alias
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        if partition == "tbname" or subtable is None:
            if partition == "tbname":
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            else:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            if 'abs' in partition:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

            else:
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

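        # the state expression may be the raw column or a case-when over it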
        state_window_col_name = self.tdCom.dataDict["state_window"]
        if case_when:
            stream_state_window = case_when
        else:
            stream_state_window = state_window_col_name
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
        range_times = self.tdCom.range_count
        state_window_max = self.tdCom.dataDict['state_window_max']
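        # drive state transitions: each outer round picks one state value and writes it several times so a state window forms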
        for i in range(range_times):
            state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times))
            for j in range(2, range_times+3):
                tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.tdCom.update and j%2 == 0:
                    tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.delete and j%2 != 0:
                    dt = f'cast({self.tdCom.date_time-1} as timestamp)'
                    tdSql.execute(f'delete from {self.ctb_name} where ts = {dt}')
                tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.tdCom.update and j%2 == 0:
                    tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.delete and j%2 != 0:
                    tdSql.execute(f'delete from {self.tb_name} where ts = {dt}')
                self.tdCom.date_time += 1

        # for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
        for tbname in [self.ctb_name, self.tb_name]:
            if tbname != self.tb_name:
                self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)
            else:
                self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)

        if fill_history_value:
            self.tdCom.update_delete_history_data(self.delete)

        if self.tdCom.subtable:
            tdSql.query(f'select * from {self.ctb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if partition == "c1":
                    if subtable:
                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
                    else:
                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
                        return
                elif partition == "abs(c1)":
                    abs_c1_value = abs(c1_value[1])
                    tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                elif partition == "tbname" and ptn_counter == 0:
                    tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
                    ptn_counter += 1
                tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

            tdSql.query(f'select * from {self.tb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if partition == "c1":
                    if subtable:
                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
                    else:
                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
                        return
                elif partition == "abs(c1)":
                    abs_c1_value = abs(c1_value[1])
                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                elif partition == "tbname" and ptn_counter == 0:
                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
                    ptn_counter += 1

            tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

    def run(self):
        self.at_once_state_window(state_window="c2", partition="tbname", case_when="case when c1 < 0 then c1 else c2 end")
        self.at_once_state_window(state_window="c1", partition="tbname", case_when="case when c1 >= 0 then c1 else c2 end")
        for fill_history_value in [None, 1]:
            self.at_once_state_window(state_window="c1", partition="tbname", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="abs(c1)", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="tbname", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", subtable=None, fill_history_value=fill_history_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())