fix:conflicts from 3.0
This commit is contained in:
commit
ef87b9c571
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.1.1.0.alpha")
|
SET(TD_VER_NUMBER "3.2.0.0.alpha")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
# cos
|
# cos
|
||||||
ExternalProject_Add(mxml
|
ExternalProject_Add(mxml
|
||||||
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
|
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
|
||||||
GIT_TAG release-2.12
|
GIT_TAG v2.12
|
||||||
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
|
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
|
||||||
#BINARY_DIR ""
|
#BINARY_DIR ""
|
||||||
BUILD_IN_SOURCE TRUE
|
BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -0,0 +1,440 @@
|
||||||
|
---
|
||||||
|
sidebar_label: Seeq
|
||||||
|
title: Seeq
|
||||||
|
description: How to use Seeq and TDengine to perform time series data analysis
|
||||||
|
---
|
||||||
|
|
||||||
|
# How to use Seeq and TDengine to perform time series data analysis
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
|
||||||
|
|
||||||
|
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
|
||||||
|
|
||||||
|
### Install Seeq
|
||||||
|
|
||||||
|
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
|
||||||
|
|
||||||
|
### Install and start Seeq Server
|
||||||
|
|
||||||
|
```
|
||||||
|
tar xvzf seeq-server-xxx.tar.gz
|
||||||
|
cd seeq-server-installer
|
||||||
|
sudo ./install
|
||||||
|
|
||||||
|
sudo seeq service enable
|
||||||
|
sudo seeq start
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install and start Seeq Data Lab Server
|
||||||
|
|
||||||
|
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
|
||||||
|
|
||||||
|
```
|
||||||
|
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||||
|
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||||
|
sudo seeq config set Network/DataLab/Hostname localhost
|
||||||
|
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||||
|
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||||
|
|
||||||
|
# If the main Seeq server is configured to listen over HTTPS
|
||||||
|
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||||
|
|
||||||
|
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||||
|
sudo seeq config set Network/Webserver/Port <value>
|
||||||
|
|
||||||
|
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||||
|
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||||
|
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install TDengine on-premise instance
|
||||||
|
|
||||||
|
See [Quick Install from Package](../../get-started).
|
||||||
|
|
||||||
|
### Or use TDengine Cloud
|
||||||
|
|
||||||
|
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
|
||||||
|
|
||||||
|
## Make Seeq be able to access TDengine
|
||||||
|
|
||||||
|
1. Get data location configuration
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo seeq config get Folders/Data
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
|
||||||
|
|
||||||
|
3. Restart Seeq server
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo seeq restart
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Input License
|
||||||
|
|
||||||
|
Use a browser to access ip:34216 and input the license according to the guide.
|
||||||
|
|
||||||
|
## How to use Seeq to analyze time-series data that TDengine serves
|
||||||
|
|
||||||
|
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
|
||||||
|
|
||||||
|
### Scenario Overview
|
||||||
|
|
||||||
|
The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict how the electricity consumption will develop and purchase additional equipment to support it. The electricity consumption varies with monthly orders, and seasonal variations also affect the power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.
|
||||||
|
|
||||||
|
### Schema
|
||||||
|
|
||||||
|
```
|
||||||
|
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||||
|
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||||
|
```
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### Mock data
|
||||||
|
|
||||||
|
```
|
||||||
|
python mockdata.py
|
||||||
|
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||||
|
```
|
||||||
|
|
||||||
|
The source code is hosted at [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
|
||||||
|
|
||||||
|
### Using Seeq for data analysis
|
||||||
|
|
||||||
|
#### Data Source configuration
|
||||||
|
|
||||||
|
Please login with Seeq administrator and create a few data sources as following.
|
||||||
|
|
||||||
|
- Power
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"QueryDefinitions": [
|
||||||
|
{
|
||||||
|
"Name": "PowerNum",
|
||||||
|
"Type": "SIGNAL",
|
||||||
|
"Sql": "SELECT ts, num FROM meters",
|
||||||
|
"Enabled": true,
|
||||||
|
"TestMode": false,
|
||||||
|
"TestQueriesDuringSync": true,
|
||||||
|
"InProgressCapsulesEnabled": false,
|
||||||
|
"Variables": null,
|
||||||
|
"Properties": [
|
||||||
|
{
|
||||||
|
"Name": "Name",
|
||||||
|
"Value": "Num",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Interpolation Method",
|
||||||
|
"Value": "linear",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Maximum Interpolation",
|
||||||
|
"Value": "2day",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"CapsuleProperties": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Type": "GENERIC",
|
||||||
|
"Hostname": null,
|
||||||
|
"Port": 0,
|
||||||
|
"DatabaseName": null,
|
||||||
|
"Username": "root",
|
||||||
|
"Password": "taosdata",
|
||||||
|
"InitialSql": null,
|
||||||
|
"TimeZone": null,
|
||||||
|
"PrintRows": false,
|
||||||
|
"UseWindowsAuth": false,
|
||||||
|
"SqlFetchBatchSize": 100000,
|
||||||
|
"UseSSL": false,
|
||||||
|
"JdbcProperties": null,
|
||||||
|
"GenericDatabaseConfig": {
|
||||||
|
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||||
|
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||||
|
"ResolutionInNanoseconds": 1000,
|
||||||
|
"ZonedColumnTypes": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- Goods
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"QueryDefinitions": [
|
||||||
|
{
|
||||||
|
"Name": "PowerGoods",
|
||||||
|
"Type": "CONDITION",
|
||||||
|
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
|
||||||
|
"Enabled": true,
|
||||||
|
"TestMode": false,
|
||||||
|
"TestQueriesDuringSync": true,
|
||||||
|
"InProgressCapsulesEnabled": false,
|
||||||
|
"Variables": null,
|
||||||
|
"Properties": [
|
||||||
|
{
|
||||||
|
"Name": "Name",
|
||||||
|
"Value": "Goods",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Maximum Duration",
|
||||||
|
"Value": "10days",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"CapsuleProperties": [
|
||||||
|
{
|
||||||
|
"Name": "goods",
|
||||||
|
"Value": "${columnResult}",
|
||||||
|
"Column": "goods",
|
||||||
|
"Uom": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Type": "GENERIC",
|
||||||
|
"Hostname": null,
|
||||||
|
"Port": 0,
|
||||||
|
"DatabaseName": null,
|
||||||
|
"Username": "root",
|
||||||
|
"Password": "taosdata",
|
||||||
|
"InitialSql": null,
|
||||||
|
"TimeZone": null,
|
||||||
|
"PrintRows": false,
|
||||||
|
"UseWindowsAuth": false,
|
||||||
|
"SqlFetchBatchSize": 100000,
|
||||||
|
"UseSSL": false,
|
||||||
|
"JdbcProperties": null,
|
||||||
|
"GenericDatabaseConfig": {
|
||||||
|
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||||
|
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||||
|
"ResolutionInNanoseconds": 1000,
|
||||||
|
"ZonedColumnTypes": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- Temperature
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"QueryDefinitions": [
|
||||||
|
{
|
||||||
|
"Name": "PowerNum",
|
||||||
|
"Type": "SIGNAL",
|
||||||
|
"Sql": "SELECT ts, temperature FROM meters",
|
||||||
|
"Enabled": true,
|
||||||
|
"TestMode": false,
|
||||||
|
"TestQueriesDuringSync": true,
|
||||||
|
"InProgressCapsulesEnabled": false,
|
||||||
|
"Variables": null,
|
||||||
|
"Properties": [
|
||||||
|
{
|
||||||
|
"Name": "Name",
|
||||||
|
"Value": "Temperature",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Interpolation Method",
|
||||||
|
"Value": "linear",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Maximum Interpolation",
|
||||||
|
"Value": "2day",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"CapsuleProperties": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Type": "GENERIC",
|
||||||
|
"Hostname": null,
|
||||||
|
"Port": 0,
|
||||||
|
"DatabaseName": null,
|
||||||
|
"Username": "root",
|
||||||
|
"Password": "taosdata",
|
||||||
|
"InitialSql": null,
|
||||||
|
"TimeZone": null,
|
||||||
|
"PrintRows": false,
|
||||||
|
"UseWindowsAuth": false,
|
||||||
|
"SqlFetchBatchSize": 100000,
|
||||||
|
"UseSSL": false,
|
||||||
|
"JdbcProperties": null,
|
||||||
|
"GenericDatabaseConfig": {
|
||||||
|
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||||
|
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||||
|
"ResolutionInNanoseconds": 1000,
|
||||||
|
"ZonedColumnTypes": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Launch Seeq Workbench
|
||||||
|
|
||||||
|
Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
#### Use Seeq Data Lab Server for advanced data analysis
|
||||||
|
|
||||||
|
Please login to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
|
||||||
|
|
||||||
|
```Python
|
||||||
|
from seeq import spy
|
||||||
|
spy.options.compatibility = 189
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import mlforecast
|
||||||
|
import lightgbm as lgb
|
||||||
|
from mlforecast.target_transforms import Differences
|
||||||
|
from sklearn.linear_model import LinearRegression
|
||||||
|
|
||||||
|
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
|
||||||
|
print(ds)
|
||||||
|
|
||||||
|
sig = ds.loc[ds['Name'].isin(['Num'])]
|
||||||
|
print(sig)
|
||||||
|
|
||||||
|
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
|
||||||
|
print("data.info()")
|
||||||
|
data.info()
|
||||||
|
print(data)
|
||||||
|
#data.plot()
|
||||||
|
|
||||||
|
print("data[Num].info()")
|
||||||
|
data['Num'].info()
|
||||||
|
da = data['Num'].index.tolist()
|
||||||
|
#print(da)
|
||||||
|
|
||||||
|
li = data['Num'].tolist()
|
||||||
|
#print(li)
|
||||||
|
|
||||||
|
data2 = pd.DataFrame()
|
||||||
|
data2['ds'] = da
|
||||||
|
print('1st data2 ds info()')
|
||||||
|
data2['ds'].info()
|
||||||
|
|
||||||
|
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
|
||||||
|
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
|
||||||
|
data2['y'] = li
|
||||||
|
print('2nd data2 ds info()')
|
||||||
|
data2['ds'].info()
|
||||||
|
print(data2)
|
||||||
|
|
||||||
|
data2.insert(0, column = "unique_id", value="unique_id")
|
||||||
|
|
||||||
|
print("Forecasting ...")
|
||||||
|
|
||||||
|
forecast = mlforecast.MLForecast(
|
||||||
|
models = lgb.LGBMRegressor(),
|
||||||
|
freq = 1,
|
||||||
|
lags=[365],
|
||||||
|
target_transforms=[Differences([365])],
|
||||||
|
)
|
||||||
|
|
||||||
|
forecast.fit(data2)
|
||||||
|
predicts = forecast.predict(365)
|
||||||
|
|
||||||
|
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
|
||||||
|
plt.show()
|
||||||
|
```
|
||||||
|
|
||||||
|
Example output:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### How to configure Seeq data source to access TDengine Cloud
|
||||||
|
|
||||||
|
Configuring a Seeq data source connection to TDengine Cloud or a local installation instance does not have any essential differences. After logging in to TDengine Cloud, select "Programming - Java" and copy the JDBC URL string with the token provided. Then, use this JDBC URL string to fill in the DatabaseJdbcUrl value in the Seeq Data Source configuration.
|
||||||
|
|
||||||
|
Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.
|
||||||
|
|
||||||
|
#### The data source of TDengine Cloud example
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"QueryDefinitions": [
|
||||||
|
{
|
||||||
|
"Name": "CloudVoltage",
|
||||||
|
"Type": "SIGNAL",
|
||||||
|
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||||
|
"Enabled": true,
|
||||||
|
"TestMode": false,
|
||||||
|
"TestQueriesDuringSync": true,
|
||||||
|
"InProgressCapsulesEnabled": false,
|
||||||
|
"Variables": null,
|
||||||
|
"Properties": [
|
||||||
|
{
|
||||||
|
"Name": "Name",
|
||||||
|
"Value": "Voltage",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Interpolation Method",
|
||||||
|
"Value": "linear",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Name": "Maximum Interpolation",
|
||||||
|
"Value": "2day",
|
||||||
|
"Sql": null,
|
||||||
|
"Uom": "string"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"CapsuleProperties": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Type": "GENERIC",
|
||||||
|
"Hostname": null,
|
||||||
|
"Port": 0,
|
||||||
|
"DatabaseName": null,
|
||||||
|
"Username": "root",
|
||||||
|
"Password": "taosdata",
|
||||||
|
"InitialSql": null,
|
||||||
|
"TimeZone": null,
|
||||||
|
"PrintRows": false,
|
||||||
|
"UseWindowsAuth": false,
|
||||||
|
"SqlFetchBatchSize": 100000,
|
||||||
|
"UseSSL": false,
|
||||||
|
"JdbcProperties": null,
|
||||||
|
"GenericDatabaseConfig": {
|
||||||
|
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||||
|
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||||
|
"ResolutionInNanoseconds": 1000,
|
||||||
|
"ZonedColumnTypes": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Seeq Workbench with TDengine Cloud data source example
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Conclusion
|
||||||
|
|
||||||
|
By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
|
||||||
|
|
||||||
|
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
|
||||||
|
|
||||||
|
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.
|
Binary file not shown.
After Width: | Height: | Size: 13 KiB |
Binary file not shown.
After Width: | Height: | Size: 56 KiB |
Binary file not shown.
After Width: | Height: | Size: 26 KiB |
Binary file not shown.
After Width: | Height: | Size: 47 KiB |
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.1.1.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.1.1.0" />
|
||||||
|
|
||||||
## 3.1.0.3
|
## 3.1.0.3
|
||||||
|
|
||||||
<Release type="tdengine" version="3.1.0.3" />
|
<Release type="tdengine" version="3.1.0.3" />
|
||||||
|
|
|
@ -11,7 +11,11 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能
|
||||||
|
|
||||||
## 安装
|
## 安装
|
||||||
|
|
||||||
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark
|
taosBenchmark 有两种安装方式:
|
||||||
|
|
||||||
|
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](../../operation/pkg-install)。
|
||||||
|
|
||||||
|
- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
|
||||||
|
|
||||||
## 运行
|
## 运行
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,12 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
|
||||||
|
|
||||||
## 安装
|
## 安装
|
||||||
|
|
||||||
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper
|
taosKeeper 有两种安装方式:
|
||||||
|
taosKeeper 安装方式:
|
||||||
|
|
||||||
|
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](../../operation/pkg-install)。
|
||||||
|
|
||||||
|
- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
|
||||||
|
|
||||||
## 配置和运行方式
|
## 配置和运行方式
|
||||||
|
|
||||||
|
|
|
@ -1,80 +0,0 @@
|
||||||
---
|
|
||||||
title: 集群运维
|
|
||||||
description: TDengine 提供了多种集群运维手段以使集群运行更健康更高效
|
|
||||||
---
|
|
||||||
|
|
||||||
为了使集群运行更健康更高效,TDengine 企业版提供了一些运维手段来帮助系统管理员更好地运维集群。
|
|
||||||
|
|
||||||
## 数据重整
|
|
||||||
|
|
||||||
TDengine 面向多种写入场景,在有些写入场景下,TDengine 的存储会导致数据存储的放大或数据文件的空洞等。这一方面影响数据的存储效率,另一方面也会影响查询效率。为了解决上述问题,TDengine 企业版提供了对数据的重整功能,即 DATA COMPACT 功能,将存储的数据文件重新整理,删除文件空洞和无效数据,提高数据的组织度,从而提高存储和查询的效率。
|
|
||||||
|
|
||||||
**语法**
|
|
||||||
|
|
||||||
```sql
|
|
||||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
|
||||||
```
|
|
||||||
|
|
||||||
**效果**
|
|
||||||
|
|
||||||
- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件
|
|
||||||
- COMPCAT 会删除被删除数据以及被删除的表的数据
|
|
||||||
- COMPACT 会合并多个 STT 文件
|
|
||||||
- 可通过 start with 关键字指定 COMPACT 数据的起始时间
|
|
||||||
- 可通过 end with 关键字指定 COMPACT 数据的终止时间
|
|
||||||
|
|
||||||
**补充说明**
|
|
||||||
|
|
||||||
- COMPACT 为异步,执行 COMPACT 命令后不会等 COMPACT 结束就会返回。如果上一个 COMPACT 没有完成则再发起一个 COMPACT 任务,则会等上一个任务完成后再返回。
|
|
||||||
- COMPACT 可能阻塞写入,但不阻塞查询
|
|
||||||
- COMPACT 的进度不可观测
|
|
||||||
|
|
||||||
## 集群负载再平衡
|
|
||||||
|
|
||||||
当多副本集群中的一个或多个节点因为升级或其它原因而重启后,有可能出现集群中各个 dnode 负载不均衡的现象,极端情况下会出现所有 vgroup 的 leader 都位于同一个 dnode 的情况。为了解决这个问题,可以使用下面的命令
|
|
||||||
|
|
||||||
```sql
|
|
||||||
balance vgroup leader;
|
|
||||||
```
|
|
||||||
|
|
||||||
**功能**
|
|
||||||
|
|
||||||
让所有的 vgroup 的 leade r在各自的replica节点上均匀分布。这个命令会让 vgroup 强制重新选举,通过重新选举,在选举的过程中,变换 vgroup 的leader,通过这个方式,最终让leader均匀分布。
|
|
||||||
|
|
||||||
**注意**
|
|
||||||
|
|
||||||
Raft选举本身带有随机性,所以通过选举的重新分布产生的均匀分布也是带有一定的概率,不会完全的均匀。**该命令的副作用是影响查询和写入**,在vgroup重新选举时,从开始选举到选举出新的 leader 这段时间,这 个vgroup 无法写入和查询。选举过程一般在秒级完成。所有的vgroup会依次逐个重新选举。
|
|
||||||
|
|
||||||
## 恢复数据节点
|
|
||||||
|
|
||||||
在多节点三副本的集群环境中,如果某个 dnode 的磁盘损坏,该 dnode 会自动退出,但集群中其它的 dnode 仍然能够继续提供写入和查询服务。
|
|
||||||
|
|
||||||
在更换了损坏的磁盘后,如果想要让曾经主动退出的 dnode 重新加入集群提供服务,可以通过 `restore dnode` 命令来恢复该数据节点上的部分或全部逻辑节点,该功能依赖多副本中的其它副本进行数据复制,所以只在集群中 dnode 数量大于等于 3 且副本数为 3 的情况下能够工作。
|
|
||||||
|
|
||||||
|
|
||||||
```sql
|
|
||||||
restore dnode <dnode_id>;# 恢复dnode上的mnode,所有vnode和qnode
|
|
||||||
restore mnode on dnode <dnode_id>;# 恢复dnode上的mnode
|
|
||||||
restore vnode on dnode <dnode_id> ;# 恢复dnode上的所有vnode
|
|
||||||
restore qnode on dnode <dnode_id>;# 恢复dnode上的qnode
|
|
||||||
```
|
|
||||||
|
|
||||||
**限制**
|
|
||||||
- 该功能是基于已有的复制功能的恢复,不是灾难恢复或者备份恢复,所以对于要恢复的 mnode 和 vnode来说,使用该命令的前提是还存在该 mnode 或 vnode 的其它两个副本仍然能够正常工作。
|
|
||||||
- 该命令不能修复数据目录中的个别文件的损坏或者丢失。例如,如果某个 mnode 或者 vnode 中的个别文件或数据损坏,无法单独恢复损坏的某个文件或者某块数据。此时,可以选择将该 mnode/vnode 的数据全部清空再进行恢复。
|
|
||||||
|
|
||||||
|
|
||||||
## 虚拟组分裂 (Scale Out)
|
|
||||||
|
|
||||||
当一个 vgroup 因为子表数过多而导致 CPU 或 Disk 资源使用量负载过高时,增加 dnode 节点后,可通过 `split vgroup` 命令把该 vgroup 分裂为两个虚拟组。分裂完成后,新产生的两个 vgroup 承担原来由一个 vgroup 提供的读写服务。这也是 TDengine 为企业版用户提供的 scale out 集群的能力。
|
|
||||||
|
|
||||||
```sql
|
|
||||||
split vgroup <vgroup_id>
|
|
||||||
```
|
|
||||||
|
|
||||||
**注意**
|
|
||||||
- 单副本库虚拟组,在分裂完成后,历史时序数据总磁盘空间使用量,可能会翻倍。所以,在执行该操作之前,通过增加 dnode 节点方式,确保集群中有足够的 CPU 和磁盘资源,避免资源不足现象发生。
|
|
||||||
- 该命令为 DB 级事务;执行过程,当前DB的其它管理事务将会被拒绝。集群中,其它DB不受影响。
|
|
||||||
- 分裂任务执行过程中,可持续提供读写服务;期间,可能存在可感知的短暂的读写业务中断。
|
|
||||||
- 在分裂过程中,不支持流和订阅。分裂结束后,历史 WAL 会清空。
|
|
||||||
- 分裂过程中,可支持节点宕机重启容错;但不支持节点磁盘故障容错。
|
|
|
@ -1,56 +0,0 @@
|
||||||
---
|
|
||||||
title: 多级存储
|
|
||||||
---
|
|
||||||
|
|
||||||
## 多级存储
|
|
||||||
|
|
||||||
说明:多级存储功能仅企业版支持。
|
|
||||||
|
|
||||||
在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。
|
|
||||||
|
|
||||||
除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。
|
|
||||||
|
|
||||||
多级存储支持 3 级,每级最多可配置 16 个挂载点。
|
|
||||||
|
|
||||||
TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中):
|
|
||||||
|
|
||||||
```
|
|
||||||
dataDir [path] <level> <primary>
|
|
||||||
```
|
|
||||||
|
|
||||||
- path: 挂载点的文件夹路径
|
|
||||||
- level: 介质存储等级,取值为 0,1,2。
|
|
||||||
0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。
|
|
||||||
各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。
|
|
||||||
同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。
|
|
||||||
需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。
|
|
||||||
- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。
|
|
||||||
|
|
||||||
在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式:
|
|
||||||
|
|
||||||
```
|
|
||||||
dataDir /mnt/data1 0 1
|
|
||||||
dataDir /mnt/data2 0 0
|
|
||||||
dataDir /mnt/data3 1 0
|
|
||||||
dataDir /mnt/data4 1 0
|
|
||||||
dataDir /mnt/data5 2 0
|
|
||||||
dataDir /mnt/data6 2 0
|
|
||||||
```
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。
|
|
||||||
2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。
|
|
||||||
3. 多级存储目前不支持删除已经挂载的硬盘的功能。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## 0 级负载均衡
|
|
||||||
|
|
||||||
在多级存储中,有且只有一个主挂载点,主挂载点承担了系统中最重要的元数据在座,同时各个 vnode 的主目录均存在于当前 dnode 主挂载点上,从而导致该 dnode 的写入性能受限于单个磁盘的 IO 吞吐能力。
|
|
||||||
|
|
||||||
从 TDengine 3.1.0.0 开始,如果一个 dnode 配置了多个 0 级挂载点,我们将该 dnode 上所有 vnode 的主目录均衡分布在所有的 0 级挂载点上,由这些 0 级挂载点共同承担写入负荷。在网络 I/O 及其它处理资源不成为瓶颈的情况下,通过优化集群配置,测试结果证明整个系统的写入能力和 0 级挂载点的数量呈现线性关系,即随着 0 级挂载点数量的增加,整个系统的写入能力也成倍增加。
|
|
||||||
|
|
||||||
## 同级挂载点选择策略
|
|
||||||
|
|
||||||
一般情况下,当 TDengine 要从同级挂载点中选择一个用于生成新的数据文件时,采用 round robin 策略进行选择。但现实中有可能每个磁盘的容量不相同,或者容量相同但写入的数据量不相同,这就导致会出现每个磁盘上的可用空间不均衡,在实际进行选择时有可能会选择到一个剩余空间已经很小的磁盘。为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 `minDiskFreeSize`,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,其值应该大于 2GB,即会跳过可用空间小于 2GB 的挂载点。
|
|
|
@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
|
||||||
1. Linux 操作系统
|
1. Linux 操作系统
|
||||||
2. 已安装 Java 8 和 Maven
|
2. 已安装 Java 8 和 Maven
|
||||||
3. 已安装 Git、curl、vi
|
3. 已安装 Git、curl、vi
|
||||||
4. 已安装并启动 TDengine。
|
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](../../operation/pkg-install)
|
||||||
|
|
||||||
## 安装 Kafka
|
## 安装 Kafka
|
||||||
|
|
||||||
|
|
|
@ -14,7 +14,7 @@ Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在
|
||||||
|
|
||||||
### Seeq 安装方法
|
### Seeq 安装方法
|
||||||
|
|
||||||
从 (Seeq 官网)[https://www.seeq.com/customer-download]下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
|
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
|
||||||
|
|
||||||
### Seeq Server 安装和启动
|
### Seeq Server 安装和启动
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ sudo seeq start
|
||||||
|
|
||||||
### Seeq Data Lab Server 安装和启动
|
### Seeq Data Lab Server 安装和启动
|
||||||
|
|
||||||
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见(Seeq 官方文档)[https://support.seeq.com/space/KB/1034059842]。
|
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。
|
||||||
|
|
||||||
```
|
```
|
||||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||||
|
@ -51,7 +51,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve
|
||||||
|
|
||||||
## TDengine 本地实例安装方法
|
## TDengine 本地实例安装方法
|
||||||
|
|
||||||
请参考(官网文档)[https://docs.taosdata.com/get-started/package/]。
|
请参考[官网文档](../../get-started)。
|
||||||
|
|
||||||
## TDengine Cloud 访问方法
|
## TDengine Cloud 访问方法
|
||||||
如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。
|
如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。
|
||||||
|
@ -64,7 +64,7 @@ sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab serve
|
||||||
sudo seeq config get Folders/Data
|
sudo seeq config get Folders/Data
|
||||||
```
|
```
|
||||||
|
|
||||||
2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为(3.2.4)[https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar],并拷贝至 data 存储位置的 plugins\lib 中。
|
2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为[3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar),并拷贝至 data 存储位置的 plugins\lib 中。
|
||||||
|
|
||||||
3. 重新启动 seeq server
|
3. 重新启动 seeq server
|
||||||
|
|
||||||
|
@ -91,7 +91,7 @@ CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS
|
||||||
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||||
```
|
```
|
||||||
|
|
||||||
!(Seeq demo schema)[./seeq/seeq-demo-schema.webp]
|

|
||||||
|
|
||||||
### 构造数据方法
|
### 构造数据方法
|
||||||
|
|
||||||
|
@ -99,7 +99,8 @@ CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||||
python mockdata.py
|
python mockdata.py
|
||||||
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||||
```
|
```
|
||||||
源代码托管在(github 仓库)[https://github.com/sangshuduo/td-forecasting]。
|
|
||||||
|
源代码托管在[GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。
|
||||||
|
|
||||||
### 使用 Seeq 进行数据分析
|
### 使用 Seeq 进行数据分析
|
||||||
|
|
||||||
|
@ -287,9 +288,9 @@ taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from
|
||||||
|
|
||||||
#### 使用 Seeq Workbench
|
#### 使用 Seeq Workbench
|
||||||
|
|
||||||
登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见(官方知识库)[https://support.seeq.com/space/KB/146440193/Seeq+Workbench]。
|
登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见[官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。
|
||||||
|
|
||||||
!(Seeq Workbench)[./seeq/seeq-demo-workbench.webp]
|

|
||||||
|
|
||||||
#### 用 Seeq Data Lab Server 进行进一步的数据分析
|
#### 用 Seeq Data Lab Server 进行进一步的数据分析
|
||||||
|
|
||||||
|
@ -358,7 +359,7 @@ plt.show()
|
||||||
|
|
||||||
运行程序输出结果:
|
运行程序输出结果:
|
||||||
|
|
||||||
!(Seeq forecast result)[./seeq/seeq-forecast-result.webp]
|

|
||||||
|
|
||||||
### 配置 Seeq 数据源连接 TDengine Cloud
|
### 配置 Seeq 数据源连接 TDengine Cloud
|
||||||
|
|
||||||
|
@ -426,7 +427,7 @@ plt.show()
|
||||||
|
|
||||||
#### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例
|
#### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例
|
||||||
|
|
||||||
!(Seeq workbench with TDengine cloud)[./seeq/seeq-workbench-with-tdengine-cloud.webp]
|

|
||||||
|
|
||||||
## 方案总结
|
## 方案总结
|
||||||
|
|
||||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.1.1.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.1.1.0" />
|
||||||
|
|
||||||
## 3.1.0.3
|
## 3.1.0.3
|
||||||
|
|
||||||
<Release type="tdengine" version="3.1.0.3" />
|
<Release type="tdengine" version="3.1.0.3" />
|
||||||
|
|
|
@ -88,6 +88,7 @@ extern int64_t tsMndLogRetention;
|
||||||
extern int8_t tsGrant;
|
extern int8_t tsGrant;
|
||||||
extern int32_t tsMndGrantMode;
|
extern int32_t tsMndGrantMode;
|
||||||
extern bool tsMndSkipGrant;
|
extern bool tsMndSkipGrant;
|
||||||
|
extern bool tsEnableWhiteList;
|
||||||
|
|
||||||
// dnode
|
// dnode
|
||||||
extern int64_t tsDndStart;
|
extern int64_t tsDndStart;
|
||||||
|
|
|
@ -184,6 +184,8 @@ typedef enum _mgmt_table {
|
||||||
#define TSDB_ALTER_USER_REMOVE_WRITE_TABLE 0x10
|
#define TSDB_ALTER_USER_REMOVE_WRITE_TABLE 0x10
|
||||||
#define TSDB_ALTER_USER_ADD_ALL_TABLE 0x11
|
#define TSDB_ALTER_USER_ADD_ALL_TABLE 0x11
|
||||||
#define TSDB_ALTER_USER_REMOVE_ALL_TABLE 0x12
|
#define TSDB_ALTER_USER_REMOVE_ALL_TABLE 0x12
|
||||||
|
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x13
|
||||||
|
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x14
|
||||||
|
|
||||||
#define TSDB_ALTER_USER_PRIVILEGES 0x2
|
#define TSDB_ALTER_USER_PRIVILEGES 0x2
|
||||||
|
|
||||||
|
@ -418,10 +420,9 @@ typedef enum ENodeType {
|
||||||
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
|
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
|
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
|
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
|
||||||
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
|
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
|
||||||
} ENodeType;
|
} ENodeType;
|
||||||
|
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
char* dbFName;
|
char* dbFName;
|
||||||
|
@ -874,29 +875,39 @@ typedef struct {
|
||||||
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
||||||
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
||||||
|
|
||||||
|
typedef struct SIpV4Range {
|
||||||
|
uint32_t ip;
|
||||||
|
uint32_t mask;
|
||||||
|
} SIpV4Range;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int8_t createType;
|
int8_t createType;
|
||||||
int8_t superUser; // denote if it is a super user or not
|
int8_t superUser; // denote if it is a super user or not
|
||||||
int8_t sysInfo;
|
int8_t sysInfo;
|
||||||
int8_t enable;
|
int8_t enable;
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
char pass[TSDB_USET_PASSWORD_LEN];
|
char pass[TSDB_USET_PASSWORD_LEN];
|
||||||
|
int32_t numIpRanges;
|
||||||
|
SIpV4Range* pIpRanges;
|
||||||
} SCreateUserReq;
|
} SCreateUserReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
||||||
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
||||||
|
void tFreeSCreateUserReq(SCreateUserReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int8_t alterType;
|
int8_t alterType;
|
||||||
int8_t superUser;
|
int8_t superUser;
|
||||||
int8_t sysInfo;
|
int8_t sysInfo;
|
||||||
int8_t enable;
|
int8_t enable;
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
char pass[TSDB_USET_PASSWORD_LEN];
|
char pass[TSDB_USET_PASSWORD_LEN];
|
||||||
char objname[TSDB_DB_FNAME_LEN]; // db or topic
|
char objname[TSDB_DB_FNAME_LEN]; // db or topic
|
||||||
char tabName[TSDB_TABLE_NAME_LEN];
|
char tabName[TSDB_TABLE_NAME_LEN];
|
||||||
char* tagCond;
|
char* tagCond;
|
||||||
int32_t tagCondLen;
|
int32_t tagCondLen;
|
||||||
|
int32_t numIpRanges;
|
||||||
|
SIpV4Range* pIpRanges;
|
||||||
} SAlterUserReq;
|
} SAlterUserReq;
|
||||||
|
|
||||||
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
|
@ -973,14 +984,12 @@ typedef struct {
|
||||||
int64_t offset;
|
int64_t offset;
|
||||||
} SInterval;
|
} SInterval;
|
||||||
|
|
||||||
|
|
||||||
typedef struct STbVerInfo {
|
typedef struct STbVerInfo {
|
||||||
char tbFName[TSDB_TABLE_FNAME_LEN];
|
char tbFName[TSDB_TABLE_FNAME_LEN];
|
||||||
int32_t sversion;
|
int32_t sversion;
|
||||||
int32_t tversion;
|
int32_t tversion;
|
||||||
} STbVerInfo;
|
} STbVerInfo;
|
||||||
|
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t code;
|
int32_t code;
|
||||||
int64_t affectedRows;
|
int64_t affectedRows;
|
||||||
|
@ -1203,13 +1212,13 @@ typedef struct {
|
||||||
int16_t sstTrigger;
|
int16_t sstTrigger;
|
||||||
} SDbCfgRsp;
|
} SDbCfgRsp;
|
||||||
|
|
||||||
typedef SDbCfgRsp SDbCfgInfo;
|
typedef SDbCfgRsp SDbCfgInfo;
|
||||||
|
|
||||||
int32_t tSerializeSDbCfgRspImpl(SEncoder *encoder, const SDbCfgRsp *pRsp);
|
int32_t tSerializeSDbCfgRspImpl(SEncoder* encoder, const SDbCfgRsp* pRsp);
|
||||||
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
|
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
|
||||||
int32_t tDeserializeSDbCfgRsp(void* buf, int32_t bufLen, SDbCfgRsp* pRsp);
|
int32_t tDeserializeSDbCfgRsp(void* buf, int32_t bufLen, SDbCfgRsp* pRsp);
|
||||||
int32_t tDeserializeSDbCfgRspImpl(SDecoder* decoder, SDbCfgRsp *pRsp);
|
int32_t tDeserializeSDbCfgRspImpl(SDecoder* decoder, SDbCfgRsp* pRsp);
|
||||||
void tFreeSDbCfgRsp(SDbCfgRsp *pRsp);
|
void tFreeSDbCfgRsp(SDbCfgRsp* pRsp);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t rowNum;
|
int32_t rowNum;
|
||||||
|
@ -1266,8 +1275,8 @@ int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp
|
||||||
void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);
|
void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SUseDbRsp *useDbRsp;
|
SUseDbRsp* useDbRsp;
|
||||||
SDbCfgRsp *cfgRsp;
|
SDbCfgRsp* cfgRsp;
|
||||||
} SDbHbRsp;
|
} SDbHbRsp;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -1368,6 +1377,7 @@ typedef struct {
|
||||||
char locale[TD_LOCALE_LEN]; // tsLocale
|
char locale[TD_LOCALE_LEN]; // tsLocale
|
||||||
char charset[TD_LOCALE_LEN]; // tsCharset
|
char charset[TD_LOCALE_LEN]; // tsCharset
|
||||||
int8_t ttlChangeOnWrite;
|
int8_t ttlChangeOnWrite;
|
||||||
|
int8_t enableWhiteList;
|
||||||
} SClusterCfg;
|
} SClusterCfg;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -1402,7 +1412,7 @@ typedef struct {
|
||||||
int64_t numOfBatchInsertReqs;
|
int64_t numOfBatchInsertReqs;
|
||||||
int64_t numOfBatchInsertSuccessReqs;
|
int64_t numOfBatchInsertSuccessReqs;
|
||||||
int32_t numOfCachedTables;
|
int32_t numOfCachedTables;
|
||||||
int32_t learnerProgress; // use one reservered
|
int32_t learnerProgress; // use one reservered
|
||||||
} SVnodeLoad;
|
} SVnodeLoad;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -2062,17 +2072,16 @@ typedef struct {
|
||||||
int32_t tversion;
|
int32_t tversion;
|
||||||
} SResReadyRsp;
|
} SResReadyRsp;
|
||||||
|
|
||||||
|
|
||||||
typedef struct SOperatorParam {
|
typedef struct SOperatorParam {
|
||||||
int32_t opType;
|
int32_t opType;
|
||||||
int32_t downstreamIdx;
|
int32_t downstreamIdx;
|
||||||
void* value;
|
void* value;
|
||||||
SArray* pChildren; //SArray<SOperatorParam*>
|
SArray* pChildren; // SArray<SOperatorParam*>
|
||||||
} SOperatorParam;
|
} SOperatorParam;
|
||||||
|
|
||||||
typedef struct STableScanOperatorParam {
|
typedef struct STableScanOperatorParam {
|
||||||
bool tableSeq;
|
bool tableSeq;
|
||||||
SArray* pUidList;
|
SArray* pUidList;
|
||||||
} STableScanOperatorParam;
|
} STableScanOperatorParam;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -2161,7 +2170,6 @@ typedef struct {
|
||||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||||
|
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TASK_NOTIFY_FINISHED = 1,
|
TASK_NOTIFY_FINISHED = 1,
|
||||||
} ETaskNotifyType;
|
} ETaskNotifyType;
|
||||||
|
@ -2285,10 +2293,10 @@ typedef struct {
|
||||||
char clientId[256];
|
char clientId[256];
|
||||||
SArray* topicNames; // SArray<char**>
|
SArray* topicNames; // SArray<char**>
|
||||||
|
|
||||||
int8_t withTbName;
|
int8_t withTbName;
|
||||||
int8_t autoCommit;
|
int8_t autoCommit;
|
||||||
int32_t autoCommitInterval;
|
int32_t autoCommitInterval;
|
||||||
int8_t resetOffsetCfg;
|
int8_t resetOffsetCfg;
|
||||||
} SCMSubscribeReq;
|
} SCMSubscribeReq;
|
||||||
|
|
||||||
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
|
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
|
||||||
|
@ -2665,7 +2673,7 @@ typedef struct {
|
||||||
int32_t newTTL;
|
int32_t newTTL;
|
||||||
int32_t newCommentLen;
|
int32_t newCommentLen;
|
||||||
char* newComment;
|
char* newComment;
|
||||||
int64_t ctimeMs; // fill by vnode
|
int64_t ctimeMs; // fill by vnode
|
||||||
} SVAlterTbReq;
|
} SVAlterTbReq;
|
||||||
|
|
||||||
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
|
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
|
||||||
|
@ -3069,7 +3077,7 @@ typedef struct {
|
||||||
int64_t suid;
|
int64_t suid;
|
||||||
} SMqRebVgReq;
|
} SMqRebVgReq;
|
||||||
|
|
||||||
static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder *pCoder, const SMqRebVgReq* pReq) {
|
static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder* pCoder, const SMqRebVgReq* pReq) {
|
||||||
if (tStartEncode(pCoder) < 0) return -1;
|
if (tStartEncode(pCoder) < 0) return -1;
|
||||||
if (tEncodeI64(pCoder, pReq->leftForVer) < 0) return -1;
|
if (tEncodeI64(pCoder, pReq->leftForVer) < 0) return -1;
|
||||||
if (tEncodeI32(pCoder, pReq->vgId) < 0) return -1;
|
if (tEncodeI32(pCoder, pReq->vgId) < 0) return -1;
|
||||||
|
@ -3089,7 +3097,7 @@ static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder *pCoder, const SMqRebVgReq*
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder *pCoder, SMqRebVgReq* pReq) {
|
static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder* pCoder, SMqRebVgReq* pReq) {
|
||||||
if (tStartDecode(pCoder) < 0) return -1;
|
if (tStartDecode(pCoder) < 0) return -1;
|
||||||
|
|
||||||
if (tDecodeI64(pCoder, &pReq->leftForVer) < 0) return -1;
|
if (tDecodeI64(pCoder, &pReq->leftForVer) < 0) return -1;
|
||||||
|
@ -3105,7 +3113,7 @@ static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder *pCoder, SMqRebVgReq* pReq)
|
||||||
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
||||||
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
|
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||||
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
|
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
|
||||||
if (!tDecodeIsEnd(pCoder)){
|
if (!tDecodeIsEnd(pCoder)) {
|
||||||
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3629,12 +3637,12 @@ typedef struct {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
STqOffsetVal offset;
|
STqOffsetVal offset;
|
||||||
int64_t rows;
|
int64_t rows;
|
||||||
}OffsetRows;
|
} OffsetRows;
|
||||||
|
|
||||||
typedef struct{
|
typedef struct {
|
||||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||||
SArray* offsetRows;
|
SArray* offsetRows;
|
||||||
}TopicOffsetRows;
|
} TopicOffsetRows;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int64_t consumerId;
|
int64_t consumerId;
|
||||||
|
@ -3647,9 +3655,9 @@ typedef struct {
|
||||||
} SMqHbRsp;
|
} SMqHbRsp;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int64_t consumerId;
|
int64_t consumerId;
|
||||||
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
|
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
|
||||||
} SMqSeekReq;
|
} SMqSeekReq;
|
||||||
|
|
||||||
#define TD_AUTO_CREATE_TABLE 0x1
|
#define TD_AUTO_CREATE_TABLE 0x1
|
||||||
|
@ -3703,7 +3711,7 @@ typedef struct SDeleteRes {
|
||||||
int64_t affectedRows;
|
int64_t affectedRows;
|
||||||
char tableFName[TSDB_TABLE_NAME_LEN];
|
char tableFName[TSDB_TABLE_NAME_LEN];
|
||||||
char tsColName[TSDB_COL_NAME_LEN];
|
char tsColName[TSDB_COL_NAME_LEN];
|
||||||
int64_t ctimeMs; // fill by vnode
|
int64_t ctimeMs; // fill by vnode
|
||||||
} SDeleteRes;
|
} SDeleteRes;
|
||||||
|
|
||||||
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
|
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
|
||||||
|
@ -3781,8 +3789,8 @@ int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||||
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||||
int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
|
int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
|
||||||
|
|
||||||
int32_t tSerializeSMqSeekReq(void *buf, int32_t bufLen, SMqSeekReq *pReq);
|
int32_t tSerializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
|
||||||
int32_t tDeserializeSMqSeekReq(void *buf, int32_t bufLen, SMqSeekReq *pReq);
|
int32_t tDeserializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
|
||||||
|
|
||||||
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
|
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
|
||||||
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
|
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
|
||||||
|
@ -3797,7 +3805,7 @@ typedef struct {
|
||||||
SArray* aRowP;
|
SArray* aRowP;
|
||||||
SArray* aCol;
|
SArray* aCol;
|
||||||
};
|
};
|
||||||
int64_t ctimeMs;
|
int64_t ctimeMs;
|
||||||
} SSubmitTbData;
|
} SSubmitTbData;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
|
|
@ -16,6 +16,7 @@
|
||||||
#ifndef _TD_COMMON_TOKEN_H_
|
#ifndef _TD_COMMON_TOKEN_H_
|
||||||
#define _TD_COMMON_TOKEN_H_
|
#define _TD_COMMON_TOKEN_H_
|
||||||
|
|
||||||
|
|
||||||
#define TK_OR 1
|
#define TK_OR 1
|
||||||
#define TK_AND 2
|
#define TK_AND 2
|
||||||
#define TK_UNION 3
|
#define TK_UNION 3
|
||||||
|
@ -48,313 +49,313 @@
|
||||||
#define TK_USERS 30
|
#define TK_USERS 30
|
||||||
#define TK_CONNS 31
|
#define TK_CONNS 31
|
||||||
#define TK_STATE 32
|
#define TK_STATE 32
|
||||||
#define TK_USER 33
|
#define TK_NK_COMMA 33
|
||||||
#define TK_ENABLE 34
|
#define TK_HOST 34
|
||||||
#define TK_NK_INTEGER 35
|
#define TK_USER 35
|
||||||
#define TK_SYSINFO 36
|
#define TK_ENABLE 36
|
||||||
#define TK_DROP 37
|
#define TK_NK_INTEGER 37
|
||||||
#define TK_GRANT 38
|
#define TK_SYSINFO 38
|
||||||
#define TK_ON 39
|
#define TK_ADD 39
|
||||||
#define TK_TO 40
|
#define TK_DROP 40
|
||||||
#define TK_REVOKE 41
|
#define TK_GRANT 41
|
||||||
#define TK_FROM 42
|
#define TK_ON 42
|
||||||
#define TK_SUBSCRIBE 43
|
#define TK_TO 43
|
||||||
#define TK_NK_COMMA 44
|
#define TK_REVOKE 44
|
||||||
#define TK_READ 45
|
#define TK_FROM 45
|
||||||
#define TK_WRITE 46
|
#define TK_SUBSCRIBE 46
|
||||||
#define TK_NK_DOT 47
|
#define TK_READ 47
|
||||||
#define TK_WITH 48
|
#define TK_WRITE 48
|
||||||
#define TK_DNODE 49
|
#define TK_NK_DOT 49
|
||||||
#define TK_PORT 50
|
#define TK_WITH 50
|
||||||
#define TK_DNODES 51
|
#define TK_DNODE 51
|
||||||
#define TK_RESTORE 52
|
#define TK_PORT 52
|
||||||
#define TK_NK_IPTOKEN 53
|
#define TK_DNODES 53
|
||||||
#define TK_FORCE 54
|
#define TK_RESTORE 54
|
||||||
#define TK_UNSAFE 55
|
#define TK_NK_IPTOKEN 55
|
||||||
#define TK_LOCAL 56
|
#define TK_FORCE 56
|
||||||
#define TK_QNODE 57
|
#define TK_UNSAFE 57
|
||||||
#define TK_BNODE 58
|
#define TK_LOCAL 58
|
||||||
#define TK_SNODE 59
|
#define TK_QNODE 59
|
||||||
#define TK_MNODE 60
|
#define TK_BNODE 60
|
||||||
#define TK_VNODE 61
|
#define TK_SNODE 61
|
||||||
#define TK_DATABASE 62
|
#define TK_MNODE 62
|
||||||
#define TK_USE 63
|
#define TK_VNODE 63
|
||||||
#define TK_FLUSH 64
|
#define TK_DATABASE 64
|
||||||
#define TK_TRIM 65
|
#define TK_USE 65
|
||||||
#define TK_COMPACT 66
|
#define TK_FLUSH 66
|
||||||
#define TK_IF 67
|
#define TK_TRIM 67
|
||||||
#define TK_NOT 68
|
#define TK_COMPACT 68
|
||||||
#define TK_EXISTS 69
|
#define TK_IF 69
|
||||||
#define TK_BUFFER 70
|
#define TK_NOT 70
|
||||||
#define TK_CACHEMODEL 71
|
#define TK_EXISTS 71
|
||||||
#define TK_CACHESIZE 72
|
#define TK_BUFFER 72
|
||||||
#define TK_COMP 73
|
#define TK_CACHEMODEL 73
|
||||||
#define TK_DURATION 74
|
#define TK_CACHESIZE 74
|
||||||
#define TK_NK_VARIABLE 75
|
#define TK_COMP 75
|
||||||
#define TK_MAXROWS 76
|
#define TK_DURATION 76
|
||||||
#define TK_MINROWS 77
|
#define TK_NK_VARIABLE 77
|
||||||
#define TK_KEEP 78
|
#define TK_MAXROWS 78
|
||||||
#define TK_PAGES 79
|
#define TK_MINROWS 79
|
||||||
#define TK_PAGESIZE 80
|
#define TK_KEEP 80
|
||||||
#define TK_TSDB_PAGESIZE 81
|
#define TK_PAGES 81
|
||||||
#define TK_PRECISION 82
|
#define TK_PAGESIZE 82
|
||||||
#define TK_REPLICA 83
|
#define TK_TSDB_PAGESIZE 83
|
||||||
#define TK_VGROUPS 84
|
#define TK_PRECISION 84
|
||||||
#define TK_SINGLE_STABLE 85
|
#define TK_REPLICA 85
|
||||||
#define TK_RETENTIONS 86
|
#define TK_VGROUPS 86
|
||||||
#define TK_SCHEMALESS 87
|
#define TK_SINGLE_STABLE 87
|
||||||
#define TK_WAL_LEVEL 88
|
#define TK_RETENTIONS 88
|
||||||
#define TK_WAL_FSYNC_PERIOD 89
|
#define TK_SCHEMALESS 89
|
||||||
#define TK_WAL_RETENTION_PERIOD 90
|
#define TK_WAL_LEVEL 90
|
||||||
#define TK_WAL_RETENTION_SIZE 91
|
#define TK_WAL_FSYNC_PERIOD 91
|
||||||
#define TK_WAL_ROLL_PERIOD 92
|
#define TK_WAL_RETENTION_PERIOD 92
|
||||||
#define TK_WAL_SEGMENT_SIZE 93
|
#define TK_WAL_RETENTION_SIZE 93
|
||||||
#define TK_STT_TRIGGER 94
|
#define TK_WAL_ROLL_PERIOD 94
|
||||||
#define TK_TABLE_PREFIX 95
|
#define TK_WAL_SEGMENT_SIZE 95
|
||||||
#define TK_TABLE_SUFFIX 96
|
#define TK_STT_TRIGGER 96
|
||||||
#define TK_NK_COLON 97
|
#define TK_TABLE_PREFIX 97
|
||||||
#define TK_BWLIMIT 98
|
#define TK_TABLE_SUFFIX 98
|
||||||
#define TK_START 99
|
#define TK_NK_COLON 99
|
||||||
#define TK_TIMESTAMP 100
|
#define TK_BWLIMIT 100
|
||||||
#define TK_END 101
|
#define TK_START 101
|
||||||
#define TK_TABLE 102
|
#define TK_TIMESTAMP 102
|
||||||
#define TK_NK_LP 103
|
#define TK_END 103
|
||||||
#define TK_NK_RP 104
|
#define TK_TABLE 104
|
||||||
#define TK_STABLE 105
|
#define TK_NK_LP 105
|
||||||
#define TK_ADD 106
|
#define TK_NK_RP 106
|
||||||
#define TK_COLUMN 107
|
#define TK_STABLE 107
|
||||||
#define TK_MODIFY 108
|
#define TK_COLUMN 108
|
||||||
#define TK_RENAME 109
|
#define TK_MODIFY 109
|
||||||
#define TK_TAG 110
|
#define TK_RENAME 110
|
||||||
#define TK_SET 111
|
#define TK_TAG 111
|
||||||
#define TK_NK_EQ 112
|
#define TK_SET 112
|
||||||
#define TK_USING 113
|
#define TK_NK_EQ 113
|
||||||
#define TK_TAGS 114
|
#define TK_USING 114
|
||||||
#define TK_BOOL 115
|
#define TK_TAGS 115
|
||||||
#define TK_TINYINT 116
|
#define TK_BOOL 116
|
||||||
#define TK_SMALLINT 117
|
#define TK_TINYINT 117
|
||||||
#define TK_INT 118
|
#define TK_SMALLINT 118
|
||||||
#define TK_INTEGER 119
|
#define TK_INT 119
|
||||||
#define TK_BIGINT 120
|
#define TK_INTEGER 120
|
||||||
#define TK_FLOAT 121
|
#define TK_BIGINT 121
|
||||||
#define TK_DOUBLE 122
|
#define TK_FLOAT 122
|
||||||
#define TK_BINARY 123
|
#define TK_DOUBLE 123
|
||||||
#define TK_NCHAR 124
|
#define TK_BINARY 124
|
||||||
#define TK_UNSIGNED 125
|
#define TK_NCHAR 125
|
||||||
#define TK_JSON 126
|
#define TK_UNSIGNED 126
|
||||||
#define TK_VARCHAR 127
|
#define TK_JSON 127
|
||||||
#define TK_MEDIUMBLOB 128
|
#define TK_VARCHAR 128
|
||||||
#define TK_BLOB 129
|
#define TK_MEDIUMBLOB 129
|
||||||
#define TK_VARBINARY 130
|
#define TK_BLOB 130
|
||||||
#define TK_GEOMETRY 131
|
#define TK_VARBINARY 131
|
||||||
#define TK_DECIMAL 132
|
#define TK_GEOMETRY 132
|
||||||
#define TK_COMMENT 133
|
#define TK_DECIMAL 133
|
||||||
#define TK_MAX_DELAY 134
|
#define TK_COMMENT 134
|
||||||
#define TK_WATERMARK 135
|
#define TK_MAX_DELAY 135
|
||||||
#define TK_ROLLUP 136
|
#define TK_WATERMARK 136
|
||||||
#define TK_TTL 137
|
#define TK_ROLLUP 137
|
||||||
#define TK_SMA 138
|
#define TK_TTL 138
|
||||||
#define TK_DELETE_MARK 139
|
#define TK_SMA 139
|
||||||
#define TK_FIRST 140
|
#define TK_DELETE_MARK 140
|
||||||
#define TK_LAST 141
|
#define TK_FIRST 141
|
||||||
#define TK_SHOW 142
|
#define TK_LAST 142
|
||||||
#define TK_PRIVILEGES 143
|
#define TK_SHOW 143
|
||||||
#define TK_DATABASES 144
|
#define TK_PRIVILEGES 144
|
||||||
#define TK_TABLES 145
|
#define TK_DATABASES 145
|
||||||
#define TK_STABLES 146
|
#define TK_TABLES 146
|
||||||
#define TK_MNODES 147
|
#define TK_STABLES 147
|
||||||
#define TK_QNODES 148
|
#define TK_MNODES 148
|
||||||
#define TK_FUNCTIONS 149
|
#define TK_QNODES 149
|
||||||
#define TK_INDEXES 150
|
#define TK_FUNCTIONS 150
|
||||||
#define TK_ACCOUNTS 151
|
#define TK_INDEXES 151
|
||||||
#define TK_APPS 152
|
#define TK_ACCOUNTS 152
|
||||||
#define TK_CONNECTIONS 153
|
#define TK_APPS 153
|
||||||
#define TK_LICENCES 154
|
#define TK_CONNECTIONS 154
|
||||||
#define TK_GRANTS 155
|
#define TK_LICENCES 155
|
||||||
#define TK_QUERIES 156
|
#define TK_GRANTS 156
|
||||||
#define TK_SCORES 157
|
#define TK_QUERIES 157
|
||||||
#define TK_TOPICS 158
|
#define TK_SCORES 158
|
||||||
#define TK_VARIABLES 159
|
#define TK_TOPICS 159
|
||||||
#define TK_CLUSTER 160
|
#define TK_VARIABLES 160
|
||||||
#define TK_BNODES 161
|
#define TK_CLUSTER 161
|
||||||
#define TK_SNODES 162
|
#define TK_BNODES 162
|
||||||
#define TK_TRANSACTIONS 163
|
#define TK_SNODES 163
|
||||||
#define TK_DISTRIBUTED 164
|
#define TK_TRANSACTIONS 164
|
||||||
#define TK_CONSUMERS 165
|
#define TK_DISTRIBUTED 165
|
||||||
#define TK_SUBSCRIPTIONS 166
|
#define TK_CONSUMERS 166
|
||||||
#define TK_VNODES 167
|
#define TK_SUBSCRIPTIONS 167
|
||||||
#define TK_ALIVE 168
|
#define TK_VNODES 168
|
||||||
#define TK_LIKE 169
|
#define TK_ALIVE 169
|
||||||
#define TK_TBNAME 170
|
#define TK_LIKE 170
|
||||||
#define TK_QTAGS 171
|
#define TK_TBNAME 171
|
||||||
#define TK_AS 172
|
#define TK_QTAGS 172
|
||||||
#define TK_INDEX 173
|
#define TK_AS 173
|
||||||
#define TK_FUNCTION 174
|
#define TK_INDEX 174
|
||||||
#define TK_INTERVAL 175
|
#define TK_FUNCTION 175
|
||||||
#define TK_COUNT 176
|
#define TK_INTERVAL 176
|
||||||
#define TK_LAST_ROW 177
|
#define TK_COUNT 177
|
||||||
#define TK_META 178
|
#define TK_LAST_ROW 178
|
||||||
#define TK_ONLY 179
|
#define TK_META 179
|
||||||
#define TK_TOPIC 180
|
#define TK_ONLY 180
|
||||||
#define TK_CONSUMER 181
|
#define TK_TOPIC 181
|
||||||
#define TK_GROUP 182
|
#define TK_CONSUMER 182
|
||||||
#define TK_DESC 183
|
#define TK_GROUP 183
|
||||||
#define TK_DESCRIBE 184
|
#define TK_DESC 184
|
||||||
#define TK_RESET 185
|
#define TK_DESCRIBE 185
|
||||||
#define TK_QUERY 186
|
#define TK_RESET 186
|
||||||
#define TK_CACHE 187
|
#define TK_QUERY 187
|
||||||
#define TK_EXPLAIN 188
|
#define TK_CACHE 188
|
||||||
#define TK_ANALYZE 189
|
#define TK_EXPLAIN 189
|
||||||
#define TK_VERBOSE 190
|
#define TK_ANALYZE 190
|
||||||
#define TK_NK_BOOL 191
|
#define TK_VERBOSE 191
|
||||||
#define TK_RATIO 192
|
#define TK_NK_BOOL 192
|
||||||
#define TK_NK_FLOAT 193
|
#define TK_RATIO 193
|
||||||
#define TK_OUTPUTTYPE 194
|
#define TK_NK_FLOAT 194
|
||||||
#define TK_AGGREGATE 195
|
#define TK_OUTPUTTYPE 195
|
||||||
#define TK_BUFSIZE 196
|
#define TK_AGGREGATE 196
|
||||||
#define TK_LANGUAGE 197
|
#define TK_BUFSIZE 197
|
||||||
#define TK_REPLACE 198
|
#define TK_LANGUAGE 198
|
||||||
#define TK_STREAM 199
|
#define TK_REPLACE 199
|
||||||
#define TK_INTO 200
|
#define TK_STREAM 200
|
||||||
#define TK_PAUSE 201
|
#define TK_INTO 201
|
||||||
#define TK_RESUME 202
|
#define TK_PAUSE 202
|
||||||
#define TK_TRIGGER 203
|
#define TK_RESUME 203
|
||||||
#define TK_AT_ONCE 204
|
#define TK_TRIGGER 204
|
||||||
#define TK_WINDOW_CLOSE 205
|
#define TK_AT_ONCE 205
|
||||||
#define TK_IGNORE 206
|
#define TK_WINDOW_CLOSE 206
|
||||||
#define TK_EXPIRED 207
|
#define TK_IGNORE 207
|
||||||
#define TK_FILL_HISTORY 208
|
#define TK_EXPIRED 208
|
||||||
#define TK_UPDATE 209
|
#define TK_FILL_HISTORY 209
|
||||||
#define TK_SUBTABLE 210
|
#define TK_UPDATE 210
|
||||||
#define TK_UNTREATED 211
|
#define TK_SUBTABLE 211
|
||||||
#define TK_KILL 212
|
#define TK_UNTREATED 212
|
||||||
#define TK_CONNECTION 213
|
#define TK_KILL 213
|
||||||
#define TK_TRANSACTION 214
|
#define TK_CONNECTION 214
|
||||||
#define TK_BALANCE 215
|
#define TK_TRANSACTION 215
|
||||||
#define TK_VGROUP 216
|
#define TK_BALANCE 216
|
||||||
#define TK_LEADER 217
|
#define TK_VGROUP 217
|
||||||
#define TK_MERGE 218
|
#define TK_LEADER 218
|
||||||
#define TK_REDISTRIBUTE 219
|
#define TK_MERGE 219
|
||||||
#define TK_SPLIT 220
|
#define TK_REDISTRIBUTE 220
|
||||||
#define TK_DELETE 221
|
#define TK_SPLIT 221
|
||||||
#define TK_INSERT 222
|
#define TK_DELETE 222
|
||||||
#define TK_NULL 223
|
#define TK_INSERT 223
|
||||||
#define TK_NK_QUESTION 224
|
#define TK_NULL 224
|
||||||
#define TK_NK_ARROW 225
|
#define TK_NK_QUESTION 225
|
||||||
#define TK_ROWTS 226
|
#define TK_NK_ARROW 226
|
||||||
#define TK_QSTART 227
|
#define TK_ROWTS 227
|
||||||
#define TK_QEND 228
|
#define TK_QSTART 228
|
||||||
#define TK_QDURATION 229
|
#define TK_QEND 229
|
||||||
#define TK_WSTART 230
|
#define TK_QDURATION 230
|
||||||
#define TK_WEND 231
|
#define TK_WSTART 231
|
||||||
#define TK_WDURATION 232
|
#define TK_WEND 232
|
||||||
#define TK_IROWTS 233
|
#define TK_WDURATION 233
|
||||||
#define TK_ISFILLED 234
|
#define TK_IROWTS 234
|
||||||
#define TK_CAST 235
|
#define TK_ISFILLED 235
|
||||||
#define TK_NOW 236
|
#define TK_CAST 236
|
||||||
#define TK_TODAY 237
|
#define TK_NOW 237
|
||||||
#define TK_TIMEZONE 238
|
#define TK_TODAY 238
|
||||||
#define TK_CLIENT_VERSION 239
|
#define TK_TIMEZONE 239
|
||||||
#define TK_SERVER_VERSION 240
|
#define TK_CLIENT_VERSION 240
|
||||||
#define TK_SERVER_STATUS 241
|
#define TK_SERVER_VERSION 241
|
||||||
#define TK_CURRENT_USER 242
|
#define TK_SERVER_STATUS 242
|
||||||
#define TK_CASE 243
|
#define TK_CURRENT_USER 243
|
||||||
#define TK_WHEN 244
|
#define TK_CASE 244
|
||||||
#define TK_THEN 245
|
#define TK_WHEN 245
|
||||||
#define TK_ELSE 246
|
#define TK_THEN 246
|
||||||
#define TK_BETWEEN 247
|
#define TK_ELSE 247
|
||||||
#define TK_IS 248
|
#define TK_BETWEEN 248
|
||||||
#define TK_NK_LT 249
|
#define TK_IS 249
|
||||||
#define TK_NK_GT 250
|
#define TK_NK_LT 250
|
||||||
#define TK_NK_LE 251
|
#define TK_NK_GT 251
|
||||||
#define TK_NK_GE 252
|
#define TK_NK_LE 252
|
||||||
#define TK_NK_NE 253
|
#define TK_NK_GE 253
|
||||||
#define TK_MATCH 254
|
#define TK_NK_NE 254
|
||||||
#define TK_NMATCH 255
|
#define TK_MATCH 255
|
||||||
#define TK_CONTAINS 256
|
#define TK_NMATCH 256
|
||||||
#define TK_IN 257
|
#define TK_CONTAINS 257
|
||||||
#define TK_JOIN 258
|
#define TK_IN 258
|
||||||
#define TK_INNER 259
|
#define TK_JOIN 259
|
||||||
#define TK_SELECT 260
|
#define TK_INNER 260
|
||||||
#define TK_NK_HINT 261
|
#define TK_SELECT 261
|
||||||
#define TK_DISTINCT 262
|
#define TK_NK_HINT 262
|
||||||
#define TK_WHERE 263
|
#define TK_DISTINCT 263
|
||||||
#define TK_PARTITION 264
|
#define TK_WHERE 264
|
||||||
#define TK_BY 265
|
#define TK_PARTITION 265
|
||||||
#define TK_SESSION 266
|
#define TK_BY 266
|
||||||
#define TK_STATE_WINDOW 267
|
#define TK_SESSION 267
|
||||||
#define TK_EVENT_WINDOW 268
|
#define TK_STATE_WINDOW 268
|
||||||
#define TK_SLIDING 269
|
#define TK_EVENT_WINDOW 269
|
||||||
#define TK_FILL 270
|
#define TK_SLIDING 270
|
||||||
#define TK_VALUE 271
|
#define TK_FILL 271
|
||||||
#define TK_VALUE_F 272
|
#define TK_VALUE 272
|
||||||
#define TK_NONE 273
|
#define TK_VALUE_F 273
|
||||||
#define TK_PREV 274
|
#define TK_NONE 274
|
||||||
#define TK_NULL_F 275
|
#define TK_PREV 275
|
||||||
#define TK_LINEAR 276
|
#define TK_NULL_F 276
|
||||||
#define TK_NEXT 277
|
#define TK_LINEAR 277
|
||||||
#define TK_HAVING 278
|
#define TK_NEXT 278
|
||||||
#define TK_RANGE 279
|
#define TK_HAVING 279
|
||||||
#define TK_EVERY 280
|
#define TK_RANGE 280
|
||||||
#define TK_ORDER 281
|
#define TK_EVERY 281
|
||||||
#define TK_SLIMIT 282
|
#define TK_ORDER 282
|
||||||
#define TK_SOFFSET 283
|
#define TK_SLIMIT 283
|
||||||
#define TK_LIMIT 284
|
#define TK_SOFFSET 284
|
||||||
#define TK_OFFSET 285
|
#define TK_LIMIT 285
|
||||||
#define TK_ASC 286
|
#define TK_OFFSET 286
|
||||||
#define TK_NULLS 287
|
#define TK_ASC 287
|
||||||
#define TK_ABORT 288
|
#define TK_NULLS 288
|
||||||
#define TK_AFTER 289
|
#define TK_ABORT 289
|
||||||
#define TK_ATTACH 290
|
#define TK_AFTER 290
|
||||||
#define TK_BEFORE 291
|
#define TK_ATTACH 291
|
||||||
#define TK_BEGIN 292
|
#define TK_BEFORE 292
|
||||||
#define TK_BITAND 293
|
#define TK_BEGIN 293
|
||||||
#define TK_BITNOT 294
|
#define TK_BITAND 294
|
||||||
#define TK_BITOR 295
|
#define TK_BITNOT 295
|
||||||
#define TK_BLOCKS 296
|
#define TK_BITOR 296
|
||||||
#define TK_CHANGE 297
|
#define TK_BLOCKS 297
|
||||||
#define TK_COMMA 298
|
#define TK_CHANGE 298
|
||||||
#define TK_CONCAT 299
|
#define TK_COMMA 299
|
||||||
#define TK_CONFLICT 300
|
#define TK_CONCAT 300
|
||||||
#define TK_COPY 301
|
#define TK_CONFLICT 301
|
||||||
#define TK_DEFERRED 302
|
#define TK_COPY 302
|
||||||
#define TK_DELIMITERS 303
|
#define TK_DEFERRED 303
|
||||||
#define TK_DETACH 304
|
#define TK_DELIMITERS 304
|
||||||
#define TK_DIVIDE 305
|
#define TK_DETACH 305
|
||||||
#define TK_DOT 306
|
#define TK_DIVIDE 306
|
||||||
#define TK_EACH 307
|
#define TK_DOT 307
|
||||||
#define TK_FAIL 308
|
#define TK_EACH 308
|
||||||
#define TK_FILE 309
|
#define TK_FAIL 309
|
||||||
#define TK_FOR 310
|
#define TK_FILE 310
|
||||||
#define TK_GLOB 311
|
#define TK_FOR 311
|
||||||
#define TK_ID 312
|
#define TK_GLOB 312
|
||||||
#define TK_IMMEDIATE 313
|
#define TK_ID 313
|
||||||
#define TK_IMPORT 314
|
#define TK_IMMEDIATE 314
|
||||||
#define TK_INITIALLY 315
|
#define TK_IMPORT 315
|
||||||
#define TK_INSTEAD 316
|
#define TK_INITIALLY 316
|
||||||
#define TK_ISNULL 317
|
#define TK_INSTEAD 317
|
||||||
#define TK_KEY 318
|
#define TK_ISNULL 318
|
||||||
#define TK_MODULES 319
|
#define TK_KEY 319
|
||||||
#define TK_NK_BITNOT 320
|
#define TK_MODULES 320
|
||||||
#define TK_NK_SEMI 321
|
#define TK_NK_BITNOT 321
|
||||||
#define TK_NOTNULL 322
|
#define TK_NK_SEMI 322
|
||||||
#define TK_OF 323
|
#define TK_NOTNULL 323
|
||||||
#define TK_PLUS 324
|
#define TK_OF 324
|
||||||
#define TK_PRIVILEGE 325
|
#define TK_PLUS 325
|
||||||
#define TK_RAISE 326
|
#define TK_PRIVILEGE 326
|
||||||
#define TK_RESTRICT 327
|
#define TK_RAISE 327
|
||||||
#define TK_ROW 328
|
#define TK_RESTRICT 328
|
||||||
#define TK_SEMI 329
|
#define TK_ROW 329
|
||||||
#define TK_STAR 330
|
#define TK_SEMI 330
|
||||||
#define TK_STATEMENT 331
|
#define TK_STAR 331
|
||||||
#define TK_STRICT 332
|
#define TK_STATEMENT 332
|
||||||
#define TK_STRING 333
|
#define TK_STRICT 333
|
||||||
#define TK_TIMES 334
|
#define TK_STRING 334
|
||||||
#define TK_VALUES 335
|
#define TK_TIMES 335
|
||||||
#define TK_VARIABLE 336
|
#define TK_VALUES 336
|
||||||
#define TK_VIEW 337
|
#define TK_VARIABLE 337
|
||||||
#define TK_WAL 338
|
#define TK_VIEW 338
|
||||||
|
#define TK_WAL 339
|
||||||
|
|
||||||
|
|
||||||
#define TK_NK_SPACE 600
|
#define TK_NK_SPACE 600
|
||||||
|
|
|
@ -222,6 +222,10 @@ typedef struct SCreateUserStmt {
|
||||||
char userName[TSDB_USER_LEN];
|
char userName[TSDB_USER_LEN];
|
||||||
char password[TSDB_USET_PASSWORD_LEN];
|
char password[TSDB_USET_PASSWORD_LEN];
|
||||||
int8_t sysinfo;
|
int8_t sysinfo;
|
||||||
|
int32_t numIpRanges;
|
||||||
|
SIpV4Range* pIpRanges;
|
||||||
|
|
||||||
|
SNodeList* pNodeListIpRanges;
|
||||||
} SCreateUserStmt;
|
} SCreateUserStmt;
|
||||||
|
|
||||||
typedef struct SAlterUserStmt {
|
typedef struct SAlterUserStmt {
|
||||||
|
@ -231,6 +235,10 @@ typedef struct SAlterUserStmt {
|
||||||
char password[TSDB_USET_PASSWORD_LEN];
|
char password[TSDB_USET_PASSWORD_LEN];
|
||||||
int8_t enable;
|
int8_t enable;
|
||||||
int8_t sysinfo;
|
int8_t sysinfo;
|
||||||
|
int32_t numIpRanges;
|
||||||
|
SIpV4Range* pIpRanges;
|
||||||
|
|
||||||
|
SNodeList* pNodeListIpRanges;
|
||||||
} SAlterUserStmt;
|
} SAlterUserStmt;
|
||||||
|
|
||||||
typedef struct SDropUserStmt {
|
typedef struct SDropUserStmt {
|
||||||
|
|
|
@ -41,16 +41,16 @@ enum {
|
||||||
STREAM_STATUS__PAUSE,
|
STREAM_STATUS__PAUSE,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
typedef enum ETaskStatus {
|
||||||
TASK_STATUS__NORMAL = 0,
|
TASK_STATUS__NORMAL = 0,
|
||||||
TASK_STATUS__DROPPING,
|
TASK_STATUS__DROPPING,
|
||||||
TASK_STATUS__FAIL,
|
TASK_STATUS__UNINIT, // not used, an placeholder
|
||||||
TASK_STATUS__STOP,
|
TASK_STATUS__STOP,
|
||||||
TASK_STATUS__SCAN_HISTORY, // stream task scan history data by using tsdbread in the stream scanner
|
TASK_STATUS__SCAN_HISTORY, // stream task scan history data by using tsdbread in the stream scanner
|
||||||
TASK_STATUS__HALT, // pause, but not be manipulated by user command
|
TASK_STATUS__HALT, // pause, but not be manipulated by user command
|
||||||
TASK_STATUS__PAUSE, // pause
|
TASK_STATUS__PAUSE, // pause
|
||||||
TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to put into inputQ anymore
|
TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to put into inputQ anymore
|
||||||
};
|
} ETaskStatus;
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
TASK_SCHED_STATUS__INACTIVE = 1,
|
TASK_SCHED_STATUS__INACTIVE = 1,
|
||||||
|
|
|
@ -710,6 +710,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
||||||
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
|
||||||
#define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A)
|
#define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A)
|
||||||
|
#define TSDB_CODE_PAR_INVALID_IP_RANGE TAOS_DEF_ERROR_CODE(0, 0x266B)
|
||||||
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
||||||
|
|
||||||
//planner
|
//planner
|
||||||
|
|
|
@ -81,6 +81,7 @@ int64_t tsMndLogRetention = 2000;
|
||||||
int8_t tsGrant = 1;
|
int8_t tsGrant = 1;
|
||||||
int32_t tsMndGrantMode = 0;
|
int32_t tsMndGrantMode = 0;
|
||||||
bool tsMndSkipGrant = false;
|
bool tsMndSkipGrant = false;
|
||||||
|
bool tsEnableWhiteList = false; // ip white list cfg
|
||||||
|
|
||||||
// dnode
|
// dnode
|
||||||
int64_t tsDndStart = 0;
|
int64_t tsDndStart = 0;
|
||||||
|
@ -659,6 +660,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
if (cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024,
|
if (cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024,
|
||||||
CFG_SCOPE_SERVER) != 0)
|
CFG_SCOPE_SERVER) != 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
if (cfgAddBool(pCfg, "enableWhiteList", tsEnableWhiteList, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
|
||||||
GRANT_CFG_ADD;
|
GRANT_CFG_ADD;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1047,6 +1049,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
|
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
|
||||||
tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
|
tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
|
||||||
tsMndGrantMode = cfgGetItem(pCfg, "grantMode")->i32;
|
tsMndGrantMode = cfgGetItem(pCfg, "grantMode")->i32;
|
||||||
|
tsEnableWhiteList = cfgGetItem(pCfg, "enableWhiteList")->bval;
|
||||||
|
|
||||||
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
|
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
|
||||||
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
|
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
|
||||||
|
|
|
@ -1060,6 +1060,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
if (tEncodeCStr(&encoder, pReq->clusterCfg.timezone) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->clusterCfg.timezone) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pReq->clusterCfg.locale) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->clusterCfg.locale) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pReq->clusterCfg.charset) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->clusterCfg.charset) < 0) return -1;
|
||||||
|
if (tEncodeI8(&encoder, pReq->clusterCfg.enableWhiteList) < 0) return -1;
|
||||||
|
|
||||||
// vnode loads
|
// vnode loads
|
||||||
int32_t vlen = (int32_t)taosArrayGetSize(pReq->pVloads);
|
int32_t vlen = (int32_t)taosArrayGetSize(pReq->pVloads);
|
||||||
|
@ -1147,6 +1148,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.timezone) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.timezone) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.locale) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.locale) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.charset) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->clusterCfg.charset) < 0) return -1;
|
||||||
|
if (tDecodeI8(&decoder, &pReq->clusterCfg.enableWhiteList) < 0) return -1;
|
||||||
|
|
||||||
// vnode loads
|
// vnode loads
|
||||||
int32_t vlen = 0;
|
int32_t vlen = 0;
|
||||||
|
@ -1380,6 +1382,11 @@ int32_t tSerializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pReq
|
||||||
if (tEncodeI8(&encoder, pReq->enable) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->enable) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->numIpRanges) < 0) return -1;
|
||||||
|
for (int32_t i = 0; i < pReq->numIpRanges; ++i) {
|
||||||
|
if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1;
|
||||||
|
if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1;
|
||||||
|
}
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -1398,12 +1405,21 @@ int32_t tDeserializeSCreateUserReq(void *buf, int32_t bufLen, SCreateUserReq *pR
|
||||||
if (tDecodeI8(&decoder, &pReq->enable) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->enable) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->numIpRanges) < 0) return -1;
|
||||||
|
pReq->pIpRanges = taosMemoryMalloc(pReq->numIpRanges * sizeof(SIpV4Range));
|
||||||
|
if (pReq->pIpRanges == NULL) return -1;
|
||||||
|
for (int32_t i = 0; i < pReq->numIpRanges; ++i) {
|
||||||
|
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1;
|
||||||
|
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1;
|
||||||
|
}
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void tFreeSCreateUserReq(SCreateUserReq *pReq) { taosMemoryFree(pReq->pIpRanges); }
|
||||||
|
|
||||||
int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq) {
|
int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq) {
|
||||||
SEncoder encoder = {0};
|
SEncoder encoder = {0};
|
||||||
tEncoderInit(&encoder, buf, bufLen);
|
tEncoderInit(&encoder, buf, bufLen);
|
||||||
|
@ -1422,6 +1438,11 @@ int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq)
|
||||||
if (tEncodeCStr(&encoder, pReq->tabName) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->tabName) < 0) return -1;
|
||||||
}
|
}
|
||||||
if (tEncodeBinary(&encoder, pReq->tagCond, pReq->tagCondLen) < 0) return -1;
|
if (tEncodeBinary(&encoder, pReq->tagCond, pReq->tagCondLen) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->numIpRanges) < 0) return -1;
|
||||||
|
for (int32_t i = 0; i < pReq->numIpRanges; ++i) {
|
||||||
|
if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1;
|
||||||
|
if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1;
|
||||||
|
}
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -1451,13 +1472,23 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
|
||||||
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->tagCond, &tagCondLen) < 0) return -1;
|
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->tagCond, &tagCondLen) < 0) return -1;
|
||||||
pReq->tagCondLen = tagCondLen;
|
pReq->tagCondLen = tagCondLen;
|
||||||
}
|
}
|
||||||
|
if (tDecodeI32(&decoder, &pReq->numIpRanges) < 0) return -1;
|
||||||
|
pReq->pIpRanges = taosMemoryMalloc(pReq->numIpRanges * sizeof(SIpV4Range));
|
||||||
|
if (pReq->pIpRanges == NULL) return -1;
|
||||||
|
for (int32_t i = 0; i < pReq->numIpRanges; ++i) {
|
||||||
|
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1;
|
||||||
|
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1;
|
||||||
|
}
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void tFreeSAlterUserReq(SAlterUserReq *pReq) { taosMemoryFreeClear(pReq->tagCond); }
|
void tFreeSAlterUserReq(SAlterUserReq *pReq) {
|
||||||
|
taosMemoryFreeClear(pReq->tagCond);
|
||||||
|
taosMemoryFree(pReq->pIpRanges);
|
||||||
|
}
|
||||||
|
|
||||||
int32_t tSerializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *pReq) {
|
int32_t tSerializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *pReq) {
|
||||||
SEncoder encoder = {0};
|
SEncoder encoder = {0};
|
||||||
|
@ -4730,7 +4761,7 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode
|
||||||
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
|
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!tDecodeIsEnd(&decoder)){
|
if (!tDecodeIsEnd(&decoder)) {
|
||||||
if (tDecodeI32(&decoder, &pReq->changeVersion) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->changeVersion) < 0) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5601,17 +5632,17 @@ void tFreeSSubQueryMsg(SSubQueryMsg *pReq) {
|
||||||
taosMemoryFreeClear(pReq->msg);
|
taosMemoryFreeClear(pReq->msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tSerializeSOperatorParam(SEncoder* pEncoder, SOperatorParam* pOpParam) {
|
int32_t tSerializeSOperatorParam(SEncoder *pEncoder, SOperatorParam *pOpParam) {
|
||||||
if (tEncodeI32(pEncoder, pOpParam->opType) < 0) return -1;
|
if (tEncodeI32(pEncoder, pOpParam->opType) < 0) return -1;
|
||||||
if (tEncodeI32(pEncoder, pOpParam->downstreamIdx) < 0) return -1;
|
if (tEncodeI32(pEncoder, pOpParam->downstreamIdx) < 0) return -1;
|
||||||
switch (pOpParam->opType) {
|
switch (pOpParam->opType) {
|
||||||
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
|
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
|
||||||
STableScanOperatorParam* pScan = (STableScanOperatorParam*)pOpParam->value;
|
STableScanOperatorParam *pScan = (STableScanOperatorParam *)pOpParam->value;
|
||||||
if (tEncodeI8(pEncoder, pScan->tableSeq) < 0) return -1;
|
if (tEncodeI8(pEncoder, pScan->tableSeq) < 0) return -1;
|
||||||
int32_t uidNum = taosArrayGetSize(pScan->pUidList);
|
int32_t uidNum = taosArrayGetSize(pScan->pUidList);
|
||||||
if (tEncodeI32(pEncoder, uidNum) < 0) return -1;
|
if (tEncodeI32(pEncoder, uidNum) < 0) return -1;
|
||||||
for (int32_t m = 0; m < uidNum; ++m) {
|
for (int32_t m = 0; m < uidNum; ++m) {
|
||||||
int64_t* pUid = taosArrayGet(pScan->pUidList, m);
|
int64_t *pUid = taosArrayGet(pScan->pUidList, m);
|
||||||
if (tEncodeI64(pEncoder, *pUid) < 0) return -1;
|
if (tEncodeI64(pEncoder, *pUid) < 0) return -1;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
@ -5619,25 +5650,25 @@ int32_t tSerializeSOperatorParam(SEncoder* pEncoder, SOperatorParam* pOpParam) {
|
||||||
default:
|
default:
|
||||||
return TSDB_CODE_INVALID_PARA;
|
return TSDB_CODE_INVALID_PARA;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t n = taosArrayGetSize(pOpParam->pChildren);
|
int32_t n = taosArrayGetSize(pOpParam->pChildren);
|
||||||
if (tEncodeI32(pEncoder, n) < 0) return -1;
|
if (tEncodeI32(pEncoder, n) < 0) return -1;
|
||||||
for (int32_t i = 0; i < n; ++i) {
|
for (int32_t i = 0; i < n; ++i) {
|
||||||
SOperatorParam* pChild = *(SOperatorParam**)taosArrayGet(pOpParam->pChildren, i);
|
SOperatorParam *pChild = *(SOperatorParam **)taosArrayGet(pOpParam->pChildren, i);
|
||||||
if (tSerializeSOperatorParam(pEncoder, pChild) < 0) return -1;
|
if (tSerializeSOperatorParam(pEncoder, pChild) < 0) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam* pOpParam) {
|
int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam) {
|
||||||
if (tDecodeI32(pDecoder, &pOpParam->opType) < 0) return -1;
|
if (tDecodeI32(pDecoder, &pOpParam->opType) < 0) return -1;
|
||||||
if (tDecodeI32(pDecoder, &pOpParam->downstreamIdx) < 0) return -1;
|
if (tDecodeI32(pDecoder, &pOpParam->downstreamIdx) < 0) return -1;
|
||||||
switch (pOpParam->opType) {
|
switch (pOpParam->opType) {
|
||||||
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
|
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
|
||||||
STableScanOperatorParam* pScan = taosMemoryMalloc(sizeof(STableScanOperatorParam));
|
STableScanOperatorParam *pScan = taosMemoryMalloc(sizeof(STableScanOperatorParam));
|
||||||
if (NULL == pScan) return -1;
|
if (NULL == pScan) return -1;
|
||||||
if (tDecodeI8(pDecoder, (int8_t*)&pScan->tableSeq) < 0) return -1;
|
if (tDecodeI8(pDecoder, (int8_t *)&pScan->tableSeq) < 0) return -1;
|
||||||
int32_t uidNum = 0;
|
int32_t uidNum = 0;
|
||||||
int64_t uid = 0;
|
int64_t uid = 0;
|
||||||
if (tDecodeI32(pDecoder, &uidNum) < 0) return -1;
|
if (tDecodeI32(pDecoder, &uidNum) < 0) return -1;
|
||||||
|
@ -5664,7 +5695,7 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam* pOpParam)
|
||||||
pOpParam->pChildren = taosArrayInit(childrenNum, POINTER_BYTES);
|
pOpParam->pChildren = taosArrayInit(childrenNum, POINTER_BYTES);
|
||||||
if (NULL == pOpParam->pChildren) return -1;
|
if (NULL == pOpParam->pChildren) return -1;
|
||||||
for (int32_t i = 0; i < childrenNum; ++i) {
|
for (int32_t i = 0; i < childrenNum; ++i) {
|
||||||
SOperatorParam* pChild = taosMemoryCalloc(1, sizeof(SOperatorParam));
|
SOperatorParam *pChild = taosMemoryCalloc(1, sizeof(SOperatorParam));
|
||||||
if (NULL == pChild) return -1;
|
if (NULL == pChild) return -1;
|
||||||
if (tDeserializeSOperatorParam(pDecoder, pChild) < 0) return -1;
|
if (tDeserializeSOperatorParam(pDecoder, pChild) < 0) return -1;
|
||||||
taosArrayPush(pOpParam->pChildren, &pChild);
|
taosArrayPush(pOpParam->pChildren, &pChild);
|
||||||
|
@ -5676,7 +5707,6 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam* pOpParam)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
|
int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
|
||||||
int32_t headLen = sizeof(SMsgHead);
|
int32_t headLen = sizeof(SMsgHead);
|
||||||
if (buf != NULL) {
|
if (buf != NULL) {
|
||||||
|
@ -5737,7 +5767,7 @@ int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq)
|
||||||
if (NULL == pReq->pOpParam) return -1;
|
if (NULL == pReq->pOpParam) return -1;
|
||||||
if (tDeserializeSOperatorParam(&decoder, pReq->pOpParam) < 0) return -1;
|
if (tDeserializeSOperatorParam(&decoder, pReq->pOpParam) < 0) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -5925,7 +5955,7 @@ int32_t tDeserializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pR
|
||||||
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->refId) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->refId) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->execId) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->execId) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, (int32_t*)&pReq->type) < 0) return -1;
|
if (tDecodeI32(&decoder, (int32_t *)&pReq->type) < 0) return -1;
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
|
@ -5933,7 +5963,6 @@ int32_t tDeserializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pR
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
||||||
SEncoder encoder = {0};
|
SEncoder encoder = {0};
|
||||||
tEncoderInit(&encoder, buf, bufLen);
|
tEncoderInit(&encoder, buf, bufLen);
|
||||||
|
@ -5951,7 +5980,7 @@ int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp
|
||||||
if (tEncodeI32(&encoder, pVer->tversion) < 0) return -1;
|
if (tEncodeI32(&encoder, pVer->tversion) < 0) return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -5979,7 +6008,7 @@ int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pR
|
||||||
if (tDecodeI32(&decoder, &tbVer.tversion) < 0) return -1;
|
if (tDecodeI32(&decoder, &tbVer.tversion) < 0) return -1;
|
||||||
if (NULL == taosArrayPush(pRsp->tbVerInfo, &tbVer)) return -1;
|
if (NULL == taosArrayPush(pRsp->tbVerInfo, &tbVer)) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
|
@ -91,6 +91,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
||||||
req.clusterCfg.statusInterval = tsStatusInterval;
|
req.clusterCfg.statusInterval = tsStatusInterval;
|
||||||
req.clusterCfg.checkTime = 0;
|
req.clusterCfg.checkTime = 0;
|
||||||
req.clusterCfg.ttlChangeOnWrite = tsTtlChangeOnWrite;
|
req.clusterCfg.ttlChangeOnWrite = tsTtlChangeOnWrite;
|
||||||
|
req.clusterCfg.enableWhiteList = tsEnableWhiteList ? 1 : 0;
|
||||||
char timestr[32] = "1970-01-01 00:00:00.00";
|
char timestr[32] = "1970-01-01 00:00:00.00";
|
||||||
(void)taosParseTime(timestr, &req.clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
(void)taosParseTime(timestr, &req.clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
||||||
memcpy(req.clusterCfg.timezone, tsTimezoneStr, TD_TIMEZONE_LEN);
|
memcpy(req.clusterCfg.timezone, tsTimezoneStr, TD_TIMEZONE_LEN);
|
||||||
|
|
|
@ -136,6 +136,7 @@ typedef enum {
|
||||||
DND_REASON_LOCALE_NOT_MATCH,
|
DND_REASON_LOCALE_NOT_MATCH,
|
||||||
DND_REASON_CHARSET_NOT_MATCH,
|
DND_REASON_CHARSET_NOT_MATCH,
|
||||||
DND_REASON_TTL_CHANGE_ON_WRITE_NOT_MATCH,
|
DND_REASON_TTL_CHANGE_ON_WRITE_NOT_MATCH,
|
||||||
|
DND_REASON_ENABLE_WHITELIST_NOT_MATCH,
|
||||||
DND_REASON_OTHERS
|
DND_REASON_OTHERS
|
||||||
} EDndReason;
|
} EDndReason;
|
||||||
|
|
||||||
|
|
|
@ -15,6 +15,8 @@
|
||||||
|
|
||||||
#define _DEFAULT_SOURCE
|
#define _DEFAULT_SOURCE
|
||||||
#include "mndDnode.h"
|
#include "mndDnode.h"
|
||||||
|
#include "audit.h"
|
||||||
|
#include "mndCluster.h"
|
||||||
#include "mndDb.h"
|
#include "mndDb.h"
|
||||||
#include "mndMnode.h"
|
#include "mndMnode.h"
|
||||||
#include "mndPrivilege.h"
|
#include "mndPrivilege.h"
|
||||||
|
@ -25,8 +27,6 @@
|
||||||
#include "mndUser.h"
|
#include "mndUser.h"
|
||||||
#include "mndVgroup.h"
|
#include "mndVgroup.h"
|
||||||
#include "tmisce.h"
|
#include "tmisce.h"
|
||||||
#include "mndCluster.h"
|
|
||||||
#include "audit.h"
|
|
||||||
|
|
||||||
#define TSDB_DNODE_VER_NUMBER 2
|
#define TSDB_DNODE_VER_NUMBER 2
|
||||||
#define TSDB_DNODE_RESERVE_SIZE 64
|
#define TSDB_DNODE_RESERVE_SIZE 64
|
||||||
|
@ -421,6 +421,11 @@ static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const S
|
||||||
tsTtlChangeOnWrite);
|
tsTtlChangeOnWrite);
|
||||||
return DND_REASON_TTL_CHANGE_ON_WRITE_NOT_MATCH;
|
return DND_REASON_TTL_CHANGE_ON_WRITE_NOT_MATCH;
|
||||||
}
|
}
|
||||||
|
int8_t enable = tsEnableWhiteList ? 1 : 0;
|
||||||
|
if (pCfg->enableWhiteList != enable) {
|
||||||
|
mError("dnode:%d, enable :%d inconsistent with cluster:%d", pDnode->id, pCfg->enableWhiteList, enable);
|
||||||
|
return DND_REASON_ENABLE_WHITELIST_NOT_MATCH;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -926,14 +931,10 @@ _OVER:
|
||||||
|
|
||||||
extern int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq);
|
extern int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq);
|
||||||
|
|
||||||
int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq){
|
int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq) { return mndProcessRestoreDnodeReqImpl(pReq); }
|
||||||
return mndProcessRestoreDnodeReqImpl(pReq);
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifndef TD_ENTERPRISE
|
#ifndef TD_ENTERPRISE
|
||||||
int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq){
|
int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq) { return 0; }
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
|
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
|
||||||
|
@ -1004,15 +1005,14 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
mInfo("dnode:%d, start to drop, ep:%s:%d, force:%s, unsafe:%s",
|
mInfo("dnode:%d, start to drop, ep:%s:%d, force:%s, unsafe:%s", dropReq.dnodeId, dropReq.fqdn, dropReq.port,
|
||||||
dropReq.dnodeId, dropReq.fqdn, dropReq.port, dropReq.force?"true":"false", dropReq.unsafe?"true":"false");
|
dropReq.force ? "true" : "false", dropReq.unsafe ? "true" : "false");
|
||||||
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
|
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool force = dropReq.force;
|
bool force = dropReq.force;
|
||||||
if(dropReq.unsafe)
|
if (dropReq.unsafe) {
|
||||||
{
|
|
||||||
force = true;
|
force = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1043,19 +1043,19 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
|
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
|
||||||
bool isonline = mndIsDnodeOnline(pDnode, taosGetTimestampMs());
|
bool isonline = mndIsDnodeOnline(pDnode, taosGetTimestampMs());
|
||||||
|
|
||||||
if (isonline && force) {
|
if (isonline && force) {
|
||||||
terrno = TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE;
|
terrno = TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE;
|
||||||
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
||||||
numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
|
numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!isonline && !force) {
|
if (!isonline && !force) {
|
||||||
terrno = TSDB_CODE_DNODE_OFFLINE;
|
terrno = TSDB_CODE_DNODE_OFFLINE;
|
||||||
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
||||||
numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
|
numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1065,8 +1065,8 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
char obj1[30] = {0};
|
char obj1[30] = {0};
|
||||||
sprintf(obj1, "%d", dropReq.dnodeId);
|
sprintf(obj1, "%d", dropReq.dnodeId);
|
||||||
|
|
||||||
//char obj2[150] = {0};
|
// char obj2[150] = {0};
|
||||||
//sprintf(obj2, "%s:%d", dropReq.fqdn, dropReq.port);
|
// sprintf(obj2, "%s:%d", dropReq.fqdn, dropReq.port);
|
||||||
|
|
||||||
char detail[100] = {0};
|
char detail[100] = {0};
|
||||||
sprintf(detail, "force:%d, unsafe:%d", dropReq.force, dropReq.unsafe);
|
sprintf(detail, "force:%d, unsafe:%d", dropReq.force, dropReq.unsafe);
|
||||||
|
@ -1164,8 +1164,8 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
|
||||||
if (code < 0) return code;
|
if (code < 0) return code;
|
||||||
|
|
||||||
if (flag < 0) {
|
if (flag < 0) {
|
||||||
mError("dnode:%d, failed to config ttlBatchDropNum since value:%d. Valid range: [0, %d]", cfgReq.dnodeId,
|
mError("dnode:%d, failed to config ttlBatchDropNum since value:%d. Valid range: [0, %d]", cfgReq.dnodeId, flag,
|
||||||
flag, INT32_MAX);
|
INT32_MAX);
|
||||||
terrno = TSDB_CODE_INVALID_CFG;
|
terrno = TSDB_CODE_INVALID_CFG;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1255,12 +1255,12 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
|
||||||
sprintf(detail, "colVer:%d, delay1:%" PRId64 ", delay2:%" PRId64 ", deleteMark1:%" PRId64 ", "
|
sprintf(detail, "colVer:%d, delay1:%" PRId64 ", delay2:%" PRId64 ", deleteMark1:%" PRId64 ", "
|
||||||
"deleteMark2:%" PRId64 ", igExists:%d, numOfColumns:%d, numOfFuncs:%d, numOfTags:%d, "
|
"deleteMark2:%" PRId64 ", igExists:%d, numOfColumns:%d, numOfFuncs:%d, numOfTags:%d, "
|
||||||
"source:%d, suid:%" PRId64 ", tagVer:%d, ttl:%d, "
|
"source:%d, suid:%" PRId64 ", tagVer:%d, ttl:%d, "
|
||||||
"watermark1:%" PRId64 ", watermark2:%" PRId64,
|
"watermark1:%" PRId64 ", watermark2:%" PRId64,
|
||||||
createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1,
|
createReq.colVer, createReq.delay1, createReq.delay2, createReq.deleteMark1,
|
||||||
createReq.deleteMark2, createReq.igExists, createReq.numOfColumns, createReq.numOfFuncs, createReq.numOfTags,
|
createReq.deleteMark2, createReq.igExists, createReq.numOfColumns, createReq.numOfFuncs, createReq.numOfTags,
|
||||||
createReq.source, createReq.suid, createReq.tagVer, createReq.ttl,
|
createReq.source, createReq.suid, createReq.tagVer, createReq.ttl,
|
||||||
createReq.watermark1, createReq.watermark2);
|
createReq.watermark1, createReq.watermark2);
|
||||||
|
|
||||||
mndAuditFieldStr(detail, createReq.pColumns, createReq.numOfColumns, AUDIT_DETAIL_MAX);
|
mndAuditFieldStr(detail, createReq.pColumns, createReq.numOfColumns, AUDIT_DETAIL_MAX);
|
||||||
mndAuditFieldStr(detail, createReq.pTags, createReq.numOfTags, AUDIT_DETAIL_MAX);
|
mndAuditFieldStr(detail, createReq.pTags, createReq.numOfTags, AUDIT_DETAIL_MAX);
|
||||||
|
|
||||||
|
@ -2610,7 +2610,7 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
|
||||||
char detail[2000] = {0};
|
char detail[2000] = {0};
|
||||||
sprintf(detail, "igNotExists:%d, source:%d" ,
|
sprintf(detail, "igNotExists:%d, source:%d" ,
|
||||||
dropReq.igNotExists, dropReq.source);
|
dropReq.igNotExists, dropReq.source);
|
||||||
|
|
||||||
SName name = {0};
|
SName name = {0};
|
||||||
tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||||
|
|
||||||
|
|
|
@ -868,6 +868,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
||||||
mndTransDrop(pTrans);
|
mndTransDrop(pTrans);
|
||||||
|
|
||||||
taosThreadMutexLock(&execNodeList.lock);
|
taosThreadMutexLock(&execNodeList.lock);
|
||||||
|
mDebug("register to stream task node list");
|
||||||
keepStreamTasksInBuf(&streamObj, &execNodeList);
|
keepStreamTasksInBuf(&streamObj, &execNodeList);
|
||||||
taosThreadMutexUnlock(&execNodeList.lock);
|
taosThreadMutexUnlock(&execNodeList.lock);
|
||||||
|
|
||||||
|
@ -876,13 +877,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
||||||
char detail[2000] = {0};
|
char detail[2000] = {0};
|
||||||
sprintf(detail,
|
sprintf(detail,
|
||||||
"checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64
|
"checkpointFreq:%" PRId64 ", createStb:%d, deleteMark:%" PRId64
|
||||||
", "
|
", fillHistory:%d, igExists:%d, igExpired:%d, igUpdate:%d, lastTs:%" PRId64 ", maxDelay:%" PRId64
|
||||||
"fillHistory:%d, igExists:%d, "
|
", numOfTags:%d, sourceDB:%s, targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
|
||||||
"igExpired:%d, igUpdate:%d, lastTs:%" PRId64
|
|
||||||
", "
|
|
||||||
"maxDelay:%" PRId64
|
|
||||||
", numOfTags:%d, sourceDB:%s, "
|
|
||||||
"targetStbFullName:%s, triggerType:%d, watermark:%" PRId64,
|
|
||||||
createStreamReq.checkpointFreq, createStreamReq.createStb, createStreamReq.deleteMark,
|
createStreamReq.checkpointFreq, createStreamReq.createStb, createStreamReq.deleteMark,
|
||||||
createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate,
|
createStreamReq.fillHistory, createStreamReq.igExists, createStreamReq.igExpired, createStreamReq.igUpdate,
|
||||||
createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB,
|
createStreamReq.lastTs, createStreamReq.maxDelay, createStreamReq.numOfTags, createStreamReq.sourceDB,
|
||||||
|
@ -1579,8 +1575,8 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
||||||
} else if (taskStatus == TASK_STATUS__DROPPING) {
|
} else if (taskStatus == TASK_STATUS__DROPPING) {
|
||||||
memcpy(varDataVal(status), "dropping", 8);
|
memcpy(varDataVal(status), "dropping", 8);
|
||||||
varDataSetLen(status, 8);
|
varDataSetLen(status, 8);
|
||||||
} else if (taskStatus == TASK_STATUS__FAIL) {
|
} else if (taskStatus == TASK_STATUS__UNINIT) {
|
||||||
memcpy(varDataVal(status), "fail", 4);
|
memcpy(varDataVal(status), "uninit", 6);
|
||||||
varDataSetLen(status, 4);
|
varDataSetLen(status, 4);
|
||||||
} else if (taskStatus == TASK_STATUS__STOP) {
|
} else if (taskStatus == TASK_STATUS__STOP) {
|
||||||
memcpy(varDataVal(status), "stop", 4);
|
memcpy(varDataVal(status), "stop", 4);
|
||||||
|
@ -2021,14 +2017,11 @@ static int32_t createStreamUpdateTrans(SMnode *pMnode, SStreamObj *pStream, SVgr
|
||||||
|
|
||||||
static bool isNodeEpsetChanged(const SEpSet *pPrevEpset, const SEpSet *pCurrent) {
|
static bool isNodeEpsetChanged(const SEpSet *pPrevEpset, const SEpSet *pCurrent) {
|
||||||
const SEp *pEp = GET_ACTIVE_EP(pPrevEpset);
|
const SEp *pEp = GET_ACTIVE_EP(pPrevEpset);
|
||||||
|
const SEp* p = GET_ACTIVE_EP(pCurrent);
|
||||||
|
|
||||||
for (int32_t i = 0; i < pCurrent->numOfEps; ++i) {
|
if (pEp->port == p->port && strncmp(pEp->fqdn, p->fqdn, TSDB_FQDN_LEN) == 0) {
|
||||||
const SEp *p = &(pCurrent->eps[i]);
|
return false;
|
||||||
if (pEp->port == p->port && strncmp(pEp->fqdn, p->fqdn, TSDB_FQDN_LEN) == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2125,6 +2118,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
|
||||||
mDebug("stream:0x%" PRIx64 " involved node changed, create update trans", pStream->uid);
|
mDebug("stream:0x%" PRIx64 " involved node changed, create update trans", pStream->uid);
|
||||||
int32_t code = createStreamUpdateTrans(pMnode, pStream, pChangeInfo);
|
int32_t code = createStreamUpdateTrans(pMnode, pStream, pChangeInfo);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
sdbCancelFetch(pSdb, pIter);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2228,18 +2222,22 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
|
||||||
SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
|
SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
|
||||||
if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
|
if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
|
||||||
code = mndProcessVgroupChange(pMnode, &changeInfo);
|
code = mndProcessVgroupChange(pMnode, &changeInfo);
|
||||||
|
|
||||||
|
// keep the new vnode snapshot
|
||||||
|
if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||||
|
mDebug("create trans successfully, update cached node list");
|
||||||
|
taosArrayDestroy(execNodeList.pNodeEntryList);
|
||||||
|
execNodeList.pNodeEntryList = pNodeSnapshot;
|
||||||
|
execNodeList.ts = ts;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
mDebug("no update found in nodeList");
|
||||||
|
taosArrayDestroy(pNodeSnapshot);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosArrayDestroy(changeInfo.pUpdateNodeList);
|
taosArrayDestroy(changeInfo.pUpdateNodeList);
|
||||||
taosHashCleanup(changeInfo.pDBMap);
|
taosHashCleanup(changeInfo.pDBMap);
|
||||||
|
|
||||||
// keep the new vnode snapshot
|
|
||||||
if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) {
|
|
||||||
taosArrayDestroy(execNodeList.pNodeEntryList);
|
|
||||||
execNodeList.pNodeEntryList = pNodeSnapshot;
|
|
||||||
execNodeList.ts = ts;
|
|
||||||
}
|
|
||||||
|
|
||||||
mDebug("end to do stream task node change checking");
|
mDebug("end to do stream task node change checking");
|
||||||
atomic_store_32(&mndNodeCheckSentinel, 0);
|
atomic_store_32(&mndNodeCheckSentinel, 0);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2289,7 +2287,6 @@ static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *p
|
||||||
// todo: this process should be executed by the write queue worker of the mnode
|
// todo: this process should be executed by the write queue worker of the mnode
|
||||||
int32_t mndProcessStreamHb(SRpcMsg *pReq) {
|
int32_t mndProcessStreamHb(SRpcMsg *pReq) {
|
||||||
SMnode *pMnode = pReq->info.node;
|
SMnode *pMnode = pReq->info.node;
|
||||||
|
|
||||||
SStreamHbMsg req = {0};
|
SStreamHbMsg req = {0};
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
@ -2314,10 +2311,13 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
|
||||||
|
|
||||||
for (int32_t i = 0; i < req.numOfTasks; ++i) {
|
for (int32_t i = 0; i < req.numOfTasks; ++i) {
|
||||||
STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
|
STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
|
||||||
int64_t k[2] = {p->streamId, p->taskId};
|
int64_t k[2] = {p->streamId, p->taskId};
|
||||||
int32_t index = *(int32_t *)taosHashGet(execNodeList.pTaskMap, &k, sizeof(k));
|
int32_t *index = taosHashGet(execNodeList.pTaskMap, &k, sizeof(k));
|
||||||
|
if (index == NULL) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, index);
|
STaskStatusEntry *pStatusEntry = taosArrayGet(execNodeList.pTaskList, *index);
|
||||||
pStatusEntry->status = p->status;
|
pStatusEntry->status = p->status;
|
||||||
if (p->status != TASK_STATUS__NORMAL) {
|
if (p->status != TASK_STATUS__NORMAL) {
|
||||||
mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status));
|
mDebug("received s-task:0x%x not in ready status:%s", p->taskId, streamGetTaskStatusStr(p->status));
|
||||||
|
|
|
@ -642,7 +642,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
|
||||||
SName tableName = {0};
|
SName tableName = {0};
|
||||||
tNameFromString(&tableName, createTopicReq.subStbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
tNameFromString(&tableName, createTopicReq.subStbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||||
|
|
||||||
sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s",
|
sprintf(detail, "igExists:%d, subStbName:%s, subType:%d, withMeta:%d, sql:%s",
|
||||||
createTopicReq.igExists, tableName.tname, createTopicReq.subType, createTopicReq.withMeta, sql);
|
createTopicReq.igExists, tableName.tname, createTopicReq.subType, createTopicReq.withMeta, sql);
|
||||||
|
|
||||||
SName dbname = {0};
|
SName dbname = {0};
|
||||||
|
|
|
@ -657,7 +657,7 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) {
|
||||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
|
|
||||||
char detail[1000] = {0};
|
char detail[1000] = {0};
|
||||||
sprintf(detail, "createType:%d, enable:%d, superUser:%d, sysInfo:%d",
|
sprintf(detail, "createType:%d, enable:%d, superUser:%d, sysInfo:%d",
|
||||||
createReq.createType, createReq.enable, createReq.superUser, createReq.sysInfo);
|
createReq.createType, createReq.enable, createReq.superUser, createReq.sysInfo);
|
||||||
|
|
||||||
auditRecord(pReq, pMnode->clusterId, "createUser", createReq.user, "", detail);
|
auditRecord(pReq, pMnode->clusterId, "createUser", createReq.user, "", detail);
|
||||||
|
@ -669,7 +669,7 @@ _OVER:
|
||||||
|
|
||||||
mndReleaseUser(pMnode, pUser);
|
mndReleaseUser(pMnode, pUser);
|
||||||
mndReleaseUser(pMnode, pOperUser);
|
mndReleaseUser(pMnode, pOperUser);
|
||||||
|
tFreeSCreateUserReq(&createReq);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1039,16 +1039,16 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
||||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
|
|
||||||
char detail[1000] = {0};
|
char detail[1000] = {0};
|
||||||
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:",
|
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:",
|
||||||
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo, alterReq.tabName);
|
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo, alterReq.tabName);
|
||||||
|
|
||||||
if(alterReq.alterType == TSDB_ALTER_USER_PASSWD){
|
if(alterReq.alterType == TSDB_ALTER_USER_PASSWD){
|
||||||
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:xxx",
|
sprintf(detail, "alterType:%s, enable:%d, superUser:%d, sysInfo:%d, tabName:%s, password:xxx",
|
||||||
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo,
|
mndUserAuditTypeStr(alterReq.alterType), alterReq.enable, alterReq.superUser, alterReq.sysInfo,
|
||||||
alterReq.tabName);
|
alterReq.tabName);
|
||||||
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
|
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
|
||||||
}
|
}
|
||||||
else if(alterReq.alterType == TSDB_ALTER_USER_SUPERUSER ||
|
else if(alterReq.alterType == TSDB_ALTER_USER_SUPERUSER ||
|
||||||
alterReq.alterType == TSDB_ALTER_USER_ENABLE ||
|
alterReq.alterType == TSDB_ALTER_USER_ENABLE ||
|
||||||
alterReq.alterType == TSDB_ALTER_USER_SYSINFO){
|
alterReq.alterType == TSDB_ALTER_USER_SYSINFO){
|
||||||
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
|
auditRecord(pReq, pMnode->clusterId, "alterUser", alterReq.user, "", detail);
|
||||||
|
|
|
@ -165,6 +165,7 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
|
||||||
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
|
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
|
||||||
int32_t tqScanWal(STQ* pTq);
|
int32_t tqScanWal(STQ* pTq);
|
||||||
int32_t tqCheckAndRunStreamTask(STQ* pTq);
|
int32_t tqCheckAndRunStreamTask(STQ* pTq);
|
||||||
|
int32_t tqStartStreamTasks(STQ* pTq);
|
||||||
int32_t tqStopStreamTasks(STQ* pTq);
|
int32_t tqStopStreamTasks(STQ* pTq);
|
||||||
|
|
||||||
// tq util
|
// tq util
|
||||||
|
|
|
@ -1429,7 +1429,7 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
|
||||||
}
|
}
|
||||||
|
|
||||||
int8_t status = pTask->status.taskStatus;
|
int8_t status = pTask->status.taskStatus;
|
||||||
if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY) {
|
if (status == TASK_STATUS__NORMAL || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) {
|
||||||
// no lock needs to secure the access of the version
|
// no lock needs to secure the access of the version
|
||||||
if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) {
|
if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) {
|
||||||
// discard all the data when the stream task is suspended.
|
// discard all the data when the stream task is suspended.
|
||||||
|
@ -1714,20 +1714,47 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
|
|
||||||
tqDebug("s-task:%s receive task nodeEp update msg from mnode", pTask->id.idStr);
|
tqDebug("s-task:%s receive task nodeEp update msg from mnode", pTask->id.idStr);
|
||||||
streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
|
streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
|
||||||
|
streamSetStatusNormal(pTask);
|
||||||
|
|
||||||
|
SStreamTask** ppHTask = NULL;
|
||||||
|
if (pTask->historyTaskId.taskId != 0) {
|
||||||
|
keys[0] = pTask->historyTaskId.streamId;
|
||||||
|
keys[1] = pTask->historyTaskId.taskId;
|
||||||
|
|
||||||
|
ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||||
|
if (ppHTask == NULL || *ppHTask == NULL) {
|
||||||
|
tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
|
||||||
|
pMeta->vgId, req.taskId);
|
||||||
|
} else {
|
||||||
|
tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
|
||||||
|
streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
streamSetStatusNormal(pTask);
|
|
||||||
streamMetaSaveTask(pMeta, pTask);
|
streamMetaSaveTask(pMeta, pTask);
|
||||||
|
if (ppHTask != NULL) {
|
||||||
|
streamMetaSaveTask(pMeta, *ppHTask);
|
||||||
|
}
|
||||||
|
|
||||||
if (streamMetaCommit(pMeta) < 0) {
|
if (streamMetaCommit(pMeta) < 0) {
|
||||||
// persist to disk
|
// persist to disk
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
streamTaskStop(pTask);
|
streamTaskStop(pTask);
|
||||||
|
if (ppHTask != NULL) {
|
||||||
|
streamTaskStop(*ppHTask);
|
||||||
|
}
|
||||||
|
|
||||||
tqDebug("s-task:%s task nodeEp update completed", pTask->id.idStr);
|
tqDebug("s-task:%s task nodeEp update completed", pTask->id.idStr);
|
||||||
|
|
||||||
pMeta->closedTask += 1;
|
pMeta->closedTask += 1;
|
||||||
|
if (ppHTask != NULL) {
|
||||||
|
pMeta->closedTask += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// possibly only handle the stream task.
|
||||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
|
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
|
||||||
allStopped = (pMeta->closedTask == numOfTasks);
|
allStopped = (pMeta->closedTask == numOfTasks);
|
||||||
if (allStopped) {
|
if (allStopped) {
|
||||||
|
@ -1766,6 +1793,7 @@ _end:
|
||||||
taosWUnLockLatch(&pMeta->lock);
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
|
if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
|
||||||
vInfo("vgId:%d, restart all stream tasks", vgId);
|
vInfo("vgId:%d, restart all stream tasks", vgId);
|
||||||
|
tqStartStreamTasks(pTq);
|
||||||
tqCheckAndRunStreamTaskAsync(pTq);
|
tqCheckAndRunStreamTaskAsync(pTq);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -224,6 +224,35 @@ int32_t tqStopStreamTasks(STQ* pTq) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t tqStartStreamTasks(STQ* pTq) {
|
||||||
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
|
int32_t vgId = TD_VID(pTq->pVnode);
|
||||||
|
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||||
|
|
||||||
|
tqDebug("vgId:%d start to stop all %d stream task(s)", vgId, numOfTasks);
|
||||||
|
|
||||||
|
if (numOfTasks == 0) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosWLockLatch(&pMeta->lock);
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < numOfTasks; ++i) {
|
||||||
|
SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
|
||||||
|
|
||||||
|
int64_t key[2] = {pTaskId->streamId, pTaskId->taskId};
|
||||||
|
SStreamTask** pTask = taosHashGet(pMeta->pTasks, key, sizeof(key));
|
||||||
|
|
||||||
|
int8_t status = (*pTask)->status.taskStatus;
|
||||||
|
if (status == TASK_STATUS__STOP) {
|
||||||
|
streamSetStatusNormal(*pTask);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
|
int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
|
||||||
// seek the stored version and extract data from WAL
|
// seek the stored version and extract data from WAL
|
||||||
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
|
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
|
||||||
|
@ -383,6 +412,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
||||||
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
|
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
|
||||||
taosThreadMutexUnlock(&pTask->lock);
|
taosThreadMutexUnlock(&pTask->lock);
|
||||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||||
|
if (pItem != NULL) {
|
||||||
|
streamFreeQitem(pItem);
|
||||||
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -425,6 +425,20 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
|
||||||
if (code) goto _exit;
|
if (code) goto _exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pWriter->pStreamTaskWriter) {
|
||||||
|
code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback);
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pWriter->pStreamStateWriter) {
|
||||||
|
code = streamStateSnapWriterClose(pWriter->pStreamStateWriter, rollback);
|
||||||
|
if (code) goto _exit;
|
||||||
|
|
||||||
|
code = streamStateRebuildFromSnap(pWriter->pStreamStateWriter, 0);
|
||||||
|
pWriter->pStreamStateWriter = NULL;
|
||||||
|
if (code) goto _exit;
|
||||||
|
}
|
||||||
|
|
||||||
if (pWriter->pRsmaSnapWriter) {
|
if (pWriter->pRsmaSnapWriter) {
|
||||||
code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
|
code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
|
||||||
if (code) goto _exit;
|
if (code) goto _exit;
|
||||||
|
|
|
@ -560,6 +560,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
|
||||||
vInfo("vgId:%d, not launch stream tasks, since stream tasks are disabled", vgId);
|
vInfo("vgId:%d, not launch stream tasks, since stream tasks are disabled", vgId);
|
||||||
} else {
|
} else {
|
||||||
vInfo("vgId:%d start to launch stream tasks", pVnode->config.vgId);
|
vInfo("vgId:%d start to launch stream tasks", pVnode->config.vgId);
|
||||||
|
tqStartStreamTasks(pVnode->pTq);
|
||||||
tqCheckAndRunStreamTaskAsync(pVnode->pTq);
|
tqCheckAndRunStreamTaskAsync(pVnode->pTq);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -924,8 +924,17 @@ void nodesDestroyNode(SNode* pNode) {
|
||||||
nodesDestroyNode((SNode*)pStmt->pVal);
|
nodesDestroyNode((SNode*)pStmt->pVal);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case QUERY_NODE_CREATE_USER_STMT: // no pointer field
|
case QUERY_NODE_CREATE_USER_STMT: {
|
||||||
case QUERY_NODE_ALTER_USER_STMT: // no pointer field
|
SCreateUserStmt* pStmt = (SCreateUserStmt*)pNode;
|
||||||
|
taosMemoryFree(pStmt->pIpRanges);
|
||||||
|
nodesDestroyList(pStmt->pNodeListIpRanges);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case QUERY_NODE_ALTER_USER_STMT: {
|
||||||
|
SAlterUserStmt* pStmt = (SAlterUserStmt*)pNode;
|
||||||
|
taosMemoryFree(pStmt->pIpRanges);
|
||||||
|
nodesDestroyList(pStmt->pNodeListIpRanges);
|
||||||
|
}
|
||||||
case QUERY_NODE_DROP_USER_STMT: // no pointer field
|
case QUERY_NODE_DROP_USER_STMT: // no pointer field
|
||||||
case QUERY_NODE_USE_DATABASE_STMT: // no pointer field
|
case QUERY_NODE_USE_DATABASE_STMT: // no pointer field
|
||||||
case QUERY_NODE_CREATE_DNODE_STMT: // no pointer field
|
case QUERY_NODE_CREATE_DNODE_STMT: // no pointer field
|
||||||
|
|
|
@ -192,7 +192,8 @@ SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SN
|
||||||
SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint);
|
SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint);
|
||||||
SNode* createShowTableTagsStmt(SAstCreateContext* pCxt, SNode* pTbName, SNode* pDbName, SNodeList* pTags);
|
SNode* createShowTableTagsStmt(SAstCreateContext* pCxt, SNode* pTbName, SNode* pDbName, SNodeList* pTags);
|
||||||
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo);
|
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo);
|
||||||
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal);
|
SNode* addCreateUserStmtWhiteList(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pIpRangesNodeList);
|
||||||
|
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, void* pAlterInfo);
|
||||||
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName);
|
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName);
|
||||||
SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SToken* pPort);
|
SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SToken* pPort);
|
||||||
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force, bool unsafe);
|
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, bool force, bool unsafe);
|
||||||
|
|
|
@ -81,11 +81,31 @@ alter_account_option ::= USERS literal.
|
||||||
alter_account_option ::= CONNS literal. { }
|
alter_account_option ::= CONNS literal. { }
|
||||||
alter_account_option ::= STATE literal. { }
|
alter_account_option ::= STATE literal. { }
|
||||||
|
|
||||||
|
%type ip_range_list { SNodeList* }
|
||||||
|
%destructor ip_range_list { nodesDestroyList($$); }
|
||||||
|
ip_range_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
|
||||||
|
ip_range_list(A) ::= ip_range_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); }
|
||||||
|
|
||||||
|
%type white_list { SNodeList* }
|
||||||
|
%destructor white_list { nodesDestroyList($$); }
|
||||||
|
white_list(A) ::= HOST ip_range_list(B). { A = B; }
|
||||||
|
|
||||||
|
%type white_list_opt { SNodeList* }
|
||||||
|
%destructor white_list_opt { nodesDestroyList($$); }
|
||||||
|
white_list_opt(A) ::= . { A = NULL; }
|
||||||
|
white_list_opt(A) ::= white_list(B). { A = B; }
|
||||||
|
|
||||||
/************************************************ create/alter/drop user **********************************************/
|
/************************************************ create/alter/drop user **********************************************/
|
||||||
cmd ::= CREATE USER user_name(A) PASS NK_STRING(B) sysinfo_opt(C). { pCxt->pRootNode = createCreateUserStmt(pCxt, &A, &B, C); }
|
cmd ::= CREATE USER user_name(A) PASS NK_STRING(B) sysinfo_opt(C)
|
||||||
|
white_list_opt(D). {
|
||||||
|
pCxt->pRootNode = createCreateUserStmt(pCxt, &A, &B, C);
|
||||||
|
pCxt->pRootNode = addCreateUserStmtWhiteList(pCxt, pCxt->pRootNode, D);
|
||||||
|
}
|
||||||
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B); }
|
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B); }
|
||||||
cmd ::= ALTER USER user_name(A) ENABLE NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_ENABLE, &B); }
|
cmd ::= ALTER USER user_name(A) ENABLE NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_ENABLE, &B); }
|
||||||
cmd ::= ALTER USER user_name(A) SYSINFO NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_SYSINFO, &B); }
|
cmd ::= ALTER USER user_name(A) SYSINFO NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_SYSINFO, &B); }
|
||||||
|
cmd ::= ALTER USER user_name(A) ADD white_list(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_ADD_WHITE_LIST, B); }
|
||||||
|
cmd ::= ALTER USER user_name(A) DROP white_list(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_DROP_WHITE_LIST, B); }
|
||||||
cmd ::= DROP USER user_name(A). { pCxt->pRootNode = createDropUserStmt(pCxt, &A); }
|
cmd ::= DROP USER user_name(A). { pCxt->pRootNode = createDropUserStmt(pCxt, &A); }
|
||||||
|
|
||||||
%type sysinfo_opt { int8_t }
|
%type sysinfo_opt { int8_t }
|
||||||
|
|
|
@ -13,7 +13,7 @@
|
||||||
* You should have received a copy of the GNU Affero General Public License
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
*/
|
*/
|
||||||
|
#include <uv.h>
|
||||||
#include <regex.h>
|
#include <regex.h>
|
||||||
|
|
||||||
#include "parAst.h"
|
#include "parAst.h"
|
||||||
|
@ -1653,6 +1653,86 @@ SNode* createShowTableTagsStmt(SAstCreateContext* pCxt, SNode* pTbName, SNode* p
|
||||||
return (SNode*)pStmt;
|
return (SNode*)pStmt;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int32_t getIpV4RangeFromWhitelistItem(char* ipRange, SIpV4Range* pIpRange) {
|
||||||
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
char* ipCopy = taosStrdup(ipRange);
|
||||||
|
char* slash = strchr(ipCopy, '/');
|
||||||
|
if (slash) {
|
||||||
|
*slash = '\0';
|
||||||
|
struct in_addr addr;
|
||||||
|
if (uv_inet_pton(AF_INET, ipCopy, &addr) == 0) {
|
||||||
|
int prefix = atoi(slash + 1);
|
||||||
|
if (prefix < 0 || prefix > 32) {
|
||||||
|
code = TSDB_CODE_PAR_INVALID_IP_RANGE;
|
||||||
|
} else {
|
||||||
|
pIpRange->ip = addr.s_addr;
|
||||||
|
uint32_t mask = (1 << (32 - prefix)) - 1;
|
||||||
|
mask = htonl(~mask);
|
||||||
|
pIpRange->mask = mask;
|
||||||
|
code = TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
code = TSDB_CODE_PAR_INVALID_IP_RANGE;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
struct in_addr addr;
|
||||||
|
if (uv_inet_pton(AF_INET, ipCopy, &addr) == 0) {
|
||||||
|
pIpRange->ip = addr.s_addr;
|
||||||
|
pIpRange->mask = 0xFFFFFFFF;
|
||||||
|
code = TSDB_CODE_SUCCESS;
|
||||||
|
} else {
|
||||||
|
code = TSDB_CODE_PAR_INVALID_IP_RANGE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taosMemoryFreeClear(ipCopy);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t fillIpRangesFromWhiteList(SAstCreateContext* pCxt, SNodeList* pIpRangesNodeList, SIpV4Range* pIpRanges) {
|
||||||
|
int32_t i = 0;
|
||||||
|
int32_t code = 0;
|
||||||
|
|
||||||
|
SNode* pNode = NULL;
|
||||||
|
FOREACH(pNode, pIpRangesNodeList) {
|
||||||
|
if (QUERY_NODE_VALUE != nodeType(pNode)) {
|
||||||
|
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IP_RANGE);
|
||||||
|
return TSDB_CODE_PAR_INVALID_IP_RANGE;
|
||||||
|
}
|
||||||
|
SValueNode* pValNode = (SValueNode*)(pNode);
|
||||||
|
code = getIpV4RangeFromWhitelistItem(pValNode->literal, pIpRanges + i);
|
||||||
|
++i;
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, code, "Invalid IP range %s", pValNode->literal);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
SNode* addCreateUserStmtWhiteList(SAstCreateContext* pCxt, SNode* pCreateUserStmt, SNodeList* pIpRangesNodeList) {
|
||||||
|
if (NULL == pCreateUserStmt || NULL == pIpRangesNodeList) {
|
||||||
|
return pCreateUserStmt;
|
||||||
|
}
|
||||||
|
|
||||||
|
((SCreateUserStmt*)pCreateUserStmt)->pNodeListIpRanges = pIpRangesNodeList;
|
||||||
|
SCreateUserStmt* pCreateUser = (SCreateUserStmt*)pCreateUserStmt;
|
||||||
|
pCreateUser->numIpRanges = LIST_LENGTH(pIpRangesNodeList);
|
||||||
|
pCreateUser->pIpRanges = taosMemoryMalloc(pCreateUser->numIpRanges * sizeof(SIpV4Range));
|
||||||
|
if (NULL == pCreateUser->pIpRanges) {
|
||||||
|
pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
nodesDestroyNode(pCreateUserStmt);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t code = fillIpRangesFromWhiteList(pCxt, pIpRangesNodeList, pCreateUser->pIpRanges);
|
||||||
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
nodesDestroyNode(pCreateUserStmt);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
return pCreateUserStmt;
|
||||||
|
}
|
||||||
|
|
||||||
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo) {
|
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo) {
|
||||||
CHECK_PARSER_STATUS(pCxt);
|
CHECK_PARSER_STATUS(pCxt);
|
||||||
char password[TSDB_USET_PASSWORD_LEN + 3] = {0};
|
char password[TSDB_USET_PASSWORD_LEN + 3] = {0};
|
||||||
|
@ -1667,7 +1747,7 @@ SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const ST
|
||||||
return (SNode*)pStmt;
|
return (SNode*)pStmt;
|
||||||
}
|
}
|
||||||
|
|
||||||
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal) {
|
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, void* pAlterInfo) {
|
||||||
CHECK_PARSER_STATUS(pCxt);
|
CHECK_PARSER_STATUS(pCxt);
|
||||||
if (!checkUserName(pCxt, pUserName)) {
|
if (!checkUserName(pCxt, pUserName)) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1679,6 +1759,7 @@ SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t al
|
||||||
switch (alterType) {
|
switch (alterType) {
|
||||||
case TSDB_ALTER_USER_PASSWD: {
|
case TSDB_ALTER_USER_PASSWD: {
|
||||||
char password[TSDB_USET_PASSWORD_LEN] = {0};
|
char password[TSDB_USET_PASSWORD_LEN] = {0};
|
||||||
|
SToken* pVal = pAlterInfo;
|
||||||
if (!checkPassword(pCxt, pVal, password)) {
|
if (!checkPassword(pCxt, pVal, password)) {
|
||||||
nodesDestroyNode((SNode*)pStmt);
|
nodesDestroyNode((SNode*)pStmt);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -1686,12 +1767,35 @@ SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t al
|
||||||
strcpy(pStmt->password, password);
|
strcpy(pStmt->password, password);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_ALTER_USER_ENABLE:
|
case TSDB_ALTER_USER_ENABLE: {
|
||||||
|
SToken* pVal = pAlterInfo;
|
||||||
pStmt->enable = taosStr2Int8(pVal->z, NULL, 10);
|
pStmt->enable = taosStr2Int8(pVal->z, NULL, 10);
|
||||||
break;
|
break;
|
||||||
case TSDB_ALTER_USER_SYSINFO:
|
}
|
||||||
|
case TSDB_ALTER_USER_SYSINFO: {
|
||||||
|
SToken* pVal = pAlterInfo;
|
||||||
pStmt->sysinfo = taosStr2Int8(pVal->z, NULL, 10);
|
pStmt->sysinfo = taosStr2Int8(pVal->z, NULL, 10);
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
|
case TSDB_ALTER_USER_ADD_WHITE_LIST:
|
||||||
|
case TSDB_ALTER_USER_DROP_WHITE_LIST: {
|
||||||
|
SNodeList* pIpRangesNodeList = pAlterInfo;
|
||||||
|
pStmt->pNodeListIpRanges = pIpRangesNodeList;
|
||||||
|
pStmt->numIpRanges = LIST_LENGTH(pIpRangesNodeList);
|
||||||
|
pStmt->pIpRanges = taosMemoryMalloc(pStmt->numIpRanges * sizeof(SIpV4Range));
|
||||||
|
if (NULL == pStmt->pIpRanges) {
|
||||||
|
pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
nodesDestroyNode((SNode*)pStmt);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t code = fillIpRangesFromWhiteList(pCxt, pIpRangesNodeList, pStmt->pIpRanges);
|
||||||
|
if (TSDB_CODE_SUCCESS != code) {
|
||||||
|
nodesDestroyNode((SNode*)pStmt);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -109,6 +109,7 @@ static SKeyword keywordTable[] = {
|
||||||
{"GRANTS", TK_GRANTS},
|
{"GRANTS", TK_GRANTS},
|
||||||
{"GROUP", TK_GROUP},
|
{"GROUP", TK_GROUP},
|
||||||
{"HAVING", TK_HAVING},
|
{"HAVING", TK_HAVING},
|
||||||
|
{"HOST", TK_HOST},
|
||||||
{"IF", TK_IF},
|
{"IF", TK_IF},
|
||||||
{"IGNORE", TK_IGNORE},
|
{"IGNORE", TK_IGNORE},
|
||||||
{"IMPORT", TK_IMPORT},
|
{"IMPORT", TK_IMPORT},
|
||||||
|
|
|
@ -3734,7 +3734,7 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||||
static int32_t removeConstantValueFromList(SNodeList** pList) {
|
static int32_t removeConstantValueFromList(SNodeList** pList) {
|
||||||
SNode* pNode = NULL;
|
SNode* pNode = NULL;
|
||||||
WHERE_EACH(pNode, *pList) {
|
WHERE_EACH(pNode, *pList) {
|
||||||
if (nodeType(pNode) == QUERY_NODE_VALUE ||
|
if (nodeType(pNode) == QUERY_NODE_VALUE ||
|
||||||
(nodeType(pNode) == QUERY_NODE_FUNCTION && fmIsConstantResFunc((SFunctionNode*)pNode) && fmIsScalarFunc(((SFunctionNode*)pNode)->funcId))) {
|
(nodeType(pNode) == QUERY_NODE_FUNCTION && fmIsConstantResFunc((SFunctionNode*)pNode) && fmIsScalarFunc(((SFunctionNode*)pNode)->funcId))) {
|
||||||
ERASE_NODE(*pList);
|
ERASE_NODE(*pList);
|
||||||
continue;
|
continue;
|
||||||
|
@ -3753,7 +3753,11 @@ static int32_t removeConstantValueFromList(SNodeList** pList) {
|
||||||
static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||||
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
|
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
|
if (pSelect->pPartitionByList) {
|
||||||
|
code = removeConstantValueFromList(&pSelect->pPartitionByList);
|
||||||
|
}
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS == code && pSelect->pPartitionByList) {
|
if (TSDB_CODE_SUCCESS == code && pSelect->pPartitionByList) {
|
||||||
int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable);
|
int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable);
|
||||||
SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0);
|
SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0);
|
||||||
|
@ -5740,8 +5744,15 @@ static int32_t translateCreateUser(STranslateContext* pCxt, SCreateUserStmt* pSt
|
||||||
createReq.sysInfo = pStmt->sysinfo;
|
createReq.sysInfo = pStmt->sysinfo;
|
||||||
createReq.enable = 1;
|
createReq.enable = 1;
|
||||||
strcpy(createReq.pass, pStmt->password);
|
strcpy(createReq.pass, pStmt->password);
|
||||||
|
|
||||||
return buildCmdMsg(pCxt, TDMT_MND_CREATE_USER, (FSerializeFunc)tSerializeSCreateUserReq, &createReq);
|
createReq.numIpRanges = pStmt->numIpRanges;
|
||||||
|
if (pStmt->numIpRanges > 0) {
|
||||||
|
createReq.pIpRanges = taosMemoryMalloc(createReq.numIpRanges * sizeof(SIpV4Range));
|
||||||
|
memcpy(createReq.pIpRanges, pStmt->pIpRanges, sizeof(SIpV4Range) * createReq.numIpRanges);
|
||||||
|
}
|
||||||
|
int32_t code = buildCmdMsg(pCxt, TDMT_MND_CREATE_USER, (FSerializeFunc)tSerializeSCreateUserReq, &createReq);
|
||||||
|
tFreeSCreateUserReq(&createReq);
|
||||||
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateAlterUser(STranslateContext* pCxt, SAlterUserStmt* pStmt) {
|
static int32_t translateAlterUser(STranslateContext* pCxt, SAlterUserStmt* pStmt) {
|
||||||
|
@ -5756,7 +5767,14 @@ static int32_t translateAlterUser(STranslateContext* pCxt, SAlterUserStmt* pStmt
|
||||||
snprintf(alterReq.objname, sizeof(alterReq.objname), "%s", pCxt->pParseCxt->db);
|
snprintf(alterReq.objname, sizeof(alterReq.objname), "%s", pCxt->pParseCxt->db);
|
||||||
}
|
}
|
||||||
|
|
||||||
return buildCmdMsg(pCxt, TDMT_MND_ALTER_USER, (FSerializeFunc)tSerializeSAlterUserReq, &alterReq);
|
alterReq.numIpRanges = pStmt->numIpRanges;
|
||||||
|
if (pStmt->numIpRanges > 0) {
|
||||||
|
alterReq.pIpRanges = taosMemoryMalloc(alterReq.numIpRanges * sizeof(SIpV4Range));
|
||||||
|
memcpy(alterReq.pIpRanges, pStmt->pIpRanges, sizeof(SIpV4Range) * alterReq.numIpRanges);
|
||||||
|
}
|
||||||
|
int32_t code = buildCmdMsg(pCxt, TDMT_MND_ALTER_USER, (FSerializeFunc)tSerializeSAlterUserReq, &alterReq);
|
||||||
|
tFreeSAlterUserReq(&alterReq);
|
||||||
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt) {
|
static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt) {
|
||||||
|
|
|
@ -184,6 +184,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
|
||||||
return "No valid function in window query";
|
return "No valid function in window query";
|
||||||
case TSDB_CODE_PAR_INVALID_OPTR_USAGE:
|
case TSDB_CODE_PAR_INVALID_OPTR_USAGE:
|
||||||
return "Invalid usage of expr: %s";
|
return "Invalid usage of expr: %s";
|
||||||
|
case TSDB_CODE_PAR_INVALID_IP_RANGE:
|
||||||
|
return "invalid ip range";
|
||||||
case TSDB_CODE_OUT_OF_MEMORY:
|
case TSDB_CODE_OUT_OF_MEMORY:
|
||||||
return "Out of memory";
|
return "Out of memory";
|
||||||
default:
|
default:
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -373,6 +373,11 @@ static bool tagScanNodeHasTbname(SNode* pKeys) {
|
||||||
static int32_t tagScanSetExecutionMode(SScanLogicNode* pScan) {
|
static int32_t tagScanSetExecutionMode(SScanLogicNode* pScan) {
|
||||||
pScan->onlyMetaCtbIdx = false;
|
pScan->onlyMetaCtbIdx = false;
|
||||||
|
|
||||||
|
if (pScan->tableType == TSDB_CHILD_TABLE) {
|
||||||
|
pScan->onlyMetaCtbIdx = false;
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
if (tagScanNodeListHasTbname(pScan->pScanPseudoCols)) {
|
if (tagScanNodeListHasTbname(pScan->pScanPseudoCols)) {
|
||||||
pScan->onlyMetaCtbIdx = false;
|
pScan->onlyMetaCtbIdx = false;
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
@ -442,7 +447,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
||||||
pScan->pScanPseudoCols = pNewScanPseudoCols;
|
pScan->pScanPseudoCols = pNewScanPseudoCols;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
if (NULL != pScan->pScanCols) {
|
if (NULL != pScan->pScanCols) {
|
||||||
|
@ -511,7 +516,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
||||||
pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
|
pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
|
||||||
pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
|
pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
|
||||||
pJoin->isLowLevelJoin = pJoinTable->isLowLevelJoin;
|
pJoin->isLowLevelJoin = pJoinTable->isLowLevelJoin;
|
||||||
|
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
// set left and right node
|
// set left and right node
|
||||||
|
@ -559,7 +564,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
||||||
code = createColumnByRewriteExprs(pColList, &pJoin->node.pTargets);
|
code = createColumnByRewriteExprs(pColList, &pJoin->node.pTargets);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
SNodeList* pColList = NULL;
|
SNodeList* pColList = NULL;
|
||||||
if (QUERY_NODE_REAL_TABLE == nodeType(pJoinTable->pRight) && !pJoin->isLowLevelJoin) {
|
if (QUERY_NODE_REAL_TABLE == nodeType(pJoinTable->pRight) && !pJoin->isLowLevelJoin) {
|
||||||
|
|
|
@ -539,7 +539,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
|
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
|
||||||
|
|
||||||
pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx;
|
pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx;
|
||||||
|
|
||||||
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode);
|
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode);
|
||||||
|
@ -726,7 +726,7 @@ static int32_t mergeEqCond(SNode** ppDst, SNode** ppSrc) {
|
||||||
*ppSrc = NULL;
|
*ppSrc = NULL;
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
|
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
|
||||||
if (NULL == pLogicCond) {
|
if (NULL == pLogicCond) {
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
@ -754,7 +754,7 @@ static int32_t getJoinDataBlockDescNode(SNodeList* pChildren, int32_t idx, SData
|
||||||
planError("Invalid join children num:%d or child type:%d", pChildren->length, nodeType(nodesListGetNode(pChildren, 0)));
|
planError("Invalid join children num:%d or child type:%d", pChildren->length, nodeType(nodesListGetNode(pChildren, 0)));
|
||||||
return TSDB_CODE_PLAN_INTERNAL_ERROR;
|
return TSDB_CODE_PLAN_INTERNAL_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -775,12 +775,12 @@ static int32_t createMergeJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = getJoinDataBlockDescNode(pChildren, 1, &pRightDesc);
|
code = getJoinDataBlockDescNode(pChildren, 1, &pRightDesc);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond,
|
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond,
|
||||||
&pJoin->pPrimKeyCond);
|
&pJoin->pPrimKeyCond);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets,
|
code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets,
|
||||||
&pJoin->pTargets);
|
&pJoin->pTargets);
|
||||||
|
@ -869,7 +869,7 @@ static int32_t createHashJoinColList(int16_t lBlkId, int16_t rBlkId, SNode* pEq1
|
||||||
if (NULL == pJoin->pOnLeft || NULL == pJoin->pOnRight) {
|
if (NULL == pJoin->pOnLeft || NULL == pJoin->pOnRight) {
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
code = extractHashJoinOnCols(lBlkId, rBlkId, pEq1, pJoin);
|
code = extractHashJoinOnCols(lBlkId, rBlkId, pEq1, pJoin);
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = extractHashJoinOnCols(lBlkId, rBlkId, pEq2, pJoin);
|
code = extractHashJoinOnCols(lBlkId, rBlkId, pEq2, pJoin);
|
||||||
|
@ -893,10 +893,10 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
SNodeList* pNew = nodesMakeList();
|
SNodeList* pNew = nodesMakeList();
|
||||||
|
|
||||||
FOREACH(pNode, pJoin->pTargets) {
|
FOREACH(pNode, pJoin->pTargets) {
|
||||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||||
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
||||||
tSimpleHashPut(pHash, name, len, &pCol, POINTER_BYTES);
|
tSimpleHashPut(pHash, name, len, &pCol, POINTER_BYTES);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -905,7 +905,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
|
||||||
|
|
||||||
FOREACH(pNode, pJoin->pOnLeft) {
|
FOREACH(pNode, pJoin->pOnLeft) {
|
||||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||||
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
||||||
SNode** p = tSimpleHashGet(pHash, name, len);
|
SNode** p = tSimpleHashGet(pHash, name, len);
|
||||||
if (p) {
|
if (p) {
|
||||||
nodesListStrictAppend(pJoin->pTargets, *p);
|
nodesListStrictAppend(pJoin->pTargets, *p);
|
||||||
|
@ -914,7 +914,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
|
||||||
}
|
}
|
||||||
FOREACH(pNode, pJoin->pOnRight) {
|
FOREACH(pNode, pJoin->pOnRight) {
|
||||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||||
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
int32_t len = getSlotKey(pNode, NULL, name, TSDB_COL_FNAME_LEN);
|
||||||
SNode** p = tSimpleHashGet(pHash, name, len);
|
SNode** p = tSimpleHashGet(pHash, name, len);
|
||||||
if (p) {
|
if (p) {
|
||||||
nodesListStrictAppend(pJoin->pTargets, *p);
|
nodesListStrictAppend(pJoin->pTargets, *p);
|
||||||
|
@ -930,7 +930,7 @@ static int32_t sortHashJoinTargets(int16_t lBlkId, int16_t rBlkId, SHashJoinPhys
|
||||||
if (p == NULL) {
|
if (p == NULL) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
nodesListStrictAppend(pJoin->pTargets, *p);
|
nodesListStrictAppend(pJoin->pTargets, *p);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -958,10 +958,10 @@ static int32_t createHashJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
|
||||||
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond, &pJoin->pPrimKeyCond);
|
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pPrimKeyEqCond, &pJoin->pPrimKeyCond);
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pColEqCond, &pJoin->pColEqCond);
|
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pColEqCond, &pJoin->pColEqCond);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pTagEqCond, &pJoin->pTagEqCond);
|
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pTagEqCond, &pJoin->pTagEqCond);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOtherOnCond) {
|
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOtherOnCond) {
|
||||||
code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOtherOnCond, &pJoin->pFilterConditions);
|
code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOtherOnCond, &pJoin->pFilterConditions);
|
||||||
}
|
}
|
||||||
|
@ -973,10 +973,10 @@ static int32_t createHashJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = createHashJoinColList(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin->pPrimKeyCond, pJoin->pColEqCond, pJoin->pTagEqCond, pJoin);
|
code = createHashJoinColList(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin->pPrimKeyCond, pJoin->pColEqCond, pJoin->pTagEqCond, pJoin);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = sortHashJoinTargets(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin);
|
code = sortHashJoinTargets(pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoin);
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc);
|
code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc);
|
||||||
}
|
}
|
||||||
|
@ -1001,7 +1001,7 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
|
||||||
planError("Invalid join algorithm:%d", pJoinLogicNode->joinAlgo);
|
planError("Invalid join algorithm:%d", pJoinLogicNode->joinAlgo);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1019,7 +1019,7 @@ static int32_t createGroupCachePhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh
|
||||||
pGrpCache->batchFetch = pLogicNode->batchFetch;
|
pGrpCache->batchFetch = pLogicNode->batchFetch;
|
||||||
SDataBlockDescNode* pChildDesc = ((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc;
|
SDataBlockDescNode* pChildDesc = ((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc;
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
/*
|
/*
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
code = setListSlotId(pCxt, pChildDesc->dataBlockId, -1, pLogicNode->pGroupCols, &pGrpCache->pGroupCols);
|
code = setListSlotId(pCxt, pChildDesc->dataBlockId, -1, pLogicNode->pGroupCols, &pGrpCache->pGroupCols);
|
||||||
}
|
}
|
||||||
|
@ -1045,7 +1045,7 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList*
|
||||||
}
|
}
|
||||||
if (TSDB_CODE_SUCCESS == code) {
|
if (TSDB_CODE_SUCCESS == code) {
|
||||||
memcpy(pDynCtrl->stbJoin.srcScan, pLogicNode->stbJoin.srcScan, sizeof(pDynCtrl->stbJoin.srcScan));
|
memcpy(pDynCtrl->stbJoin.srcScan, pLogicNode->stbJoin.srcScan, sizeof(pDynCtrl->stbJoin.srcScan));
|
||||||
|
|
||||||
SNode* pNode = NULL;
|
SNode* pNode = NULL;
|
||||||
int32_t i = 0;
|
int32_t i = 0;
|
||||||
FOREACH(pNode, pVgList) {
|
FOREACH(pNode, pVgList) {
|
||||||
|
@ -1062,7 +1062,7 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList*
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t createDynQueryCtrlPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SDynQueryCtrlLogicNode* pLogicNode,
|
static int32_t createDynQueryCtrlPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SDynQueryCtrlLogicNode* pLogicNode,
|
||||||
SPhysiNode** pPhyNode) {
|
SPhysiNode** pPhyNode) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
|
@ -391,8 +391,8 @@ static void doRetryDispatchData(void* param, void* tmrId) {
|
||||||
SStreamTask* pTask = param;
|
SStreamTask* pTask = param;
|
||||||
|
|
||||||
if (streamTaskShouldStop(&pTask->status)) {
|
if (streamTaskShouldStop(&pTask->status)) {
|
||||||
atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
||||||
qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
|
qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -409,17 +409,22 @@ static void doRetryDispatchData(void* param, void* tmrId) {
|
||||||
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
int32_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
||||||
qDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
|
qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
|
||||||
|
qDebug("s-task:%s send success, jump out of timer, ref:%d", pTask->id.idStr, ref);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration) {
|
void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration) {
|
||||||
qError("s-task:%s dispatch data in %" PRId64 "ms", pTask->id.idStr, waitDuration);
|
qWarn("s-task:%s dispatch data in %" PRId64 "ms, in timer", pTask->id.idStr, waitDuration);
|
||||||
taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer);
|
if (pTask->launchTaskTimer != NULL) {
|
||||||
|
taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer);
|
||||||
|
} else {
|
||||||
|
pTask->launchTaskTimer = taosTmrStart(doRetryDispatchData, waitDuration, pTask, streamEnv.timer);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
|
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
|
||||||
|
@ -540,8 +545,10 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (++retryCount > MAX_CONTINUE_RETRY_COUNT) { // add to timer to retry
|
if (++retryCount > MAX_CONTINUE_RETRY_COUNT) { // add to timer to retry
|
||||||
qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms",
|
int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
|
||||||
pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS);
|
|
||||||
|
qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
|
||||||
|
pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||||
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -982,8 +989,6 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
||||||
if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
|
if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
|
||||||
}
|
}
|
||||||
|
|
||||||
streamFreeQitem(pTask->msgInfo.pData);
|
|
||||||
pTask->msgInfo.pData = NULL;
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -997,8 +1002,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
||||||
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
|
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
|
||||||
pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream
|
pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream
|
||||||
pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time
|
pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time
|
||||||
qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data",
|
int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
|
||||||
id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS);
|
qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data, ref:%d",
|
||||||
|
id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||||
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
||||||
} else { // pipeline send data in output queue
|
} else { // pipeline send data in output queue
|
||||||
// this message has been sent successfully, let's try next one.
|
// this message has been sent successfully, let's try next one.
|
||||||
|
|
|
@ -521,6 +521,13 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
|
||||||
ASSERT(pTask->status.timerActive == 0);
|
ASSERT(pTask->status.timerActive == 0);
|
||||||
doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);
|
doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);
|
||||||
|
|
||||||
|
if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
|
||||||
|
qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
|
||||||
|
taosTmrStop(pTask->schedInfo.pTimer);
|
||||||
|
pTask->info.triggerParam = 0;
|
||||||
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
|
}
|
||||||
|
|
||||||
streamMetaRemoveTask(pMeta, keys);
|
streamMetaRemoveTask(pMeta, keys);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
} else {
|
} else {
|
||||||
|
@ -659,6 +666,8 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
|
||||||
int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId};
|
int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId};
|
||||||
void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||||
if (p == NULL) {
|
if (p == NULL) {
|
||||||
|
// pTask->chkInfo.checkpointVer may be 0, when a follower is become a leader
|
||||||
|
// In this case, we try not to start fill-history task anymore.
|
||||||
if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.checkpointVer) < 0) {
|
if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.checkpointVer) < 0) {
|
||||||
doClear(pKey, pVal, pCur, pRecycleList);
|
doClear(pKey, pVal, pCur, pRecycleList);
|
||||||
tFreeStreamTask(pTask);
|
tFreeStreamTask(pTask);
|
||||||
|
@ -757,7 +766,6 @@ void metaHbToMnode(void* param, void* tmrId) {
|
||||||
SStreamHbMsg hbMsg = {0};
|
SStreamHbMsg hbMsg = {0};
|
||||||
SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid);
|
SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid);
|
||||||
if (pMeta == NULL) {
|
if (pMeta == NULL) {
|
||||||
// taosMemoryFree(param);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -779,6 +787,7 @@ void metaHbToMnode(void* param, void* tmrId) {
|
||||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
|
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
|
||||||
|
|
||||||
SEpSet epset = {0};
|
SEpSet epset = {0};
|
||||||
|
bool hasValEpset = false;
|
||||||
|
|
||||||
hbMsg.vgId = pMeta->vgId;
|
hbMsg.vgId = pMeta->vgId;
|
||||||
hbMsg.pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry));
|
hbMsg.pTaskStatus = taosArrayInit(numOfTasks, sizeof(STaskStatusEntry));
|
||||||
|
@ -797,51 +806,53 @@ void metaHbToMnode(void* param, void* tmrId) {
|
||||||
|
|
||||||
if (i == 0) {
|
if (i == 0) {
|
||||||
epsetAssign(&epset, &(*pTask)->info.mnodeEpset);
|
epsetAssign(&epset, &(*pTask)->info.mnodeEpset);
|
||||||
|
hasValEpset = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hbMsg.numOfTasks = taosArrayGetSize(hbMsg.pTaskStatus);
|
hbMsg.numOfTasks = taosArrayGetSize(hbMsg.pTaskStatus);
|
||||||
taosRUnLockLatch(&pMeta->lock);
|
taosRUnLockLatch(&pMeta->lock);
|
||||||
|
|
||||||
int32_t code = 0;
|
if (hasValEpset) {
|
||||||
int32_t tlen = 0;
|
int32_t code = 0;
|
||||||
|
int32_t tlen = 0;
|
||||||
|
|
||||||
tEncodeSize(tEncodeStreamHbMsg, &hbMsg, tlen, code);
|
tEncodeSize(tEncodeStreamHbMsg, &hbMsg, tlen, code);
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
|
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
|
||||||
taosArrayDestroy(hbMsg.pTaskStatus);
|
taosArrayDestroy(hbMsg.pTaskStatus);
|
||||||
taosReleaseRef(streamMetaId, rid);
|
taosReleaseRef(streamMetaId, rid);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
void* buf = rpcMallocCont(tlen);
|
void* buf = rpcMallocCont(tlen);
|
||||||
if (buf == NULL) {
|
if (buf == NULL) {
|
||||||
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
|
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
|
||||||
taosArrayDestroy(hbMsg.pTaskStatus);
|
taosArrayDestroy(hbMsg.pTaskStatus);
|
||||||
taosReleaseRef(streamMetaId, rid);
|
taosReleaseRef(streamMetaId, rid);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
SEncoder encoder;
|
SEncoder encoder;
|
||||||
tEncoderInit(&encoder, buf, tlen);
|
tEncoderInit(&encoder, buf, tlen);
|
||||||
if ((code = tEncodeStreamHbMsg(&encoder, &hbMsg)) < 0) {
|
if ((code = tEncodeStreamHbMsg(&encoder, &hbMsg)) < 0) {
|
||||||
rpcFreeCont(buf);
|
rpcFreeCont(buf);
|
||||||
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
|
qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
|
||||||
taosArrayDestroy(hbMsg.pTaskStatus);
|
taosArrayDestroy(hbMsg.pTaskStatus);
|
||||||
taosReleaseRef(streamMetaId, rid);
|
taosReleaseRef(streamMetaId, rid);
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
tEncoderClear(&encoder);
|
||||||
|
|
||||||
|
SRpcMsg msg = {0};
|
||||||
|
initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
|
||||||
|
msg.info.noResp = 1;
|
||||||
|
|
||||||
|
qDebug("vgId:%d, build and send hb to mnode", pMeta->vgId);
|
||||||
|
tmsgSendReq(&epset, &msg);
|
||||||
}
|
}
|
||||||
tEncoderClear(&encoder);
|
|
||||||
|
|
||||||
taosArrayDestroy(hbMsg.pTaskStatus);
|
taosArrayDestroy(hbMsg.pTaskStatus);
|
||||||
|
|
||||||
SRpcMsg msg = {0};
|
|
||||||
initRpcMsg(&msg, TDMT_MND_STREAM_HEARTBEAT, buf, tlen);
|
|
||||||
msg.info.noResp = 1;
|
|
||||||
|
|
||||||
qDebug("vgId:%d, build and send hb to mnode", pMeta->vgId);
|
|
||||||
|
|
||||||
tmsgSendReq(&epset, &msg);
|
|
||||||
taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamEnv.timer, &pMeta->hbInfo.hbTmr);
|
taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamEnv.timer, &pMeta->hbInfo.hbTmr);
|
||||||
taosReleaseRef(streamMetaId, rid);
|
taosReleaseRef(streamMetaId, rid);
|
||||||
}
|
}
|
||||||
|
@ -905,4 +916,4 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {
|
||||||
|
|
||||||
int64_t el = taosGetTimestampMs() - st;
|
int64_t el = taosGetTimestampMs() - st;
|
||||||
qDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
|
qDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
|
||||||
}
|
}
|
||||||
|
|
|
@ -235,7 +235,13 @@ static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
|
||||||
qDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
|
qDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
|
||||||
streamTaskLaunchScanHistory(pTask);
|
streamTaskLaunchScanHistory(pTask);
|
||||||
} else {
|
} else {
|
||||||
qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
|
if (pTask->info.fillHistory == 1) {
|
||||||
|
qDebug("s-task:%s fill-history is set normal when start it, try to remove it,set it task to be dropping", id);
|
||||||
|
pTask->status.taskStatus = TASK_STATUS__DROPPING;
|
||||||
|
ASSERT(pTask->historyTaskId.taskId == 0);
|
||||||
|
} else {
|
||||||
|
qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// when current stream task is ready, check the related fill history task.
|
// when current stream task is ready, check the related fill history task.
|
||||||
|
@ -579,19 +585,17 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
|
||||||
// todo fix the bug: 2. race condition
|
// todo fix the bug: 2. race condition
|
||||||
// an fill history task needs to be started.
|
// an fill history task needs to be started.
|
||||||
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
||||||
int32_t tId = pTask->historyTaskId.taskId;
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
if (tId == 0) {
|
int32_t hTaskId = pTask->historyTaskId.taskId;
|
||||||
|
if (hTaskId == 0) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(pTask->status.downstreamReady == 1);
|
ASSERT(pTask->status.downstreamReady == 1);
|
||||||
qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
|
qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
|
||||||
pTask->historyTaskId.streamId, tId);
|
pTask->historyTaskId.streamId, hTaskId);
|
||||||
|
|
||||||
SStreamMeta* pMeta = pTask->pMeta;
|
int64_t keys[2] = {pTask->historyTaskId.streamId, hTaskId};
|
||||||
int32_t hTaskId = pTask->historyTaskId.taskId;
|
|
||||||
|
|
||||||
int64_t keys[2] = {pTask->historyTaskId.streamId, pTask->historyTaskId.taskId};
|
|
||||||
|
|
||||||
// Set the execute conditions, including the query time window and the version range
|
// Set the execute conditions, including the query time window and the version range
|
||||||
SStreamTask** pHTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
SStreamTask** pHTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys));
|
||||||
|
@ -610,11 +614,12 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
||||||
// todo failed to create timer
|
// todo failed to create timer
|
||||||
taosMemoryFree(pInfo);
|
taosMemoryFree(pInfo);
|
||||||
} else {
|
} else {
|
||||||
atomic_add_fetch_8(&pTask->status.timerActive, 1);// timer is active
|
int32_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);// timer is active
|
||||||
|
ASSERT(ref == 1);
|
||||||
qDebug("s-task:%s set timer active flag", pTask->id.idStr);
|
qDebug("s-task:%s set timer active flag", pTask->id.idStr);
|
||||||
}
|
}
|
||||||
} else { // timer exists
|
} else { // timer exists
|
||||||
ASSERT(pTask->status.timerActive > 0);
|
ASSERT(pTask->status.timerActive == 1);
|
||||||
qDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
|
qDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
|
||||||
taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
|
taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
|
||||||
}
|
}
|
||||||
|
@ -918,6 +923,13 @@ void streamTaskHalt(SStreamTask* pTask) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// wait for checkpoint completed
|
||||||
|
while(pTask->status.taskStatus == TASK_STATUS__CK) {
|
||||||
|
qDebug("s-task:%s status:%s during generating checkpoint, wait for 1sec and retry set status:halt", pTask->id.idStr,
|
||||||
|
streamGetTaskStatusStr(TASK_STATUS__CK));
|
||||||
|
taosMsleep(1000);
|
||||||
|
}
|
||||||
|
|
||||||
// upgrade to halt status
|
// upgrade to halt status
|
||||||
if (status == TASK_STATUS__PAUSE) {
|
if (status == TASK_STATUS__PAUSE) {
|
||||||
qDebug("s-task:%s upgrade status to %s from %s", pTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT),
|
qDebug("s-task:%s upgrade status to %s from %s", pTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT),
|
||||||
|
|
|
@ -2156,12 +2156,10 @@ static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) {
|
||||||
|
|
||||||
if (rpcDebugFlag & DEBUG_DEBUG) {
|
if (rpcDebugFlag & DEBUG_DEBUG) {
|
||||||
STraceId* trace = &pMsg->msg.info.traceId;
|
STraceId* trace = &pMsg->msg.info.traceId;
|
||||||
char* tbuf = taosMemoryCalloc(1, TSDB_FQDN_LEN * 5);
|
char tbuf[512] = {0};
|
||||||
|
|
||||||
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
||||||
tGDebug("%s retry on next node,use:%s, step: %d,timeout:%" PRId64 "", transLabel(pThrd->pTransInst), tbuf,
|
tGDebug("%s retry on next node,use:%s, step: %d,timeout:%" PRId64 "", transLabel(pThrd->pTransInst), tbuf,
|
||||||
pCtx->retryStep, pCtx->retryNextInterval);
|
pCtx->retryStep, pCtx->retryNextInterval);
|
||||||
taosMemoryFree(tbuf);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
|
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
|
||||||
|
@ -2387,7 +2385,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
|
||||||
bool hasEpSet = cliTryExtractEpSet(pResp, &pCtx->epSet);
|
bool hasEpSet = cliTryExtractEpSet(pResp, &pCtx->epSet);
|
||||||
if (hasEpSet) {
|
if (hasEpSet) {
|
||||||
if (rpcDebugFlag & DEBUG_TRACE) {
|
if (rpcDebugFlag & DEBUG_TRACE) {
|
||||||
char tbuf[256] = {0};
|
char tbuf[512] = {0};
|
||||||
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
EPSET_TO_STR(&pCtx->epSet, tbuf);
|
||||||
tGTrace("%s conn %p extract epset from msg", CONN_GET_INST_LABEL(pConn), pConn);
|
tGTrace("%s conn %p extract epset from msg", CONN_GET_INST_LABEL(pConn), pConn);
|
||||||
}
|
}
|
||||||
|
|
|
@ -556,7 +556,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY, "Window query not su
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DROP_COL, "No columns can be dropped")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DROP_COL, "No columns can be dropped")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COL_JSON, "Only tag can be json type")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COL_JSON, "Only tag can be json type")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_VALUE_TOO_LONG, "Value too long for column/tag")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_VALUE_TOO_LONG, "Value too long for column/tag")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalid varbinary value")
|
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DELETE_WHERE, "The DELETE statement must have a definite time window range")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DELETE_WHERE, "The DELETE statement must have a definite time window range")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG, "The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG, "The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC, "Fill not allowed")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC, "Fill not allowed")
|
||||||
|
@ -573,6 +572,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_VARBINARY, "Invalidate varbinary value")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_IP_RANGE, "Invalid IPV4 address ranges")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
|
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
|
||||||
|
|
||||||
//planner
|
//planner
|
||||||
|
|
|
@ -6,6 +6,21 @@
|
||||||
,,y,unit-test,bash test.sh
|
,,y,unit-test,bash test.sh
|
||||||
|
|
||||||
#system test
|
#system test
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_session.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_state_window.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_interval.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_state_window.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_session.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval_ext.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval_ext.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session_ext.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/partition_interval.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py
|
||||||
|
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3
|
||||||
|
@ -24,7 +39,7 @@
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 2
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 2
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4
|
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4
|
||||||
|
@ -171,6 +186,8 @@
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
|
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
|
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
|
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py
|
||||||
|
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py
|
||||||
,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
|
,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
|
||||||
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
|
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
|
||||||
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
|
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
|
||||||
|
@ -467,7 +484,7 @@
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3
|
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3
|
||||||
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3
|
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3
|
||||||
,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3
|
,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3
|
||||||
,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1
|
#,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6
|
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6
|
||||||
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3
|
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3
|
||||||
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
|
#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
|
||||||
|
@ -809,6 +826,7 @@
|
||||||
|
|
||||||
,,y,script,./test.sh -f tsim/user/basic.sim
|
,,y,script,./test.sh -f tsim/user/basic.sim
|
||||||
,,y,script,./test.sh -f tsim/user/password.sim
|
,,y,script,./test.sh -f tsim/user/password.sim
|
||||||
|
,,y,script,./test.sh -f tsim/user/whitelist.sim
|
||||||
,,y,script,./test.sh -f tsim/user/privilege_db.sim
|
,,y,script,./test.sh -f tsim/user/privilege_db.sim
|
||||||
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
|
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
|
||||||
,,y,script,./test.sh -f tsim/user/privilege_topic.sim
|
,,y,script,./test.sh -f tsim/user/privilege_topic.sim
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -111,7 +111,7 @@ class TDSql:
|
||||||
return self.error_info
|
return self.error_info
|
||||||
|
|
||||||
|
|
||||||
def query(self, sql, row_tag=None,queryTimes=10):
|
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
|
||||||
self.sql = sql
|
self.sql = sql
|
||||||
i=1
|
i=1
|
||||||
while i <= queryTimes:
|
while i <= queryTimes:
|
||||||
|
@ -120,6 +120,17 @@ class TDSql:
|
||||||
self.queryResult = self.cursor.fetchall()
|
self.queryResult = self.cursor.fetchall()
|
||||||
self.queryRows = len(self.queryResult)
|
self.queryRows = len(self.queryResult)
|
||||||
self.queryCols = len(self.cursor.description)
|
self.queryCols = len(self.cursor.description)
|
||||||
|
|
||||||
|
if count_expected_res is not None:
|
||||||
|
counter = 0
|
||||||
|
while count_expected_res != self.queryResult[0][0]:
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
self.queryResult = self.cursor.fetchall()
|
||||||
|
if counter < queryTimes:
|
||||||
|
counter += 0.5
|
||||||
|
time.sleep(0.5)
|
||||||
|
else:
|
||||||
|
return False
|
||||||
if row_tag:
|
if row_tag:
|
||||||
return self.queryResult
|
return self.queryResult
|
||||||
return self.queryRows
|
return self.queryRows
|
||||||
|
@ -501,7 +512,8 @@ class TDSql:
|
||||||
|
|
||||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||||
tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
# tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||||
|
raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||||
|
|
||||||
def checkNotEqual(self, elm, expect_elm):
|
def checkNotEqual(self, elm, expect_elm):
|
||||||
if elm != expect_elm:
|
if elm != expect_elm:
|
||||||
|
@ -509,7 +521,8 @@ class TDSql:
|
||||||
else:
|
else:
|
||||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||||
tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||||
|
raise Exception
|
||||||
|
|
||||||
def get_times(self, time_str, precision="ms"):
|
def get_times(self, time_str, precision="ms"):
|
||||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
|
|
@ -90,7 +90,7 @@ endi
|
||||||
|
|
||||||
sql select tags tbname,t,b from stt1 order by t
|
sql select tags tbname,t,b from stt1 order by t
|
||||||
print $rows
|
print $rows
|
||||||
print $data00 $data01 $data02 $data10 $data11 $data12 $data20 $data21 $data22 $data30 $data31 $data32
|
print $data00 $data01 $data02 $data10 $data11 $data12 $data20 $data21 $data22 $data30 $data31 $data32
|
||||||
if $rows != 4 then
|
if $rows != 4 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
@ -103,7 +103,7 @@ endi
|
||||||
|
|
||||||
sql select tags t,b from stt1 where t=1
|
sql select tags t,b from stt1 where t=1
|
||||||
print $rows
|
print $rows
|
||||||
print $data00 $data01
|
print $data00 $data01
|
||||||
if $rows != 1 then
|
if $rows != 1 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
@ -116,7 +116,20 @@ endi
|
||||||
|
|
||||||
sql select tags t,b from stt1 where tbname='ctt11'
|
sql select tags t,b from stt1 where tbname='ctt11'
|
||||||
print $rows
|
print $rows
|
||||||
print $data00 $data01
|
print $data00 $data01
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != @1@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data01 != @1aa@ then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select tags t,b from ctt11
|
||||||
|
print $rows
|
||||||
|
print $data00 $data01
|
||||||
if $rows != 1 then
|
if $rows != 1 then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
print ============= step1
|
||||||
|
sql create user u_read pass 'taosdata1' host '127.0.0.1/24','192.168.1.0/24'
|
||||||
|
sql create user u_write pass 'taosdata1' host '127.0.0.1','192.168.1.0'
|
||||||
|
|
||||||
|
sql alter user u_read add host '3.3.3.4/24'
|
||||||
|
sql alter user u_write drop host '4.4.4.5/25'
|
||||||
|
|
||||||
|
sql show users
|
||||||
|
if $rows != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
print ============= step2
|
||||||
|
sql_error create user read1 pass 'taosdata1' host '127.0.0/24'
|
||||||
|
sql_error create user write1 pass 'taosdata1' host '4.4.4.4/33'
|
||||||
|
|
||||||
|
sql show users
|
||||||
|
if $rows != 3 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,37 @@
|
||||||
|
import time
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 1, "ttlChangeOnWrite": 0}
|
||||||
|
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
tdSql.init(conn.cursor(), True)
|
||||||
|
self.ttl = 5
|
||||||
|
self.dbname = "test"
|
||||||
|
|
||||||
|
def check_ttl_result(self):
|
||||||
|
tdSql.execute(f'create database {self.dbname}')
|
||||||
|
tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
|
||||||
|
tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
|
||||||
|
tdSql.query(f'show {self.dbname}.tables')
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
time.sleep(self.ttl + 2)
|
||||||
|
tdSql.query(f'show {self.dbname}.tables')
|
||||||
|
tdSql.checkRows(1)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.check_ttl_result()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,59 @@
|
||||||
|
import time
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360,
|
||||||
|
"ttlFlushThreshold": 100, "ttlBatchDropNum": 10}
|
||||||
|
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
tdSql.init(conn.cursor(), True)
|
||||||
|
self.ttl = 5
|
||||||
|
self.tables = 100
|
||||||
|
self.dbname = "test"
|
||||||
|
|
||||||
|
def check_batch_drop_num(self):
|
||||||
|
tdSql.execute(f'create database {self.dbname} vgroups 1')
|
||||||
|
tdSql.execute(f'use {self.dbname}')
|
||||||
|
tdSql.execute(f'create table stb(ts timestamp, c1 int) tags(t1 int)')
|
||||||
|
for i in range(self.tables):
|
||||||
|
tdSql.execute(f'create table t{i} using stb tags({i}) ttl {self.ttl}')
|
||||||
|
|
||||||
|
time.sleep(self.ttl * 2)
|
||||||
|
tdSql.query('show tables')
|
||||||
|
tdSql.checkRows(90)
|
||||||
|
|
||||||
|
def check_ttl_result(self):
|
||||||
|
tdSql.execute(f'drop database if exists {self.dbname}')
|
||||||
|
tdSql.execute(f'create database {self.dbname}')
|
||||||
|
tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
|
||||||
|
tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
|
||||||
|
tdSql.query(f'show {self.dbname}.tables')
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
time.sleep(self.ttl)
|
||||||
|
tdSql.execute(f'insert into {self.dbname}.t2 values(now, 1)');
|
||||||
|
|
||||||
|
time.sleep(self.ttl)
|
||||||
|
tdSql.query(f'show {self.dbname}.tables')
|
||||||
|
tdSql.checkRows(2)
|
||||||
|
|
||||||
|
time.sleep(self.ttl * 2)
|
||||||
|
tdSql.query(f'show {self.dbname}.tables')
|
||||||
|
tdSql.checkRows(1)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.check_batch_drop_num()
|
||||||
|
self.check_ttl_result()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,220 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None):
|
||||||
|
tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***")
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
if partition == "tbname":
|
||||||
|
if case_when:
|
||||||
|
stream_case_when_partition = case_when
|
||||||
|
else:
|
||||||
|
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||||
|
|
||||||
|
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||||
|
elif partition == "c1":
|
||||||
|
if case_when:
|
||||||
|
stream_case_when_partition = case_when
|
||||||
|
else:
|
||||||
|
stream_case_when_partition = self.tdCom.partition_col_alias
|
||||||
|
partition_elm_alias = self.tdCom.partition_col_alias
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||||
|
elif partition is None:
|
||||||
|
partition_elm_alias = '"no_partition"'
|
||||||
|
else:
|
||||||
|
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||||
|
if partition == "tbname" or partition is None:
|
||||||
|
if case_when:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
if partition:
|
||||||
|
partition_elm = f'partition by {partition} {partition_elm_alias}'
|
||||||
|
else:
|
||||||
|
partition_elm = ""
|
||||||
|
if fill_value:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||||
|
if fill_value:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||||
|
start_time = self.tdCom.date_time
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||||
|
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||||
|
if i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||||
|
if i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
if partition:
|
||||||
|
partition_elm = f'partition by {partition}'
|
||||||
|
else:
|
||||||
|
partition_elm = ""
|
||||||
|
|
||||||
|
if not fill_value:
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||||
|
|
||||||
|
if self.tdCom.subtable:
|
||||||
|
for tname in [self.stb_name, self.ctb_name]:
|
||||||
|
tdSql.query(f'select * from {self.ctb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition is None:
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
tdSql.query(f'select * from {self.tb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition is None:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
if fill_value:
|
||||||
|
end_date_time = self.tdCom.date_time
|
||||||
|
final_range_count = self.tdCom.range_count
|
||||||
|
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||||
|
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||||
|
future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||||
|
end_ts = self.tdCom.time_cast(future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||||
|
self.tdCom.date_time = start_time
|
||||||
|
# update
|
||||||
|
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||||
|
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||||
|
future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||||
|
end_ts = self.tdCom.time_cast(future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||||
|
self.tdCom.date_time = start_time
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||||
|
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
if self.delete:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
if partition == "tbname":
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||||
|
if partition == "tbname":
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||||
|
|
||||||
|
if self.delete:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
if partition == "tbname":
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||||
|
|
||||||
|
else:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||||
|
if partition == "tbname":
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True)
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True)
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True)
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True)
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
|
||||||
|
# for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||||
|
for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
|
||||||
|
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)
|
||||||
|
|
||||||
|
    def stop(self):
        # Tear down the case: release the SQL connection and report success
        # to the test framework's log.
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
||||||
|
# Shared event object available to cases that need cross-thread signalling.
event = threading.Event()

# Register this case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
|
@ -0,0 +1,209 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
    # Run the server with verbose debug logging and synchronous log flushing.
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        # Framework entry point: record the replica count and bind the shared
        # SQL cursor plus the common stream-test helper object.
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False):
        """Verify an at-once-trigger interval stream that writes into an existing super table.

        Args:
            interval: window length in seconds for the stream's INTERVAL clause.
            partition: PARTITION BY expression for the stream (None = no partition clause).
            delete: also delete some inserted source rows and re-verify.
            fill_value: FILL clause spec (e.g. "NULL", "PREV", "VALUE,...").
            fill_history_value: forwarded to data preparation (history backfill).
            subtable: SUBTABLE naming expression; the literal "constant" selects a
                fixed destination child-table name.
            case_when: CASE WHEN expression used in place of the partition alias.
            stb_field_name_value: explicit destination-column list of the existing
                stb; "" falls back to the first five columns.
            tag_value: explicit destination TAGS list of the existing stb.
            use_exist_stb: write into a pre-created (external) super table.
            use_except: the CREATE STREAM statement is expected to fail; assert
                the error and return without ingesting data.
        """
        tdLog.info(f"*** testing stream at_once+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, delete: {delete}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
        # Pick the source SELECT column list: the partial list when the requested
        # destination columns are a partial/exchanged subset, otherwise the full one.
        if use_except:
            if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")):
                partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
            else:
                partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
        else:
            if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
                partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
            else:
                partitial_tb_source_str = self.tdCom.ext_tb_source_select_str

        # An empty (but not None) destination-column list means "use the first five".
        if stb_field_name_value is not None:
            if len(stb_field_name_value) == 0:
                stb_field_name_value = ",".join(self.tdCom.tb_filter_des_select_elm.split(",")[:5])
            # else:
            #     stb_field_name_value = self.tdCom.tb_filter_des_select_elm
        self.delete = delete
        self.tdCom.case_name = sys._getframe().f_code.co_name
        defined_tag_count = len(tag_value.split()) if tag_value is not None else 0
        self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
        # Strip the database prefix so the bare names can be embedded in SQL below.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'

        # Resolve the alias used for the partition expression in the stream SQL.
        if partition == "tbname":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_tbname_alias

            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_col_alias
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "abs(c1)":
            partition_elm_alias = self.tdCom.partition_expression_alias
        elif partition == "tbname,t1,c1":
            partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1'
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        # Build the SUBTABLE destination-name expression, if sub-tables are requested.
        if subtable:
            if partition == "tbname":
                if case_when:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                else:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            else:
                if subtable == "constant":
                    # stb_subtable_value = f'"{self.tdCom.ext_ctb_stream_des_table}"'
                    stb_subtable_value = f'"constant_{self.tdCom.ext_ctb_stream_des_table}"'
                else:
                    stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(cast({subtable} as int unsigned) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = None
        if fill_value:
            # A VALUE fill needs one literal per selected output column.
            if "value" in fill_value.lower():
                fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
        # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.ext_tb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
        # Create the stream; with use_except the helper is expected to hand back the
        # failing DDL (presumably instead of executing it) so it can be asserted as
        # an error below — TODO confirm against tdCom.create_stream.
        if partition:
            stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
        else:
            stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
        if stream_sql:
            tdSql.error(stream_sql)
            return
        start_time = self.tdCom.date_time
        # A constant sub-table name funnels every window into one destination,
        # so a single ingestion round is enough.
        if subtable == "constant":
            range_count = 1
        else:
            range_count = self.tdCom.range_count

        for i in range(range_count):
            latency = 0
            tag_value_list = list()
            ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
            ts_cast_delete_value = self.tdCom.time_cast(ts_value)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
            # Re-insert the same timestamp on even rounds to exercise row updates.
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
            # Delete the just-written row on odd rounds to exercise deletes.
            if self.delete and i%2 != 0:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
            if self.delete and i%2 != 0:
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
            # Capture the reference tag values for the later comparison.
            if tag_value:
                if subtable == "constant":
                    tdSql.query(f'select {tag_value} from constant_{self.tdCom.ext_ctb_stream_des_table}')
                else:
                    tdSql.query(f'select {tag_value} from {self.stb_name}')
                tag_value_list = tdSql.queryResult
            if not fill_value:
                # Compare the stream's destination table with a direct source query.
                if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm:
                    self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
                elif stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
                    # Exchanged column order: the destination stores max(c2)/min(c1)
                    # cast to the swapped column types.
                    self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, cast(max(c2) as tinyint), cast(min(c1) as smallint) from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
                else:
                    if partition:
                        if tag_value == self.tdCom.exchange_tag_filter_des_select_elm:
                            self.tdCom.check_query_data(f'select {self.tdCom.partitial_tag_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
                        elif tag_value == self.tdCom.cast_tag_filter_des_select_elm:
                            tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart')
                            limit_row = tdSql.queryRows
                            self.tdCom.check_query_data(f'select {self.tdCom.cast_tag_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select cast(t1 as TINYINT UNSIGNED),cast(t2 as varchar(256)),cast(t3 as bool) from {self.stb_name} order by ts limit {limit_row}')
                            tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
                            # Undefined destination tags must stay NULL; poll (1s steps)
                            # until the stream catches up or default_interval elapses.
                            while list(set(tdSql.queryResult)) != [(None, None, None, None, None, None, None, None, None, None)]:
                                tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
                                if latency < self.tdCom.default_interval:
                                    latency += 1
                                    time.sleep(1)
                                else:
                                    return False
                            tdSql.checkEqual(list(set(tdSql.queryResult)), [(None, None, None, None, None, None, None, None, None, None)])
                        else:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
                    else:
                        if use_exist_stb and not tag_value:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, use_exist_stb=use_exist_stb)
                        else:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, subtable=subtable)

        # Verify the generated sub-table names and that each one received rows.
        if subtable:
            for tname in [self.stb_name]:
                tdSql.query(f'select * from {self.ctb_name}')
                ptn_counter = 0
                for c1_value in tdSql.queryResult:
                    if partition == "c1":
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
                    elif partition == "abs(c1)":
                        abs_c1_value = abs(c1_value[1])
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                    elif partition == "tbname" and ptn_counter == 0:
                        # One sub-table per source child table: check it only once.
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
                        ptn_counter += 1
                    else:
                        # Re-derive the cast chain the SUBTABLE expression applied to c1.
                        tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))')
                        subtable_value = tdSql.queryResult[0][0]
                        if subtable == "constant":
                            return
                        else:
                            tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;')
                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

    def run(self):
        # self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
        # Sweep delete on/off and history backfill on/off over the supported
        # partition/subtable/column/tag combinations.
        for delete in [True, False]:
            for fill_history_value in [0, 1]:
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.partitial_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.exchange_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
                # self-define tag
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.partitial_tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.exchange_tag_filter_des_select_elm, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition="t1 as t5,t2 as t11,t3 as t13", subtable=None, stb_field_name_value=None, tag_value="t5,t11,t13", use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True)
                self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t1", use_exist_stb=True)
        # error cases
        # NOTE(review): these calls do not reference the loop variables, so they are
        # placed at method level — indentation lost in transit, confirm upstream.
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value="", tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm.replace("c1","c19"), tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="ttt", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t15", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="c5", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as t14", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
        self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as c13", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,c13", use_exist_stb=True, use_except=True)

    def stop(self):
        # Release the SQL connection and report success to the framework.
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
# Shared event object available to cases that need cross-thread signalling.
event = threading.Event()

# Register this case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
|
@ -0,0 +1,223 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
    def init(self, conn, logSql, replicaVar=1):
        # Framework entry point: record the replica count, bind the SQL cursor,
        # and keep a handle to the shared stream-test helper object.
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom
||||||
|
|
||||||
|
def at_once_session(self, session, ignore_expired=None, ignore_update=None, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
|
||||||
|
tdLog.info(f"*** testing stream at_once+interval: session: {session}, ignore_expired: {ignore_expired}, ignore_update: {ignore_update}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, case_when: {case_when}, subtable: {subtable} ***")
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(session=session, fill_history_value=fill_history_value)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
if partition == "tbname":
|
||||||
|
if case_when:
|
||||||
|
stream_case_when_partition = case_when
|
||||||
|
else:
|
||||||
|
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||||
|
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||||
|
elif partition == "c1":
|
||||||
|
partition_elm_alias = self.tdCom.partition_col_alias
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
if subtable:
|
||||||
|
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||||
|
else:
|
||||||
|
partition_elm_alias = "constant"
|
||||||
|
else:
|
||||||
|
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||||
|
if partition == "tbname" or subtable is None:
|
||||||
|
if case_when:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
if subtable:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
if 'abs' in partition:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
|
||||||
|
else:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
|
||||||
|
|
||||||
|
# create stb/ctb/tb stream
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
ctb_name = self.tdCom.get_long_name()
|
||||||
|
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||||
|
|
||||||
|
if i == 0:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
|
||||||
|
else:
|
||||||
|
self.tdCom.date_time = window_close_ts + 1
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
|
||||||
|
if i == 0:
|
||||||
|
record_window_close_ts = window_close_ts
|
||||||
|
for ts_value in [self.tdCom.date_time, window_close_ts]:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
|
||||||
|
ts_value += 1
|
||||||
|
|
||||||
|
# check result
|
||||||
|
if partition != "tbname":
|
||||||
|
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||||
|
if "first" not in colname and "last" not in colname:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
|
||||||
|
else:
|
||||||
|
for tbname in [self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||||
|
|
||||||
|
if self.tdCom.disorder:
|
||||||
|
if ignore_expired:
|
||||||
|
for tbname in [self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res2 = tdSql.queryResult
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
|
||||||
|
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkNotEqual(res1, res2)
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkEqual(res1, res2)
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res2 = tdSql.queryResult
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
|
||||||
|
tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkNotEqual(res1, res2)
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkEqual(res1, res2)
|
||||||
|
else:
|
||||||
|
if ignore_update:
|
||||||
|
for tbname in [self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res2 = tdSql.queryResult
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||||
|
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkNotEqual(res1, res2)
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||||
|
res2 = tdSql.queryResult
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
|
||||||
|
tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.checkNotEqual(res1, res2)
|
||||||
|
else:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
|
||||||
|
if partition != "tbname":
|
||||||
|
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||||
|
if "first" not in colname and "last" not in colname:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
|
||||||
|
else:
|
||||||
|
for tbname in [self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||||
|
|
||||||
|
if fill_history_value:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.record_history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.record_history_ts)
|
||||||
|
if self.delete:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))
|
||||||
|
|
||||||
|
if self.tdCom.subtable:
|
||||||
|
tdSql.query(f'select * from {self.ctb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if c1_value[1] is not None:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
if subtable:
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
tdSql.query(f'select * from {self.tb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if c1_value[1] is not None:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
if subtable:
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.at_once_session(session=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, delete=True, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
|
||||||
|
for subtable in [None, True]:
|
||||||
|
self.at_once_session(session=random.randint(10, 15), subtable=subtable, partition="abs(c1)")
|
||||||
|
for ignore_expired in [None, 0, 1]:
|
||||||
|
for fill_history_value in [None, 1]:
|
||||||
|
self.at_once_session(session=random.randint(10, 15), ignore_expired=ignore_expired, fill_history_value=fill_history_value)
|
||||||
|
for fill_history_value in [None, 1]:
|
||||||
|
self.at_once_session(session=random.randint(10, 15), partition="tbname", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_session(session=random.randint(10, 15), partition="c1", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, subtable=None, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_session(session=random.randint(10, 15), ignore_update=1, fill_history_value=fill_history_value)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,144 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def at_once_state_window(self, state_window, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
|
||||||
|
tdLog.info(f"*** testing stream at_once+interval: state_window: {state_window}, partition: {partition}, fill_history: {fill_history_value}, case_when: {case_when}***, delete: {delete}")
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(state_window=state_window, fill_history_value=fill_history_value)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
|
||||||
|
if partition == "tbname":
|
||||||
|
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||||
|
elif partition == "c1" and subtable is not None:
|
||||||
|
partition_elm_alias = self.tdCom.partition_col_alias
|
||||||
|
elif partition == "c1" and subtable is None:
|
||||||
|
partition_elm_alias = 'constant'
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||||
|
else:
|
||||||
|
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||||
|
if partition == "tbname" or subtable is None:
|
||||||
|
if partition == "tbname":
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
if 'abs' in partition:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
|
||||||
|
else:
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
|
||||||
|
state_window_col_name = self.tdCom.dataDict["state_window"]
|
||||||
|
if case_when:
|
||||||
|
stream_state_window = case_when
|
||||||
|
else:
|
||||||
|
stream_state_window = state_window_col_name
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
range_times = self.tdCom.range_count
|
||||||
|
state_window_max = self.tdCom.dataDict['state_window_max']
|
||||||
|
for i in range(range_times):
|
||||||
|
state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times))
|
||||||
|
for i in range(2, range_times+3):
|
||||||
|
tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||||
|
tdSql.execute(f'delete from {self.ctb_name} where ts = {dt}')
|
||||||
|
tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
tdSql.execute(f'delete from {self.tb_name} where ts = {dt}')
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
|
||||||
|
# for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
for tbname in [self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)
|
||||||
|
|
||||||
|
if fill_history_value:
|
||||||
|
self.tdCom.update_delete_history_data(self.delete)
|
||||||
|
|
||||||
|
if self.tdCom.subtable:
|
||||||
|
tdSql.query(f'select * from {self.ctb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
if subtable:
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||||
|
return
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
tdSql.query(f'select * from {self.tb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
if subtable:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||||
|
return
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.at_once_state_window(state_window="c2", partition="tbname", case_when="case when c1 < 0 then c1 else c2 end")
|
||||||
|
self.at_once_state_window(state_window="c1", partition="tbname", case_when="case when c1 >= 0 then c1 else c2 end")
|
||||||
|
for fill_history_value in [None, 1]:
|
||||||
|
self.at_once_state_window(state_window="c1", partition="tbname", fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="c1", fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="abs(c1)", fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="tbname", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="c1", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
|
||||||
|
self.at_once_state_window(state_window="c1", partition="c1", subtable=None, fill_history_value=fill_history_value)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,161 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def watermark_max_delay_interval(self, interval, max_delay, watermark=None, fill_value=None, delete=False):
|
||||||
|
tdLog.info(f"*** testing stream max_delay+interval: interval: {interval}, watermark: {watermark}, fill_value: {fill_value}, delete: {delete} ***")
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
if watermark is not None:
|
||||||
|
self.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(interval=interval, watermark=watermark)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tdCom.date_time = 1658921623245
|
||||||
|
if watermark is not None:
|
||||||
|
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||||
|
fill_watermark_value = watermark_value
|
||||||
|
else:
|
||||||
|
watermark_value = None
|
||||||
|
fill_watermark_value = "0s"
|
||||||
|
|
||||||
|
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||||
|
if fill_value:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
# create stb/ctb/tb stream
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||||
|
if fill_value:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||||
|
init_num = 0
|
||||||
|
start_time = self.tdCom.date_time
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
if i == 0:
|
||||||
|
if watermark is not None:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||||
|
else:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||||
|
else:
|
||||||
|
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||||
|
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||||
|
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
# if not fill_value:
|
||||||
|
# for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||||
|
# if tbname != self.tb_stream_des_table:
|
||||||
|
# tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||||
|
# else:
|
||||||
|
# tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||||
|
# tdSql.checkEqual(tdSql.queryRows, init_num)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
|
||||||
|
if i == 0:
|
||||||
|
init_num = 2 + i
|
||||||
|
if watermark is not None:
|
||||||
|
init_num += 1
|
||||||
|
else:
|
||||||
|
init_num += 1
|
||||||
|
time.sleep(int(max_delay.replace("s", "")))
|
||||||
|
if not fill_value:
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)')
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)')
|
||||||
|
if fill_value:
|
||||||
|
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||||
|
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||||
|
future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||||
|
end_ts = self.tdCom.time_cast(future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||||
|
future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts)
|
||||||
|
if watermark is not None:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||||
|
else:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'])
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
|
||||||
|
if self.tdCom.update:
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
if i == 0:
|
||||||
|
if watermark is not None:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||||
|
else:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||||
|
else:
|
||||||
|
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||||
|
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||||
|
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
if self.delete:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||||
|
time.sleep(int(max_delay.replace("s", "")))
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value)
|
||||||
|
else:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value)
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for watermark in [None, random.randint(20, 25)]:
|
||||||
|
self.watermark_max_delay_interval(interval=random.choice([15]), watermark=watermark, max_delay=f"{random.randint(5, 6)}s")
|
||||||
|
for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||||
|
self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,101 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def watermark_max_delay_interval_ext(self, interval, max_delay, watermark=None, fill_value=None, partition="tbname", delete=False, fill_history_value=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False):
|
||||||
|
tdLog.info(f"*** testing stream max_delay+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, max_delay: {max_delay}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
|
||||||
|
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
|
||||||
|
partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
|
||||||
|
else:
|
||||||
|
partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
|
||||||
|
if not stb_field_name_value:
|
||||||
|
stb_field_name_value = self.tdCom.tb_filter_des_select_elm
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
defined_tag_count = len(tag_value.split())
|
||||||
|
if watermark is not None:
|
||||||
|
self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
if subtable:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None
|
||||||
|
else:
|
||||||
|
stb_subtable_value = None
|
||||||
|
self.tdCom.date_time = 1658921623245
|
||||||
|
if watermark is not None:
|
||||||
|
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||||
|
else:
|
||||||
|
watermark_value = None
|
||||||
|
|
||||||
|
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||||
|
if fill_value:
|
||||||
|
if "value" in fill_value.lower():
|
||||||
|
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||||
|
# create stb/ctb/tb stream
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
|
||||||
|
|
||||||
|
init_num = 0
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
if i == 0:
|
||||||
|
if watermark is not None:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||||
|
else:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||||
|
else:
|
||||||
|
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||||
|
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||||
|
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
|
||||||
|
if i == 0:
|
||||||
|
init_num = 2 + i
|
||||||
|
if watermark is not None:
|
||||||
|
init_num += 1
|
||||||
|
else:
|
||||||
|
init_num += 1
|
||||||
|
time.sleep(int(max_delay.replace("s", "")))
|
||||||
|
if tag_value:
|
||||||
|
tdSql.query(f'select {tag_value} from {self.stb_name}')
|
||||||
|
tag_value_list = tdSql.queryResult
|
||||||
|
if not fill_value:
|
||||||
|
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts;', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for delete in [True, False]:
|
||||||
|
for fill_history_value in [0, 1]:
|
||||||
|
self.watermark_max_delay_interval_ext(interval=random.choice([15]), watermark=random.randint(20, 25), max_delay=f"{random.randint(5, 6)}s", delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,100 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def watermark_max_delay_session(self, session, watermark, max_delay, fill_history_value=None):
|
||||||
|
tdLog.info(f"*** testing stream max_delay+session: session: {session}, watermark: {watermark}, max_delay: {max_delay}, fill_history_value: {fill_history_value} ***")
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
if watermark is not None:
|
||||||
|
self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
|
||||||
|
|
||||||
|
if watermark is not None:
|
||||||
|
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||||
|
else:
|
||||||
|
watermark_value = None
|
||||||
|
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value)
|
||||||
|
init_num = 0
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
if i == 0:
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||||
|
else:
|
||||||
|
self.tdCom.date_time = window_close_ts + 1
|
||||||
|
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||||
|
|
||||||
|
if watermark_value is not None:
|
||||||
|
for ts_value in [self.tdCom.date_time, window_close_ts-1]:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||||
|
for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||||
|
if tbname != self.tb_stream_des_table:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||||
|
else:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||||
|
if not fill_history_value:
|
||||||
|
tdSql.checkEqual(tdSql.queryRows, init_num)
|
||||||
|
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||||
|
if i == 0:
|
||||||
|
init_num = 2 + i
|
||||||
|
else:
|
||||||
|
init_num += 1
|
||||||
|
if watermark_value is not None:
|
||||||
|
expected_value = init_num
|
||||||
|
else:
|
||||||
|
expected_value = i + 1
|
||||||
|
|
||||||
|
if not fill_history_value:
|
||||||
|
for tbname in [self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay)
|
||||||
|
else:
|
||||||
|
self.tdCom.update_delete_history_data(delete=True)
|
||||||
|
for tbname in [self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for fill_history_value in [None, 1]:
|
||||||
|
for watermark in [None, random.randint(20, 30)]:
|
||||||
|
self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,105 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def partitionby_interval(self, interval=None, partition_by_elm="tbname", ignore_expired=None):
|
||||||
|
tdLog.info(f"*** testing stream partition+interval: interval: {interval}, partition_by: {partition_by_elm}, ignore_expired: {ignore_expired} ***")
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(interval=interval)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
ctb_name_list = list()
|
||||||
|
for i in range(1, self.tdCom.range_count):
|
||||||
|
ctb_name = self.tdCom.get_long_name()
|
||||||
|
ctb_name_list.append(ctb_name)
|
||||||
|
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||||
|
if interval is not None:
|
||||||
|
source_sql = f'select _wstart AS wstart, {self.tdCom.partition_by_stb_source_select_str} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s)'
|
||||||
|
else:
|
||||||
|
source_sql = f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm}'
|
||||||
|
|
||||||
|
# create stb/ctb/tb stream
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=source_sql, ignore_expired=ignore_expired)
|
||||||
|
# insert data
|
||||||
|
count = 1
|
||||||
|
step_count = 1
|
||||||
|
for i in range(1, self.tdCom.range_count):
|
||||||
|
if i == 1:
|
||||||
|
record_window_close_ts = self.tdCom.date_time - 15 * self.tdCom.offset
|
||||||
|
ctb_name = self.tdCom.get_long_name()
|
||||||
|
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||||
|
if i % 2 == 0:
|
||||||
|
step_count += i
|
||||||
|
for j in range(count, step_count):
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s')
|
||||||
|
for ctb_name in ctb_name_list:
|
||||||
|
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s')
|
||||||
|
count += i
|
||||||
|
else:
|
||||||
|
step_count += 1
|
||||||
|
for i in range(2):
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s')
|
||||||
|
for ctb_name in ctb_name_list:
|
||||||
|
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s')
|
||||||
|
count += 1
|
||||||
|
# check result
|
||||||
|
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||||
|
if "first" not in colname and "last" not in colname:
|
||||||
|
if interval is not None:
|
||||||
|
self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')
|
||||||
|
|
||||||
|
if self.tdCom.disorder:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||||
|
for ctb_name in ctb_name_list:
|
||||||
|
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=record_window_close_ts)
|
||||||
|
if ignore_expired:
|
||||||
|
if "first" not in colname and "last" not in colname:
|
||||||
|
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||||
|
if interval is not None:
|
||||||
|
tdSql.query(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;')
|
||||||
|
res1 = tdSql.queryResult
|
||||||
|
tdSql.query(f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
|
||||||
|
res2 = tdSql.queryResult
|
||||||
|
tdSql.checkNotEqual(res1, res2)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')
|
||||||
|
|
||||||
|
else:
|
||||||
|
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||||
|
if "first" not in colname and "last" not in colname:
|
||||||
|
if interval is not None:
|
||||||
|
self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for interval in [None, 10]:
|
||||||
|
for ignore_expired in [0, 1]:
|
||||||
|
self.partitionby_interval(interval=interval, partition_by_elm="tbname", ignore_expired=ignore_expired)
|
||||||
|
self.partitionby_interval(interval=10, partition_by_elm="t1")
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,154 @@
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def pause_resume_test(self, interval, partition="tbname", delete=False, fill_history_value=None, pause=True, resume=True, ignore_untreated=False):
|
||||||
|
tdLog.info(f"*** testing stream pause+resume: interval: {interval}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, ignore_untreated: {ignore_untreated} ***")
|
||||||
|
if_exist_value_list = [None, True]
|
||||||
|
if_exist = random.choice(if_exist_value_list)
|
||||||
|
reverse_check = True if ignore_untreated else False
|
||||||
|
range_count = (self.tdCom.range_count + 3) * 3
|
||||||
|
self.delete = delete
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value)
|
||||||
|
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||||
|
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||||
|
|
||||||
|
if partition == "tbname":
|
||||||
|
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||||
|
elif partition == "c1":
|
||||||
|
partition_elm_alias = self.tdCom.partition_col_alias
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||||
|
elif partition is None:
|
||||||
|
partition_elm_alias = '"no_partition"'
|
||||||
|
else:
|
||||||
|
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||||
|
if partition == "tbname" or partition is None:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
else:
|
||||||
|
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||||
|
if partition:
|
||||||
|
partition_elm = f'partition by {partition} {partition_elm_alias}'
|
||||||
|
else:
|
||||||
|
partition_elm = ""
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||||
|
for i in range(range_count):
|
||||||
|
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||||
|
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||||
|
if self.tdCom.update and i%2 == 0:
|
||||||
|
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||||
|
if self.delete and i%2 != 0:
|
||||||
|
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value)
|
||||||
|
self.tdCom.date_time += 1
|
||||||
|
if partition:
|
||||||
|
partition_elm = f'partition by {partition}'
|
||||||
|
else:
|
||||||
|
partition_elm = ""
|
||||||
|
# if i == int(range_count/2):
|
||||||
|
if i > 2 and i % 3 == 0:
|
||||||
|
for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']:
|
||||||
|
if if_exist is not None:
|
||||||
|
tdSql.execute(f'pause stream if exists {stream_name}_no_exist')
|
||||||
|
tdSql.error(f'pause stream if not exists {stream_name}')
|
||||||
|
tdSql.error(f'pause stream {stream_name}_no_exist')
|
||||||
|
self.tdCom.pause_stream(stream_name, if_exist)
|
||||||
|
if pause and not resume and range_count-i <= 3:
|
||||||
|
time.sleep(self.tdCom.default_interval)
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart')
|
||||||
|
res_after_pause = tdSql.queryResult
|
||||||
|
if resume:
|
||||||
|
if i > 2 and i % 3 != 0:
|
||||||
|
for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']:
|
||||||
|
if if_exist is not None:
|
||||||
|
tdSql.execute(f'resume stream if exists {stream_name}_no_exist')
|
||||||
|
tdSql.error(f'resume stream if not exists {stream_name}')
|
||||||
|
self.tdCom.resume_stream(stream_name, if_exist, None, ignore_untreated)
|
||||||
|
if pause and not resume:
|
||||||
|
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart')
|
||||||
|
res_without_resume = tdSql.queryResult
|
||||||
|
tdSql.checkEqual(res_after_pause, res_without_resume)
|
||||||
|
else:
|
||||||
|
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||||
|
if tbname != self.tb_name:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check)
|
||||||
|
else:
|
||||||
|
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check)
|
||||||
|
|
||||||
|
if self.tdCom.subtable:
|
||||||
|
for tname in [self.stb_name, self.ctb_name]:
|
||||||
|
tdSql.query(f'select * from {self.ctb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition is None:
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
tdSql.query(f'select * from {self.tb_name}')
|
||||||
|
ptn_counter = 0
|
||||||
|
for c1_value in tdSql.queryResult:
|
||||||
|
if partition == "c1":
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition is None:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "abs(c1)":
|
||||||
|
abs_c1_value = abs(c1_value[1])
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||||
|
elif partition == "tbname" and ptn_counter == 0:
|
||||||
|
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||||
|
ptn_counter += 1
|
||||||
|
|
||||||
|
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for delete in [True, False]:
|
||||||
|
for fill_history_value in [0, 1]:
|
||||||
|
# pause/resume
|
||||||
|
self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=False, fill_history_value=fill_history_value, delete=delete)
|
||||||
|
self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=True, fill_history_value=fill_history_value, delete=delete)
|
||||||
|
# self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", resume=False, fill_history_value=fill_history_value, delete=delete)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,177 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor(), logSql)
|
||||||
|
self.tdCom = tdCom
|
||||||
|
|
||||||
|
def scalar_function(self, partition="tbname", fill_history_value=None):
|
||||||
|
tdLog.info(f"*** testing stream scalar funtion partition: {partition}, fill_history_value: {fill_history_value} ***")
|
||||||
|
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||||
|
tdLog.info("preparing data ...")
|
||||||
|
self.tdCom.prepare_data(fill_history_value=fill_history_value)
|
||||||
|
# return
|
||||||
|
tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);')
|
||||||
|
tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
|
||||||
|
tdSql.execute('create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));')
|
||||||
|
if fill_history_value is None:
|
||||||
|
fill_history = ""
|
||||||
|
else:
|
||||||
|
tdLog.info("inserting fill_history data ...")
|
||||||
|
fill_history = f'fill_history {fill_history_value}'
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
|
||||||
|
|
||||||
|
# self.tdCom.write_latency(self.case_name)
|
||||||
|
math_function_list = ["abs", "acos", "asin", "atan", "ceil", "cos", "floor", "log", "pow", "round", "sin", "sqrt", "tan"]
|
||||||
|
string_function_list = ["char_length", "concat", "concat_ws", "length", "lower", "ltrim", "rtrim", "substr", "upper"]
|
||||||
|
for math_function in math_function_list:
|
||||||
|
tdLog.info(f"testing function {math_function} ...")
|
||||||
|
tdLog.info(f"creating stream for function {math_function} ...")
|
||||||
|
if math_function in ["log", "pow"]:
|
||||||
|
tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_tb;')
|
||||||
|
else:
|
||||||
|
tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_tb;')
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{math_function}_stb", math_function)
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{math_function}_ctb", math_function)
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{math_function}_tb", math_function)
|
||||||
|
for tbname in ["scalar_ct1", "scalar_tb"]:
|
||||||
|
tdLog.info(f"function {math_function}: inserting data for tb --- {tbname} ...")
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");')
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");')
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);')
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
|
||||||
|
if i%2 == 0:
|
||||||
|
tdLog.info(f"function {math_function}: update testing ...")
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
else:
|
||||||
|
tdLog.info(f"function {math_function}: delete testing ...")
|
||||||
|
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||||
|
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||||
|
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||||
|
|
||||||
|
if fill_history_value:
|
||||||
|
tdLog.info(f"function {math_function}: disorder testing ...")
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)'
|
||||||
|
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||||
|
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||||
|
if math_function == "log" or math_function == "pow":
|
||||||
|
tdLog.info(f"function {math_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_stb partition by {partition} order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_ctb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_tb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_tb;')
|
||||||
|
else:
|
||||||
|
tdLog.info(f"function {math_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1), {math_function}(c2) from scalar_stb partition by {partition} order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_ctb;', f'select {math_function}(c1), {math_function}(c2) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_tb;', f'select {math_function}(c1), {math_function}(c2) from scalar_tb;')
|
||||||
|
tdSql.execute(f'drop stream if exists stb_{math_function}_stream')
|
||||||
|
tdSql.execute(f'drop stream if exists ctb_{math_function}_stream')
|
||||||
|
tdSql.execute(f'drop stream if exists tb_{math_function}_stream')
|
||||||
|
|
||||||
|
for string_function in string_function_list:
|
||||||
|
tdLog.info(f"testing function {string_function} ...")
|
||||||
|
tdLog.info(f"creating stream for function {string_function} ...")
|
||||||
|
if string_function == "concat":
|
||||||
|
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;')
|
||||||
|
elif string_function == "concat_ws":
|
||||||
|
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;')
|
||||||
|
elif string_function == "substr":
|
||||||
|
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;')
|
||||||
|
else:
|
||||||
|
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb partition by {partition};')
|
||||||
|
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;')
|
||||||
|
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;')
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{string_function}_stb", string_function)
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{string_function}_ctb", string_function)
|
||||||
|
self.tdCom.check_stream_field_type(f"describe output_{string_function}_tb", string_function)
|
||||||
|
for tbname in ["scalar_ct1", "scalar_tb"]:
|
||||||
|
tdLog.info(f"function {string_function}: inserting data for tb --- {tbname} ...")
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");')
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");')
|
||||||
|
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);')
|
||||||
|
|
||||||
|
|
||||||
|
for i in range(self.tdCom.range_count):
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
|
||||||
|
if i%2 == 0:
|
||||||
|
tdLog.info(f"function {string_function}: update testing...")
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
else:
|
||||||
|
tdLog.info(f"function {string_function}: delete testing ...")
|
||||||
|
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||||
|
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||||
|
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||||
|
|
||||||
|
if fill_history_value:
|
||||||
|
tdLog.info(f"function {string_function}: disorder testing ...")
|
||||||
|
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||||
|
dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)'
|
||||||
|
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||||
|
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||||
|
|
||||||
|
|
||||||
|
if string_function == "concat":
|
||||||
|
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;')
|
||||||
|
elif string_function == "concat_ws":
|
||||||
|
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;')
|
||||||
|
elif string_function == "substr":
|
||||||
|
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_ctb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_tb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;')
|
||||||
|
else:
|
||||||
|
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb order by ts;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_ctb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;')
|
||||||
|
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_tb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;')
|
||||||
|
|
||||||
|
tdSql.execute(f'drop stream if exists stb_{string_function}_stream')
|
||||||
|
tdSql.execute(f'drop stream if exists ctb_{string_function}_stream')
|
||||||
|
tdSql.execute(f'drop stream if exists tb_{string_function}_stream')
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.scalar_function(partition="tbname", fill_history_value=1)
|
||||||
|
self.scalar_function(partition="tbname,c1,t1", fill_history_value=1)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,256 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
    """Stream test: trigger_mode="window_close" combined with INTERVAL windows.

    Verifies that an interval-window stream only emits a window's result after
    the window closes (optionally delayed by a watermark), and that the
    fill / ignore_expired / delete options match the equivalent batch query.

    NOTE(review): indentation was reconstructed from a mangled source; nesting
    of the post-loop verification blocks should be confirmed against upstream.
    """

    # Framework configuration: verbose debug logging, synchronous log writes.
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}

    def init(self, conn, logSql, replicaVar=1):
        """Framework entry point: bind the SQL helper and shared utilities."""
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # Shared helper object (data preparation, stream creation, checks).
        self.tdCom = tdCom

    def window_close_interval(self, interval, watermark=None, ignore_expired=None, partition="tbname", fill_value=None, delete=False):
        """Run one window_close+interval scenario.

        interval: interval window length in seconds.
        watermark: optional watermark in seconds; when set, window close is
            delayed by the watermark.
        ignore_expired: stream option — when truthy, late (expired) rows must
            NOT be recomputed, which the test asserts via checkNotEqual.
        partition: partition-by expression for the stream source query.
        fill_value: optional FILL clause spec (e.g. "NULL", "PREV", "VALUE,...").
        delete: when True, interleave row deletions to exercise delete handling.
        """
        tdLog.info(f"*** testing stream window_close+interval: interval: {interval}, watermark: {watermark}, ignore_expired: {ignore_expired}, partition: {partition}, fill: {fill_value}, delete: {delete} ***")
        self.delete = delete
        # Name the case after the current function (prefixed when watermarked).
        self.tdCom.case_name = sys._getframe().f_code.co_name
        if watermark is not None:
            self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
        self.tdCom.prepare_data(interval=interval, watermark=watermark)
        # Strip the db prefix so names can be embedded into per-test SQL.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        # Destination tables written by the streams under test.
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'

        # Pick the alias used for the partition expression in the stream SQL.
        if partition == "tbname":
            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1":
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "abs(c1)":
            partition_elm_alias = self.tdCom.partition_expression_alias
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        # SUBTABLE clause: tbname is already a string; other partition keys are
        # cast to varchar before concatenation.
        if partition == "tbname":
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

        if watermark is not None:
            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
        else:
            watermark_value = None
        # create stb/ctb/tb stream
        if fill_value:
            if "value" in fill_value.lower():
                # Super/child tables have 22 output columns to fill.
                fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
        self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value)
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=ctb_subtable_value, fill_value=fill_value)
        if fill_value:
            if "value" in fill_value.lower():
                # Normal table streams have only 11 output columns to fill.
                fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=tb_subtable_value, fill_value=fill_value)

        start_time = self.tdCom.date_time
        # Main loop: for each round, fill one interval window up to (but not
        # including) its close timestamp, verifying the stream stays silent,
        # then cross the close boundary and verify exactly one more result row.
        for i in range(self.tdCom.range_count):
            if i == 0:
                if watermark is not None:
                    window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
                else:
                    window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
            else:
                self.tdCom.date_time = window_close_ts + self.tdCom.offset
                window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
            if i == 0:
                # Remembered so a late (expired) row can be replayed afterwards.
                record_window_close_ts = window_close_ts
            for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
                ts_value=self.tdCom.date_time+num*self.tdCom.offset
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                if self.tdCom.update and i%2 == 0:
                    # Re-insert the same timestamp to exercise update handling.
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)

                if self.delete and i%2 != 0:
                    dt = f'cast({ts_value-num*self.tdCom.offset} as timestamp)'
                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
                    self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
            if not fill_value:
                # Window not yet closed: destination must still hold i rows.
                for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
                    if tbname != self.tb_stream_des_table:
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
                    else:
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
                    tdSql.checkEqual(tdSql.queryRows, i)

            # One tick before the close boundary: still no new result expected.
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
            if not fill_value:
                for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
                    if tbname != self.tb_stream_des_table:
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
                    else:
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')

                    tdSql.checkEqual(tdSql.queryRows, i)

            # Crossing the close boundary triggers emission of window i.
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)

            if not fill_value:
                # Stream output must now match the batch query over i+1 windows.
                for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1)
                    else:
                        self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1)
        if self.tdCom.disorder and not fill_value:
            # Replay a row into the long-closed first window (expired data).
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
            if ignore_expired:
                # With ignore_expired the stream must NOT recompute, so stream
                # output and a fresh batch query must now disagree.
                for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res1 = tdSql.queryResult
                        tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}')
                        res2 = tdSql.queryResult
                        tdSql.checkNotEqual(res1, res2)
                    else:
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
                        res1 = tdSql.queryResult
                        tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}')
                        res2 = tdSql.queryResult
                        tdSql.checkNotEqual(res1, res2)
            else:
                # Without ignore_expired the expired row must be incorporated.
                for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1)
                    else:
                        self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1)
        if self.tdCom.subtable:
            # Verify SUBTABLE naming: every generated sub-table must exist and
            # contain range_count rows.
            tdSql.query(f'select * from {self.ctb_name}')
            for tname in [self.stb_name, self.ctb_name]:
                ptn_counter = 0
                for c1_value in tdSql.queryResult:
                    if partition == "c1":
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
                    elif partition == "abs(c1)":
                        abs_c1_value = abs(c1_value[1])
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
                    elif partition == "tbname" and ptn_counter == 0:
                        # Only one sub-table per source table when keyed by tbname.
                        tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
                        ptn_counter += 1

                    tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count)
                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

            tdSql.query(f'select * from {self.tb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if partition == "c1":
                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
                elif partition == "abs(c1)":
                    abs_c1_value = abs(c1_value[1])
                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
                elif partition == "tbname" and ptn_counter == 0:
                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
                    ptn_counter += 1

                tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

        if fill_value:
            # Extend the time range on both sides so the FILL clause has gaps
            # to fill, then compare stream output against the batch FILL query.
            history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
            start_ts = self.tdCom.time_cast(history_ts, "-")
            future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
            end_ts = self.tdCom.time_cast(future_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
            future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts)
            if watermark is not None:
                window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
            else:
                window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'])
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)

            if self.tdCom.update:
                # Replay the whole insert sequence to exercise updates under FILL.
                for i in range(self.tdCom.range_count):
                    if i == 0:
                        if watermark is not None:
                            window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
                        else:
                            window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
                    else:
                        self.tdCom.date_time = window_close_ts + self.tdCom.offset
                        window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
                    if i == 0:
                        record_window_close_ts = window_close_ts
                    for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
                        ts_value=self.tdCom.date_time+num*self.tdCom.offset
                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                        self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
            self.tdCom.date_time = start_time
            for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    if "value" in fill_value.lower():
                        fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                    if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete:
                        # After deletes, compare only windows with real data left.
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value)
                    else:
                        if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()):
                            additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}"
                        else:
                            additional_options = f"where ts >= {start_ts} and ts <= {end_ts}"
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                else:
                    if "value" in fill_value.lower():
                        fill_value='VALUE,1,2,3,6,7,8,9,10,11'
                    if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete:
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value)
                    else:
                        if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()):
                            additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}"
                        else:
                            additional_options = f"where ts >= {start_ts} and ts <= {end_ts}"
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)

    def run(self):
        """Sweep watermark/ignore_expired combinations, then FILL variants."""
        for watermark in [None, random.randint(15, 20)]:
            for ignore_expired in [0, 1]:
                self.window_close_interval(interval=random.randint(10, 15), watermark=watermark, ignore_expired=ignore_expired)
        for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
            for watermark in [None, random.randint(15, 20)]:
                self.window_close_interval(interval=random.randint(10, 12), watermark=watermark, fill_value=fill_value)

    def stop(self):
        """Framework teardown: release the SQL connection and log success."""
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
# Module-level synchronization primitive available to test helpers.
event = threading.Event()

# Register this test case with the framework for Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,98 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
    """Stream test: trigger_mode="window_close" combined with SESSION windows.

    A session window closes once the gap after its last row exceeds the
    session timeout (plus the watermark, when set). The test inserts rows so
    each round completes exactly one session window, then checks the stream
    destination tables against the equivalent batch session query.

    NOTE(review): indentation was reconstructed from a mangled source; nesting
    should be confirmed against upstream.
    """

    # Framework configuration: verbose debug logging, synchronous log writes.
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}

    def init(self, conn, logSql, replicaVar=1):
        """Framework entry point: bind the SQL helper and shared utilities."""
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # Shared helper object (data preparation, stream creation, checks).
        self.tdCom = tdCom

    def watermark_window_close_session(self, session, watermark, fill_history_value=None, delete=True):
        """Run one window_close+session scenario.

        session: session gap in seconds.
        watermark: optional watermark in seconds delaying window close.
        fill_history_value: when truthy, the stream also processes pre-existing
            history data, which is then updated/deleted and re-verified.
        delete: passed through to the history update/delete helper.
        """
        tdLog.info(f"*** testing stream window_close+session: session: {session}, watermark: {watermark}, fill_history: {fill_history_value}, delete: {delete} ***")
        self.case_name = sys._getframe().f_code.co_name
        if watermark is not None:
            self.case_name = "watermark" + sys._getframe().f_code.co_name
        self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value)
        # Strip the db prefix so names can be embedded into per-test SQL.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        # Destination tables written by the streams under test.
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
        if watermark is not None:
            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
        else:
            watermark_value = None
        # create stb/ctb/tb stream
        # self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value)
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
        # Each round builds one session window and then closes it.
        for i in range(self.tdCom.range_count):
            if i == 0:
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
            else:
                self.tdCom.date_time = window_close_ts + 1
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
            if watermark_value is not None:
                # Window i closes this round, so i+1 results are expected after it.
                expected_value = i + 1
                for ts_value in [self.tdCom.date_time, window_close_ts-1]:
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                    self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                    if self.tdCom.update and i%2 == 0:
                        # Re-insert the same timestamp to exercise update handling.
                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                        self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                # for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
                for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]:
                    if tbname != self.tb_stream_des_table:
                        tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
                    else:
                        tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
                    if not fill_history_value:
                        # Window not yet closed: destination still holds i rows.
                        tdSql.checkEqual(tdSql.queryRows, i)
            else:
                expected_value = i
            # Crossing the close boundary triggers emission of window i.
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)

        if fill_history_value:
            self.tdCom.update_delete_history_data(delete=delete)

        # for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
        if not fill_history_value:
            for tbname in [self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
                else:
                    self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
        else:
            # With fill_history the history window is included as well (+1).
            for tbname in [self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')
                else:
                    self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')

    def run(self):
        """Sweep fill_history/watermark combinations with random timings."""
        for fill_history_value in [None, 1]:
            for watermark in [None, random.randint(20, 25)]:
                self.watermark_window_close_session(session=random.randint(10, 15), watermark=watermark, fill_history_value=fill_history_value)

    def stop(self):
        """Framework teardown: release the SQL connection and log success."""
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
# Module-level synchronization primitive available to test helpers.
event = threading.Event()

# Register this test case with the framework for Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,83 @@
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
    """Stream test: window_close + SESSION window writing into an EXISTING
    super table with custom tag mapping (stb_field_name / TAGS options).

    NOTE(review): indentation was reconstructed from a mangled source; nesting
    should be confirmed against upstream.
    """

    # Framework configuration: verbose debug logging, synchronous log writes.
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}

    def init(self, conn, logSql, replicaVar=1):
        """Framework entry point: bind the SQL helper and shared utilities."""
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # Shared helper object (data preparation, stream creation, checks).
        self.tdCom = tdCom

    def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, delete=False):
        """Run one window_close+session scenario targeting an existing stb.

        session/watermark: session gap and close delay, in seconds.
        stb_field_name_value: explicit destination column list; defaults to
            the full filtered column list when not given.
        tag_value: tag columns mapped into the destination super table;
            NOTE(review): tag_value.split() below assumes tag_value is not
            None — confirm all callers pass it (run() currently does).
        use_exist_stb: create the stream against a pre-created super table.
        """
        tdLog.info(f"*** testing stream window_close+session+exist_stb+custom_tag: session: {session}, partition: {partition}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
        # Partial column lists need the matching partial source select string.
        if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
            partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
        else:
            partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
        if not stb_field_name_value:
            stb_field_name_value = self.tdCom.tb_filter_des_select_elm
        self.tdCom.case_name = sys._getframe().f_code.co_name
        defined_tag_count = len(tag_value.split())
        if watermark is not None:
            self.case_name = "watermark" + sys._getframe().f_code.co_name
        self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
        # Strip the db prefix so names can be embedded into per-test SQL.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
        if subtable:
            # NOTE(review): self.subtable_prefix / self.subtable_suffix /
            # self.subtable are not set on this class (other cases use
            # self.tdCom.*) — this branch would raise AttributeError if a
            # subtable value were ever passed; confirm against callers.
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None
        else:
            stb_subtable_value = None
        if watermark is not None:
            watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
        else:
            watermark_value = None
        # create stb/ctb/tb stream
        self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, subtable_value=stb_subtable_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
        # Each round builds one session window and then closes it.
        for i in range(self.tdCom.range_count):
            if i == 0:
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
            else:
                self.tdCom.date_time = window_close_ts + 1
                window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
            if watermark_value is not None:
                # Window i closes this round → i+1 results expected afterwards.
                expected_value = i + 1
                for ts_value in [self.tdCom.date_time, window_close_ts-1]:
                    self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                    if self.tdCom.update and i%2 == 0:
                        # Re-insert the same timestamp to exercise updates.
                        self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
            else:
                expected_value = i
            # Crossing the close boundary triggers emission of window i.
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
            if self.tdCom.update and i%2 == 0:
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)

        if fill_history_value:
            self.tdCom.update_delete_history_data(delete=delete)
        if tag_value:
            # Collect the tag values actually written, for tag verification.
            tdSql.query(f'select {tag_value} from {self.stb_name}')
            tag_value_list = tdSql.queryResult
        self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart limit {expected_value};', sorted=True, defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition)

    def run(self):
        """Run the existing-stb scenario (history variant disabled by TD-25893)."""
        #! TD-25893
        # self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=False, fill_history_value=1)
        self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=True)

    def stop(self):
        """Framework teardown: release the SQL connection and log success."""
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()

# Register the case with the framework for both platforms; each registrar
# receives its own fresh TDTestCase instance, as in the original.
for register_case in (tdCases.addLinux, tdCases.addWindows):
    register_case(__file__, TDTestCase())
|
|
# ---- diff boundary: the lines below belong to a second, newly added test file (hunk @ -0,0 +1,73) ----
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class TDTestCase:
    """Stream regression test: a stream created with trigger_mode="window_close"
    over a STATE_WINDOW source, exercised against both a child table (ctb) and a
    normal table (tb), with optional in-place updates and row deletes mixed in.
    """

    # Per-case server configuration overrides: verbose debug logging and
    # synchronous log flushing so failures are easier to diagnose.
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}

    def init(self, conn, logSql, replicaVar=1):
        # Framework entry point: bind the SQL cursor and the shared test helper.
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # tdCom is the shared helper object (star-imported from util.common).
        self.tdCom = tdCom

    def window_close_state_window(self, state_window, delete=True):
        """Run one window_close + state_window scenario.

        state_window: name of the column used as the STATE_WINDOW key (e.g. "c1").
        delete: when True, delete previously written rows on odd iterations to
                exercise the stream's handling of deletes.
        """
        tdLog.info(f"*** testing stream window_close+session: state_window: {state_window}, delete: {delete} ***")
        self.case_name = sys._getframe().f_code.co_name
        self.delete = delete
        # Prepare db/tables and seed metadata; dataDict carries per-case knobs.
        self.tdCom.prepare_data(state_window=state_window)
        # Strip the "<db>." prefix so names can be used in un-qualified SQL below.
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        # Destination-table names for the streams' output.
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        state_window_col_name = self.tdCom.dataDict["state_window"]
        # One window_close stream per source table (child table and normal table).
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close")
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close")
        state_window_max = self.tdCom.dataDict['state_window_max']
        # Last state value written; the next window must use a different value so
        # that the previous state window actually closes.
        state_window_value_inmem = 0
        sleep_step = 0
        for i in range(self.tdCom.range_count):
            # Draw the state value for this iteration from the i-th slice of
            # [0, state_window_max] so successive windows trend upward.
            state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
            while state_window_value == state_window_value_inmem:
                # Collision with the current state: re-roll, pausing 1s per try.
                state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
                if sleep_step < self.tdCom.default_interval:
                    sleep_step += 1
                    time.sleep(1)
                else:
                    # NOTE(review): sleep_step is never reset between iterations,
                    # so this retry budget is shared across the whole run; once
                    # exhausted the case ends early — confirm this is intended.
                    return
            # Write the same state value into both source tables for several
            # timestamps so the state window accumulates multiple rows.
            for j in range(2, self.tdCom.range_count+3):
                tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.tdCom.update and i%2 == 0:
                    # Update path: re-insert the identical row (same ts) to
                    # exercise upsert handling on even iterations.
                    tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                    tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.delete and i%2 != 0:
                    # Delete path: on odd iterations remove the row written at
                    # the previous timestamp from both source tables.
                    dt = f'cast({self.tdCom.date_time-1} as timestamp)'
                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
                    self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
                self.tdCom.date_time += 1
            # Verify each stream's destination table against the equivalent
            # ad-hoc state_window query, limited to the i windows closed so far.
            for tbname in [self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
                else:
                    self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
            state_window_value_inmem = state_window_value

    def run(self):
        # Exercise the scenario both with and without row deletion.
        for delete in [True, False]:
            self.window_close_state_window(state_window="c1", delete=delete)

    def stop(self):
        # Release the SQL connection and report success to the framework.
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()

# Register the case for both platforms; each registrar gets its own fresh
# TDTestCase instance, exactly as the original two separate calls did.
for register_case in (tdCases.addLinux, tdCases.addWindows):
    register_case(__file__, TDTestCase())
|
Loading…
Reference in New Issue