Merge branch '3.0' of github.com:taosdata/TDengine into szhou/tms-again
commit fe28af9fa3
|
@ -151,14 +151,6 @@ def pre_test(){
|
|||
cd ${WKC}
|
||||
git submodule update --init --recursive
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKPY}
|
||||
git reset --hard
|
||||
git pull
|
||||
git log -5
|
||||
echo "python connector log: `git log -5`" >>${WKDIR}/jenkins.log
|
||||
echo >>${WKDIR}/jenkins.log
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
def pre_test_build_mac() {
|
||||
|
@ -409,7 +401,7 @@ pipeline {
|
|||
}
|
||||
}
|
||||
stage('linux test') {
|
||||
agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
|
||||
agent{label " slave1_50 || slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
|
||||
options { skipDefaultCheckout() }
|
||||
when {
|
||||
changeRequest()
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.2.1.0.alpha")
|
||||
SET(TD_VER_NUMBER "3.2.2.0.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -13,6 +13,6 @@ ExternalProject_Add(xml2
|
|||
BUILD_IN_SOURCE TRUE
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install && ln -s $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
|
||||
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
|
|
|
@ -5,7 +5,8 @@ description: This document describes how to insert data into TDengine.
|
|||
---
|
||||
|
||||
## Syntax
|
||||
|
||||
Two syntaxes are supported for writing records: the normal syntax and the super table syntax. In the normal syntax, the table name immediately following `INSERT INTO` is a subtable name or a regular table name. In the super table syntax, the table name immediately following `INSERT INTO` is a super table name.
|
||||
### Normal Syntax
|
||||
```sql
|
||||
INSERT INTO
|
||||
tb_name
|
||||
|
@ -20,6 +21,15 @@ INSERT INTO
|
|||
|
||||
INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||
```
|
||||
### Super Table Syntax
|
||||
```sql
|
||||
INSERT INTO
|
||||
stb1_name [(field1_name, ...)]
|
||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||
[stb2_name [(field1_name, ...)]
|
||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||
...];
|
||||
```
|
||||
|
||||
**Timestamps**
|
||||
|
||||
|
@ -32,26 +42,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
|
|||
|
||||
**Syntax**
|
||||
|
||||
1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value.
|
||||
1. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value.
|
||||
|
||||
2. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value.
|
||||
2. The VALUES clause inserts one or more rows of data into a table.
|
||||
|
||||
3. The VALUES clause inserts one or more rows of data into a table.
|
||||
3. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
|
||||
|
||||
4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
|
||||
4. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
|
||||
|
||||
5. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
|
||||
|
||||
6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
|
||||
5. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
|
||||
|
||||
```sql
|
||||
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
|
||||
```
|
||||
|
||||
7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
|
||||
6. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This occurs because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
|
||||
|
||||
8. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
|
||||
**Normal Syntax**
|
||||
1. The USING clause automatically creates the specified subtable if it does not exist. If it is unknown whether the table already exists, it can be created automatically during insertion using the syntax below. To use this functionality, a STable must be used as a template and tag values must be provided. Any tags that you do not specify will be assigned a null value.
|
||||
|
||||
2. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
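As a minimal sketch of the `INSERT ... subquery` form described in item 2 above (the subtable names `d1001`/`d1002` and the one-hour window are assumptions for illustration only):

```sql
-- Copy the last hour of rows from subtable d1001 into d1002
-- (both assumed to already exist; this form does not create tables automatically).
INSERT INTO d1002 (ts, current, voltage, phase)
  SELECT ts, current, voltage, phase FROM d1001 WHERE ts > NOW - 1h;
```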
|
||||
|
||||
**Super Table Syntax**
|
||||
|
||||
1. The tbname column must be included in the field_name list and represents the name of the child table. This column is of string type, and the use of the . character is not permitted in the tbname column.
|
||||
|
||||
2. Tag columns are eligible for inclusion in the field_name list. If the specified child table doesn't exist, a new child table will be generated with the provided tag values. In the absence of specified tag values, the newly created table will have all NULL tag values. Existing child table tag values remain unchanged.
|
||||
|
||||
3. Parameter binding is not supported.
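The sketch below illustrates notes 1 and 2 of the super table syntax above: `tbname` names the child table, and only some tag columns are supplied, so unspecified tags are set to NULL if the child table has to be created. The table and tag names are assumptions based on the `meters` example used later in this document.

```sql
-- If d31003 does not exist, it is created under meters with groupId = 2
-- and the unspecified tag (location) set to NULL; if it already exists,
-- the tag value below is ignored and the existing tag values are kept.
INSERT INTO meters (tbname, groupId, ts, current, voltage, phase)
VALUES ('d31003', 2, NOW, 10.3, 219, 0.31);
```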
|
||||
## Insert a Record
|
||||
|
||||
A single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted with the statement below.
|
||||
|
@ -134,3 +152,14 @@ When writing data from a file, you can automatically create the specified subtab
|
|||
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
|
||||
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
||||
```
|
||||
## Super Table Syntax
|
||||
|
||||
The subtable is created automatically, with its name specified through the `tbname` column:
|
||||
|
||||
```sql
|
||||
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
|
||||
values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
|
||||
values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||
values('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
||||
```
|
||||
|
||||
|
|
|
@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)
|
|||
|
||||
## Import using taosdump
|
||||
|
||||
A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
|
||||
TDengine provides `taosdump`, a convenient tool for importing and exporting data. It can export data from one TDengine cluster and import it into another. For details on using `taosdump`, please refer to the taosdump documentation.
|
||||
|
|
|
@ -19,4 +19,4 @@ The data of table or STable specified by `tb_name` will be exported into a file
|
|||
|
||||
## Export Using taosdump
|
||||
|
||||
With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
|
||||
With `taosdump`, you can choose to export the data of all databases, a single database, a table, or a STable; you can also export only the data within a time range, or even just the schema definition of a table. For details on using `taosdump`, please refer to the taosdump documentation.
|
||||
|
|
|
@ -11,8 +11,6 @@ The collection of the monitoring information is enabled by default, but can be d
|
|||
|
||||
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
|
||||
|
||||
Please refer to [TDinsight Grafana Dashboard](../../reference/tdinsight) to learn more details about using TDinsight to monitor TDengine.
|
||||
|
||||
A script `TDinsight.sh` is provided to deploy TDinsight automatically.
|
||||
|
||||
Download `TDinsight.sh` with the below command:
|
||||
|
|
|
@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
|
|||
|
||||
| taos-jdbcdriver version | major changes | TDengine version |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||
| 3.2.7 | Support VARBINARY and GEOMETRY types, add time zone support for native connections, and support WebSocket auto-reconnection. | 3.2.0.0 or later |
|
||||
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
|
||||
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
||||
|
@ -178,7 +179,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.2</version>
|
||||
<version>3.2.7</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -218,7 +218,7 @@ The example to query the average system memory usage for the specified interval
|
|||
|
||||
### Importing the Dashboard
|
||||
|
||||
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. Please note TDinsight for 3.x needs to configure and run taoskeeper correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
You can install the TDinsight dashboard from the data source configuration page (for example `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring and visualization tool for a TDengine cluster. Ensure that you use TDinsight for 3.x. Note that TDinsight for 3.x requires taoskeeper to be configured and running correctly.
|
||||
|
||||

|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.2.1.0
|
||||
|
||||
<Release type="tdengine" version="3.2.1.0" />
|
||||
|
||||
## 3.2.0.0
|
||||
|
||||
<Release type="tdengine" version="3.2.0.0" />
|
||||
|
|
|
@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
|
|||
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||
| 3.2.7 | 支持 VARBINARY 和 GEOMETRY 类型,增加 native 连接的时区设置支持,增加 WebSocket 自动重连功能。 | 3.2.0.0 及更高版本 |
|
||||
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
|
||||
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
||||
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
||||
|
@ -177,7 +178,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.2</version>
|
||||
<version>3.2.7</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -1097,7 +1098,6 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||
其他参数请参考:[Consumer 参数列表](../../develop/tmq#创建-consumer-以及consumer-group)。注意:TDengine 服务端自 3.2.0.0 版本开始,消息订阅中的 auto.offset.reset 默认值发生变化。
|
||||
|
||||
|
||||
#### 订阅消费数据
|
||||
|
||||
```java
|
||||
|
|
|
@ -105,7 +105,7 @@ spec:
|
|||
# TZ for timezone settings, we recommend to always set it.
|
||||
- name: TZ
|
||||
value: "Asia/Shanghai"
|
||||
# TAOS_ prefix will configured in taos.cfg, strip prefix and camelCase.
|
||||
# Environment variables with prefix TAOS_ will be parsed and converted into corresponding parameter in taos.cfg. For example, serverPort in taos.cfg should be configured by TAOS_SERVER_PORT when using K8S to deploy
|
||||
- name: TAOS_SERVER_PORT
|
||||
value: "6030"
|
||||
# Must set if you want a cluster.
|
||||
|
|
|
@ -53,7 +53,7 @@ database_option: {
|
|||
- 1:表示一阶段压缩。
|
||||
- 2:表示两阶段压缩。
|
||||
- DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
|
||||
- WAL_FSYNC_PERIOD:当 WAL 参数设置为 2 时,落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
||||
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 <= keep 1 <= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
|
||||
|
|
|
@ -5,7 +5,9 @@ description: 写入数据的详细语法
|
|||
---
|
||||
|
||||
## 写入语法
|
||||
写入记录支持两种语法:正常语法和超级表语法。正常语法下,紧跟 INSERT INTO 后面的表名是子表名或者普通表名;超级表语法下,紧跟 INSERT INTO 后面的表名是超级表名。
|
||||
|
||||
### 正常语法
|
||||
```sql
|
||||
INSERT INTO
|
||||
tb_name
|
||||
|
@ -20,6 +22,15 @@ INSERT INTO
|
|||
|
||||
INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||
```
|
||||
### 超级表语法
|
||||
```sql
|
||||
INSERT INTO
|
||||
stb1_name [(field1_name, ...)]
|
||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||
[stb2_name [(field1_name, ...)]
|
||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||
...];
|
||||
```
|
||||
|
||||
**关于时间戳**
|
||||
|
||||
|
@ -32,26 +43,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
|
|||
|
||||
**语法说明**
|
||||
|
||||
1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。
|
||||
1. 可以指定要插入值的列,对于未指定的列数据库将自动填充为 NULL。
|
||||
|
||||
2. 可以指定要插入值的列,对于为指定的列数据库将自动填充为 NULL。
|
||||
2. VALUES 语法表示了要插入的一行或多行数据。
|
||||
|
||||
3. VALUES 语法表示了要插入的一行或多行数据。
|
||||
3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。
|
||||
|
||||
4. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。
|
||||
4. `INSERT ... VALUES` 语句和 `INSERT ... FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。
|
||||
|
||||
5. `INSERT ... VALUES` 语句和 `INSERT ... FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。
|
||||
|
||||
6. INSERT 语句是完整解析后再执行的,对如下语句,不会再出现数据错误但建表成功的情况:
|
||||
5. INSERT 语句是完整解析后再执行的,对如下语句,不会再出现数据错误但建表成功的情况:
|
||||
|
||||
```sql
|
||||
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
|
||||
```
|
||||
6. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
|
||||
|
||||
7. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
|
||||
**正常语法说明**
|
||||
|
||||
8. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
|
||||
1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。
|
||||
|
||||
2. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
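下面给出 `INSERT ... subquery` 写法的一个示意(子表名 d1001、d1002 与一小时时间窗口仅为示例假设,两个表均需已存在,此语法不支持自动建表):

```sql
-- 将子表 d1001 最近一小时的数据写入 d1002
INSERT INTO d1002 (ts, current, voltage, phase)
  SELECT ts, current, voltage, phase FROM d1001 WHERE ts > NOW - 1h;
```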
|
||||
|
||||
**超级表语法说明**
|
||||
|
||||
1. 在 field_name 列表中必须指定 tbname 列,否则报错。tbname 列是子表名,类型是字符串,其中字符不用转义,不能包含点 '.'。
|
||||
|
||||
2. 在 field_name 列表中支持标签列。当子表已经存在时,指定标签值并不会触发标签值的修改;当子表不存在时会使用所指定的标签值建立子表。如果没有指定任何标签列,则把所有标签列的值设置为 NULL。
|
||||
|
||||
3. 不支持参数绑定写入
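下面的示意展示了上述第 1、2 条说明:通过 tbname 指定子表名,并且只提供部分标签列(表名与取值仅为示例假设):

```sql
-- 若 d31003 不存在,则以 meters 为模板自动建表,groupId 取 2,
-- 未指定的 location 标签置为 NULL;若子表已存在,则忽略下面给出的标签值。
INSERT INTO meters (tbname, groupId, ts, current, voltage, phase)
VALUES ('d31003', 2, NOW, 10.3, 219, 0.31);
```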
|
||||
## 插入一条记录
|
||||
|
||||
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
|
||||
|
@ -134,3 +153,12 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
|
|||
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
|
||||
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
||||
```
|
||||
## 超级表语法
|
||||
|
||||
自动建表,表名通过 tbname 列指定。
|
||||
```sql
|
||||
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
|
||||
values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
|
||||
values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||
values('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
||||
```
|
||||
|
|
|
@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)
|
|||
|
||||
## taosdump 工具导入
|
||||
|
||||
TDengine 提供了方便的数据库导入导出工具 taosdump。用户可以将 taosdump 从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见:[TDengine 数据备份工具: taosdump](/reference/taosdump)。
|
||||
TDengine 提供了方便的数据库导入导出工具 taosdump。用户可以将 taosdump 从一个系统导出的数据,导入到其他系统中。具体使用方法,请参考 taosdump 的相关文档。
|
||||
|
|
|
@ -17,5 +17,4 @@ select * from <tb_name> >> data.csv;
|
|||
|
||||
## 用 taosdump 导出数据
|
||||
|
||||
利用 taosdump,用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见:
|
||||
[TDengine 数据备份工具: taosdump](/reference/taosdump)。
|
||||
利用 taosdump,用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参考 taosdump 的相关文档。
|
|
@ -218,7 +218,7 @@ docker run -d \
|
|||
|
||||
### 导入 Dashboard
|
||||
|
||||
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。注意 TDinsight for 3.x 需要运行和配置 taoskeeper,相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
|
||||
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。注意 TDinsight for 3.x 需要运行和配置 taoskeeper。
|
||||
|
||||

|
||||
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
---
|
||||
title: TSZ 压缩算法
|
||||
description: TDengine 对浮点数进行高效压缩的算法
|
||||
---
|
||||
|
||||
TSZ 压缩算法是 TDengine 为浮点数据类型提供的可选压缩算法,可以实现浮点数有损至无损全状态压缩,相比默认压缩算法, TSZ 压缩算法压缩率更高,即使切至无损状态,压缩率也会比默认压缩高一倍。
|
||||
|
||||
## 适合场景
|
||||
|
||||
- TSZ 压缩算法是通过数据预测技术完成的压缩,所以更适合有规律变化的数据
|
||||
- TSZ 压缩时间会更长一些,如果您的服务器 CPU 空闲多,存储空间小的情况下适合选用
|
||||
|
||||
## 使用步骤
|
||||
- TDengine 支持版本为 3.2.0.0 或以上
|
||||
- 开启选项
|
||||
在 taos.cfg 配置中增加以下内容,即可开启 TSZ 压缩算法,功能打开后,会替换默认算法。
|
||||
以下表示字段类型是 float 及 double 类型都使用此压缩算法,也可以单独只配置一个
|
||||
|
||||
```sql
|
||||
lossyColumns float|double
|
||||
```
|
||||
|
||||
- 配置需重启服务生效
|
||||
- Taosd 日志输出以下内容,表明功能已生效:
|
||||
|
||||
```sql
|
||||
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
|
||||
```
|
||||
|
||||
## 配置参数
|
||||
|
||||
### fPrecision
|
||||
FLOAT 类型精度控制:
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 设置 float 类型浮点数压缩精度 |
|
||||
| 取值范围 | 0.1 ~ 0.00000001 |
|
||||
| 缺省值 | 0.00000001 |
|
||||
| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
|
||||
|
||||
|
||||
|
||||
### dPrecision
|
||||
DOUBLE 类型精度控制:
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 设置 double 类型浮点数压缩精度 |
|
||||
| 取值范围 | 0.1 ~ 0.0000000000000001 |
|
||||
| 缺省值 | 0.0000000000000001 |
|
||||
| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
|
||||
|
||||
|
||||
### ifAdtFse
|
||||
TSZ 压缩中可选择的算法 FSE,默认为 HUFFMAN:
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 使用 FSE 算法替换 HUFFMAN 算法, FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法 |
|
||||
| 取值范围 | 0:关闭 1:打开 |
|
||||
| 缺省值 | 0:关闭 |
|
||||
|
||||
|
||||
## 注意事项
|
||||
- 打开 TSZ 后生成的存储数据格式,回退至 3.2.0.0 之前的版本,数据将不能被识别
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.2.1.0
|
||||
|
||||
<Release type="tdengine" version="3.2.1.0" />
|
||||
|
||||
## 3.2.0.0
|
||||
|
||||
<Release type="tdengine" version="3.2.0.0" />
|
||||
|
|
|
@ -67,7 +67,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.7</version>
|
||||
<!-- <scope>system</scope>-->
|
||||
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
||||
</dependency>
|
||||
|
|
|
@ -118,6 +118,13 @@ int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int
|
|||
void TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen);
|
||||
int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr);
|
||||
|
||||
/// @brief get the offset in seconds from UTC (the zero timezone) to the input timezone
/// for a +XX timezone, the offset relative to UTC is a negative value
/// @param tzStr timezone string, e.g. +0800, -0830, -08
/// @param offset offset in seconds, e.g. +08 gives an offset of -28800, -01 gives 3600
/// @return 0 on success, non-zero on failure
|
||||
int32_t offsetOfTimezone(char* tzStr, int64_t* offset);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -388,6 +388,8 @@ typedef struct SStateStore {
|
|||
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
|
||||
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
|
||||
int32_t (*streamStateSessionAllocWinBuffByNextPosition)(SStreamState* pState, SStreamStateCur* pCur,
|
||||
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
|
||||
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
|
||||
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
|
||||
|
|
|
@ -57,6 +57,8 @@ int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
|
|||
int32_t streamStateSessionClear(SStreamState* pState);
|
||||
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
|
||||
int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur,
|
||||
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
|
||||
SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
|
||||
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
|
||||
|
@ -66,6 +68,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, cons
|
|||
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
|
||||
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
|
||||
|
||||
// fill
|
||||
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
|
||||
|
|
|
@ -140,8 +140,6 @@ typedef enum EStreamTaskEvent {
|
|||
TASK_EVENT_RESUME = 0x9,
|
||||
TASK_EVENT_HALT = 0xA,
|
||||
TASK_EVENT_DROPPING = 0xB,
|
||||
TASK_EVENT_SCAN_TSDB = 0xC,
|
||||
TASK_EVENT_SCAN_WAL = 0xD,
|
||||
} EStreamTaskEvent;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -78,6 +78,8 @@ int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos)
|
|||
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void* key, size_t keyLen);
|
||||
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
|
||||
int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur,
|
||||
const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen);
|
||||
|
||||
SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
|
||||
int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
|
||||
|
|
|
@ -640,6 +640,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_SCH_IGNORE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2503)
|
||||
#define TSDB_CODE_SCH_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x2504)
|
||||
#define TSDB_CODE_SCH_JOB_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x2505)
|
||||
#define TSDB_CODE_SCH_JOB_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2506)
|
||||
|
||||
//parser
|
||||
#define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600)
|
||||
|
|
|
@ -1140,3 +1140,5 @@ elif [ "$verType" == "client" ]; then
|
|||
else
|
||||
echo "please input correct verType"
|
||||
fi
|
||||
|
||||
|
||||
|
|
|
@ -281,6 +281,11 @@ fi
|
|||
chmod a+x ${install_dir}/install.sh
|
||||
|
||||
if [[ $dbName == "taos" ]]; then
|
||||
cp ${top_dir}/../enterprise/packaging/start-all.sh ${install_dir}
|
||||
cp ${top_dir}/../enterprise/packaging/stop-all.sh ${install_dir}
|
||||
cp ${top_dir}/../enterprise/packaging/README.md ${install_dir}
|
||||
chmod a+x ${install_dir}/start-all.sh
|
||||
chmod a+x ${install_dir}/stop-all.sh
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
|
@ -360,12 +365,6 @@ if [ "$verMode" == "cluster" ]; then
|
|||
git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
|
||||
cp ${top_dir}/../enterprise/packaging/start-all.sh ${install_dir}
|
||||
cp ${top_dir}/../enterprise/packaging/stop-all.sh ${install_dir}
|
||||
cp ${top_dir}/../enterprise/packaging/README.md ${install_dir}
|
||||
chmod a+x ${install_dir}/start-all.sh
|
||||
chmod a+x ${install_dir}/stop-all.sh
|
||||
|
||||
# copy taosx
|
||||
if [ -d ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ]; then
|
||||
cp -r ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ${install_dir}
|
||||
|
|
|
@ -497,12 +497,6 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
|
|||
S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data);
|
||||
} while (S3_status_is_retryable(data.status) && should_retry());
|
||||
|
||||
if (data.infileFD) {
|
||||
taosCloseFile(&data.infileFD);
|
||||
} else if (data.gb) {
|
||||
growbuffer_destroy(data.gb);
|
||||
}
|
||||
|
||||
if (data.status != S3StatusOK) {
|
||||
s3PrintError(__func__, data.status, data.err_msg);
|
||||
code = TAOS_SYSTEM_ERROR(EIO);
|
||||
|
@ -520,8 +514,13 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
|
|||
|
||||
// div round up
|
||||
int seq;
|
||||
uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 8;
|
||||
int totalSeq = ((contentLength + chunk_size - 1) / chunk_size);
|
||||
uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 7;
|
||||
int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
|
||||
const int max_part_num = 1000;
|
||||
if (totalSeq > max_part_num) {
|
||||
chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
|
||||
totalSeq = (contentLength + chunk_size - 1) / chunk_size;
|
||||
}
|
||||
|
||||
MultipartPartData partData;
|
||||
memset(&partData, 0, sizeof(MultipartPartData));
|
||||
|
@ -626,6 +625,12 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
|
|||
taosMemoryFree(manager.etags);
|
||||
}
|
||||
|
||||
if (data.infileFD) {
|
||||
taosCloseFile(&data.infileFD);
|
||||
} else if (data.gb) {
|
||||
growbuffer_destroy(data.gb);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -719,6 +724,8 @@ static SArray *getListByPrefix(const char *prefix) {
|
|||
} else {
|
||||
s3PrintError(__func__, data.status, data.err_msg);
|
||||
}
|
||||
|
||||
taosArrayDestroyEx(data.objectArray, s3FreeObjectKey);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -278,7 +278,7 @@ char tsS3Hostname[TSDB_FQDN_LEN] = "<hostname>";
|
|||
int32_t tsS3BlockSize = -1; // number of tsdb pages (4096)
|
||||
int32_t tsS3BlockCacheSize = 16; // number of blocks
|
||||
int32_t tsS3PageCacheSize = 4096; // number of pages
|
||||
int32_t tsS3UploadDelaySec = 60 * 60;
|
||||
int32_t tsS3UploadDelaySec = 60 * 60 * 24;
|
||||
|
||||
#ifndef _STORAGE
|
||||
int32_t taosSetTfsCfg(SConfig *pCfg) {
|
||||
|
@ -507,8 +507,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
|||
|
||||
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
|
||||
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
|
||||
if (tsNumOfTaskQueueThreads >= 10) {
|
||||
tsNumOfTaskQueueThreads = 10;
|
||||
if (tsNumOfTaskQueueThreads >= 50) {
|
||||
tsNumOfTaskQueueThreads = 50;
|
||||
}
|
||||
if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, CFG_SCOPE_CLIENT, CFG_DYN_NONE) != 0)
|
||||
return -1;
|
||||
|
@ -1478,6 +1478,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
bool matched = false;
|
||||
|
||||
int32_t len = strlen(name);
|
||||
char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
|
||||
strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len));
|
||||
|
@ -1486,6 +1488,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
if (strcasecmp("debugFlag", name) == 0) {
|
||||
int32_t flag = pItem->i32;
|
||||
taosSetAllDebugFlag(flag, true);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1494,6 +1497,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
bool enableCore = pItem->bval;
|
||||
taosSetCoreDump(enableCore);
|
||||
uInfo("%s set to %d", name, enableCore);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1512,6 +1516,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||
matched = true;
|
||||
} else if (strcasecmp("firstEp", name) == 0) {
|
||||
tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
|
||||
tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
|
||||
|
@ -1526,6 +1531,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1536,10 +1542,12 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
taosSetSystemLocale(locale, charset);
|
||||
osSetSystemLocale(locale, charset);
|
||||
uInfo("locale set to '%s', charset set to '%s'", locale, charset);
|
||||
matched = true;
|
||||
} else if (strcasecmp("logDir", name) == 0) {
|
||||
uInfo("%s set from '%s' to '%s'", name, tsLogDir, pItem->str);
|
||||
tstrncpy(tsLogDir, pItem->str, PATH_MAX);
|
||||
taosExpandDir(tsLogDir, tsLogDir, PATH_MAX);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1547,15 +1555,19 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
if (strcasecmp("metaCacheMaxSize", name) == 0) {
|
||||
atomic_store_32(&tsMetaCacheMaxSize, pItem->i32);
|
||||
uInfo("%s set to %d", name, atomic_load_32(&tsMetaCacheMaxSize));
|
||||
matched = true;
|
||||
} else if (strcasecmp("minimalTmpDirGB", name) == 0) {
|
||||
tsTempSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
|
||||
uInfo("%s set to %" PRId64, name, tsTempSpace.reserved);
|
||||
matched = true;
|
||||
} else if (strcasecmp("minimalDataDirGB", name) == 0) {
|
||||
tsDataSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
|
||||
uInfo("%s set to %" PRId64, name, tsDataSpace.reserved);
|
||||
matched = true;
|
||||
} else if (strcasecmp("minimalLogDirGB", name) == 0) {
|
||||
tsLogSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
|
||||
uInfo("%s set to %" PRId64, name, tsLogSpace.reserved);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1566,18 +1578,23 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
snprintf(tsSecond, sizeof(tsSecond), "%s:%u", secondEp.fqdn, secondEp.port);
|
||||
cfgSetItem(pCfg, "secondEp", tsSecond, pItem->stype);
|
||||
uInfo("%s set to %s", name, tsSecond);
|
||||
matched = true;
|
||||
} else if (strcasecmp("smlChildTableName", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, tsSmlChildTableName, pItem->str);
|
||||
tstrncpy(tsSmlChildTableName, pItem->str, TSDB_TABLE_NAME_LEN);
|
||||
matched = true;
|
||||
} else if (strcasecmp("smlAutoChildTableNameDelimiter", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, tsSmlAutoChildTableNameDelimiter, pItem->str);
|
||||
tstrncpy(tsSmlAutoChildTableNameDelimiter, pItem->str, TSDB_TABLE_NAME_LEN);
|
||||
matched = true;
|
||||
} else if (strcasecmp("smlTagName", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, tsSmlTagName, pItem->str);
|
||||
tstrncpy(tsSmlTagName, pItem->str, TSDB_COL_NAME_LEN);
|
||||
matched = true;
|
||||
} else if (strcasecmp("smlTsDefaultName", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, tsSmlTsDefaultName, pItem->str);
|
||||
tstrncpy(tsSmlTsDefaultName, pItem->str, TSDB_COL_NAME_LEN);
|
||||
matched = true;
|
||||
} else if (strcasecmp("serverPort", name) == 0) {
|
||||
tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
|
||||
tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
|
||||
|
@ -1592,11 +1609,13 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port);
|
||||
cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype);
|
||||
uInfo("localEp set to '%s', tsFirst set to '%s'", tsLocalEp, tsFirst);
|
||||
matched = true;
|
||||
} else if (strcasecmp("slowLogScope", name) == 0) {
|
||||
if (taosSetSlowLogScope(pItem->str)) {
|
||||
return -1;
|
||||
}
|
||||
uInfo("%s set to %s", name, pItem->str);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1605,6 +1624,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
osSetTimezone(pItem->str);
|
||||
uInfo("%s set from %s to %s", name, tsTimezoneStr, pItem->str);
|
||||
cfgSetItem(pCfg, "timezone", tsTimezoneStr, pItem->stype);
|
||||
matched = true;
|
||||
} else if (strcasecmp("tempDir", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, tsTempDir, pItem->str);
|
||||
tstrncpy(tsTempDir, pItem->str, PATH_MAX);
|
||||
|
@ -1613,9 +1633,11 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
uError("failed to create tempDir:%s since %s", tsTempDir, terrstr());
|
||||
return -1;
|
||||
}
|
||||
matched = true;
|
||||
} else if (strcasecmp("telemetryServer", name) == 0) {
|
||||
uInfo("%s set from %s to %s", name, pItem->str, tsTelemServer);
|
||||
tstrncpy(tsTelemServer, pItem->str, TSDB_FQDN_LEN);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1624,6 +1646,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (matched) goto _out;
|
||||
|
||||
{ // 'bool/int32_t/int64_t/float/double' variables with general modification function
|
||||
static OptionNameAndVar debugOptions[] = {
|
||||
{"cDebugFlag", &cDebugFlag}, {"dDebugFlag", &dDebugFlag}, {"fsDebugFlag", &fsDebugFlag},
|
||||
|
@ -1664,6 +1688,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
}
|
||||
}
|
||||
|
||||
_out:
|
||||
return terrno == TSDB_CODE_SUCCESS ? 0 : -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -194,6 +194,14 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t offsetOfTimezone(char* tzStr, int64_t* offset) {
|
||||
if (tzStr && (tzStr[0] == 'z' || tzStr[0] == 'Z')) {
|
||||
*offset = 0;
|
||||
return 0;
|
||||
}
|
||||
return parseTimezone(tzStr, offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* rfc3339 format:
|
||||
* 2013-04-12T15:52:01+08:00
|
||||
|
|
|
@ -299,10 +299,11 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
|
||||
|
||||
if (vnodeCreate(path, &vnodeCfg, diskPrimary, pMgmt->pTfs) < 0) {
|
||||
tFreeSCreateVnodeReq(&req);
|
||||
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
tFreeSCreateVnodeReq(&req);
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
return code;
|
||||
}
|
||||
|
||||
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, true);
|
||||
|
|
|
@ -8,6 +8,12 @@ IF (TD_ENTERPRISE)
|
|||
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c)
|
||||
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDnode.c)
|
||||
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/mndView.c)
|
||||
|
||||
IF(${BUILD_WITH_S3})
|
||||
add_definitions(-DUSE_S3)
|
||||
ELSEIF(${BUILD_WITH_COS})
|
||||
add_definitions(-DUSE_COS)
|
||||
ENDIF()
|
||||
ENDIF ()
|
||||
|
||||
add_library(mnode STATIC ${MNODE_SRC})
|
||||
|
|
|
@ -882,7 +882,6 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
|
|||
if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
mndReleaseIdx(pMnode, idx.pIdx);
|
||||
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
|
@ -1179,8 +1178,16 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
|
|||
SName name = {0};
|
||||
tNameFromString(&name, createReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
|
||||
auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, createReq.sql, createReq.sqlLen);
|
||||
if(createReq.sql == NULL && createReq.sqlLen == 0){
|
||||
char detail[1000] = {0};
|
||||
|
||||
sprintf(detail, "dbname:%s, stable name:%s", name.dbname, name.tname);
|
||||
|
||||
auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, detail, strlen(detail));
|
||||
}
|
||||
else{
|
||||
auditRecord(pReq, pMnode->clusterId, "createStb", name.dbname, name.tname, createReq.sql, createReq.sqlLen);
|
||||
}
|
||||
_OVER:
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
mError("stb:%s, failed to create since %s", createReq.name, terrstr());
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include "mndTrans.h"
|
||||
#include "mndUser.h"
|
||||
#include "mndVgroup.h"
|
||||
#include "osMemory.h"
|
||||
#include "parser.h"
|
||||
#include "tmisce.h"
|
||||
#include "tname.h"
|
||||
|
@ -678,7 +679,7 @@ _OVER:
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
|
||||
static int32_t mndPersistTaskDropReq(SMnode* pMnode, STrans *pTrans, SStreamTask *pTask) {
|
||||
SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
|
||||
if (pReq == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -690,7 +691,12 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
|
|||
pReq->streamId = pTask->id.streamId;
|
||||
|
||||
STransAction action = {0};
|
||||
initTransAction(&action, pReq, sizeof(SVDropStreamTaskReq), TDMT_STREAM_TASK_DROP, &pTask->info.epSet, 0);
|
||||
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
|
||||
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
// The epset for this task's nodeId may have expired by now; use the newest epset from the mnode.
|
||||
initTransAction(&action, pReq, sizeof(SVDropStreamTaskReq), TDMT_STREAM_TASK_DROP, &epset, 0);
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
|
@ -706,7 +712,7 @@ int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream)
|
|||
int32_t sz = taosArrayGetSize(pTasks);
|
||||
for (int32_t j = 0; j < sz; j++) {
|
||||
SStreamTask *pTask = taosArrayGetP(pTasks, j);
|
||||
if (mndPersistTaskDropReq(pTrans, pTask) < 0) {
|
||||
if (mndPersistTaskDropReq(pMnode, pTrans, pTask) < 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -751,6 +757,15 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
char* sql = NULL;
|
||||
int32_t sqlLen = 0;
|
||||
if(createStreamReq.sql != NULL){
|
||||
sqlLen = strlen(createStreamReq.sql);
|
||||
sql = taosMemoryMalloc(sqlLen + 1);
|
||||
memset(sql, 0, sqlLen + 1);
|
||||
memcpy(sql, createStreamReq.sql, sqlLen);
|
||||
}
|
||||
|
||||
// build stream obj from request
|
||||
if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) {
|
||||
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
|
||||
|
@ -856,14 +871,21 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
|
||||
code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
|
||||
SName dbname = {0};
|
||||
tNameFromString(&dbname, createStreamReq.sourceDB, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
|
||||
SName name = {0};
|
||||
tNameFromString(&name, createStreamReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
// reuse this function for stream
|
||||
|
||||
// TODO
|
||||
if (createStreamReq.sql != NULL) {
|
||||
auditRecord(pReq, pMnode->clusterId, "createStream", name.dbname, name.tname, createStreamReq.sql,
|
||||
strlen(createStreamReq.sql));
|
||||
if (sql != NULL && sqlLen > 0) {
|
||||
auditRecord(pReq, pMnode->clusterId, "createStream", dbname.dbname, name.dbname, sql,
|
||||
sqlLen);
|
||||
}
|
||||
else{
|
||||
char detail[1000] = {0};
|
||||
sprintf(detail, "dbname:%s, stream name:%s", dbname.dbname, name.dbname);
|
||||
auditRecord(pReq, pMnode->clusterId, "createStream", dbname.dbname, name.dbname, detail, strlen(detail));
|
||||
}
|
||||
_OVER:
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
|
@ -874,6 +896,9 @@ _OVER:
|
|||
|
||||
tFreeSCMCreateStreamReq(&createStreamReq);
|
||||
tFreeStreamObj(&streamObj);
|
||||
if(sql != NULL){
|
||||
taosMemoryFreeClear(sql);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1085,9 +1110,10 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream
|
|||
|
||||
STransAction action = {0};
|
||||
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
initTransAction(&action, buf, tlen, TDMT_VND_STREAM_CHECK_POINT_SOURCE, &epset,
|
||||
TSDB_CODE_SYN_PROPOSE_NOT_READY);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(buf);
|
||||
|
@ -1150,38 +1176,33 @@ static int32_t initStreamNodeList(SMnode* pMnode) {
|
|||
return taosArrayGetSize(execInfo.pNodeList);
|
||||
}
|
||||
|
||||
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
|
||||
SMnode * pMnode = pReq->info.node;
|
||||
SSdb * pSdb = pMnode->pSdb;
|
||||
void * pIter = NULL;
|
||||
SStreamObj *pStream = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
{ // check if the node update happens or not
|
||||
int64_t ts = taosGetTimestampSec();
|
||||
|
||||
static bool taskNodeIsUpdated(SMnode* pMnode) {
|
||||
// check if the node update happens or not
|
||||
taosThreadMutexLock(&execInfo.lock);
|
||||
int32_t numOfNodes = initStreamNodeList(pMnode);
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
|
||||
if (numOfNodes == 0) {
|
||||
mDebug("stream task node change checking done, no vgroups exist, do nothing");
|
||||
execInfo.ts = ts;
|
||||
return 0;
|
||||
execInfo.ts = taosGetTimestampSec();
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfNodes; ++i) {
|
||||
SNodeEntry *pNodeEntry = taosArrayGet(execInfo.pNodeList, i);
|
||||
if (pNodeEntry->stageUpdated) {
|
||||
mDebug("stream task not ready due to node update detected, checkpoint not issued");
|
||||
return 0;
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool allReady = true;
|
||||
SArray *pNodeSnapshot = mndTakeVgroupSnapshot(pMnode, &allReady);
|
||||
if (!allReady) {
|
||||
mWarn("not all vnodes are ready, ignore the checkpoint") taosArrayDestroy(pNodeSnapshot);
|
||||
mWarn("not all vnodes ready");
|
||||
taosArrayDestroy(pNodeSnapshot);
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1192,15 +1213,38 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
|
|||
taosArrayDestroy(pNodeSnapshot);
|
||||
|
||||
if (nodeUpdated) {
|
||||
mDebug("stream task not ready due to node update, checkpoint not issued");
|
||||
return 0;
|
||||
mDebug("stream task not ready due to node update");
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
return nodeUpdated;
|
||||
}
|
||||
|
||||
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
|
||||
SMnode * pMnode = pReq->info.node;
|
||||
SSdb * pSdb = pMnode->pSdb;
|
||||
void * pIter = NULL;
|
||||
SStreamObj *pStream = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
// check if the node update happens or not
|
||||
bool updated = taskNodeIsUpdated(pMnode);
|
||||
if (updated) {
|
||||
mWarn("checkpoint ignore, stream task nodes update detected");
|
||||
return -1;
|
||||
}
|
||||
|
||||
{ // check if all tasks are in TASK_STATUS__READY status
|
||||
bool ready = true;
|
||||
|
||||
taosThreadMutexLock(&execInfo.lock);
|
||||
|
||||
// no streams exists, abort
|
||||
int32_t numOfTasks = taosArrayGetSize(execInfo.pTaskList);
|
||||
if (numOfTasks <= 0) {
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) {
|
||||
STaskId * p = taosArrayGet(execInfo.pTaskList, i);
|
||||
STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p));
|
||||
|
@ -1283,12 +1327,13 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
|
|||
|
||||
if (pStream == NULL) {
|
||||
if (dropReq.igNotExists) {
|
||||
mInfo("stream:%s, not exist, ignore not exist is set", dropReq.name);
|
||||
mInfo("stream:%s not exist, ignore not exist is set", dropReq.name);
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
tFreeSMDropStreamReq(&dropReq);
|
||||
return 0;
|
||||
} else {
|
||||
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
|
||||
mError("stream:%s not exist failed to drop", dropReq.name);
|
||||
tFreeSMDropStreamReq(&dropReq);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1660,7 +1705,7 @@ static void mndCancelGetNextStreamTask(SMnode *pMnode, void *pIter) {
|
|||
sdbCancelFetch(pSdb, pIter);
|
||||
}
|
||||
|
||||
static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
|
||||
static int32_t mndPauseStreamTask(SMnode* pMnode, STrans *pTrans, SStreamTask *pTask) {
|
||||
SVPauseStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVPauseStreamTaskReq));
|
||||
if (pReq == NULL) {
|
||||
mError("failed to malloc in pause stream, size:%" PRIzu ", code:%s", sizeof(SVPauseStreamTaskReq),
|
||||
|
@ -1673,8 +1718,12 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
|
|||
pReq->taskId = pTask->id.taskId;
|
||||
pReq->streamId = pTask->id.streamId;
|
||||
|
||||
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
|
||||
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
STransAction action = {0};
|
||||
initTransAction(&action, pReq, sizeof(SVPauseStreamTaskReq), TDMT_STREAM_TASK_PAUSE, &pTask->info.epSet, 0);
|
||||
initTransAction(&action, pReq, sizeof(SVPauseStreamTaskReq), TDMT_STREAM_TASK_PAUSE, &epset, 0);
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
|
@ -1682,7 +1731,7 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndPauseAllStreamTasks(STrans *pTrans, SStreamObj *pStream) {
|
||||
int32_t mndPauseAllStreamTasks(SMnode* pMnode, STrans *pTrans, SStreamObj *pStream) {
|
||||
SArray *tasks = pStream->tasks;
|
||||
|
||||
int32_t size = taosArrayGetSize(tasks);
|
||||
|
@ -1691,7 +1740,7 @@ int32_t mndPauseAllStreamTasks(STrans *pTrans, SStreamObj *pStream) {
|
|||
int32_t sz = taosArrayGetSize(pTasks);
|
||||
for (int32_t j = 0; j < sz; j++) {
|
||||
SStreamTask *pTask = taosArrayGetP(pTasks, j);
|
||||
if (mndPauseStreamTask(pTrans, pTask) < 0) {
|
||||
if (mndPauseStreamTask(pMnode, pTrans, pTask) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1751,6 +1800,12 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
bool updated = taskNodeIsUpdated(pMnode);
|
||||
if (updated) {
|
||||
mError("tasks are not ready for pause, node update detected");
|
||||
return -1;
|
||||
}
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "pause-stream");
|
||||
if (pTrans == NULL) {
|
||||
mError("stream:%s, failed to pause stream since %s", pauseReq.name, terrstr());
|
||||
|
@ -1768,7 +1823,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
// pause all tasks
|
||||
if (mndPauseAllStreamTasks(pTrans, pStream) < 0) {
|
||||
if (mndPauseAllStreamTasks(pMnode, pTrans, pStream) < 0) {
|
||||
mError("stream:%s, failed to pause task since %s", pauseReq.name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
mndTransDrop(pTrans);
|
||||
|
@ -1795,7 +1850,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
|||
return TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
}
|
||||
|
||||
static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t igUntreated) {
|
||||
static int32_t mndResumeStreamTask(STrans *pTrans, SMnode* pMnode, SStreamTask *pTask, int8_t igUntreated) {
|
||||
SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq));
|
||||
if (pReq == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -1806,8 +1861,12 @@ static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t ig
|
|||
pReq->streamId = pTask->id.streamId;
|
||||
pReq->igUntreated = igUntreated;
|
||||
|
||||
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
|
||||
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
STransAction action = {0};
|
||||
initTransAction(&action, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &pTask->info.epSet, 0);
|
||||
initTransAction(&action, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0);
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
|
@ -1815,14 +1874,14 @@ static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t ig
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndResumeAllStreamTasks(STrans *pTrans, SStreamObj *pStream, int8_t igUntreated) {
|
||||
int32_t mndResumeAllStreamTasks(STrans *pTrans, SMnode* pMnode, SStreamObj *pStream, int8_t igUntreated) {
|
||||
int32_t size = taosArrayGetSize(pStream->tasks);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
SArray *pTasks = taosArrayGetP(pStream->tasks, i);
|
||||
int32_t sz = taosArrayGetSize(pTasks);
|
||||
for (int32_t j = 0; j < sz; j++) {
|
||||
SStreamTask *pTask = taosArrayGetP(pTasks, j);
|
||||
if (mndResumeStreamTask(pTrans, pTask, igUntreated) < 0) {
|
||||
if (mndResumeStreamTask(pTrans, pMnode, pTask, igUntreated) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1831,7 +1890,6 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SStreamObj *pStream, int8_t igUn
|
|||
}
|
||||
}
|
||||
}
|
||||
// pStream->pHTasksList is null
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1884,7 +1942,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
// resume all tasks
|
||||
if (mndResumeAllStreamTasks(pTrans, pStream, pauseReq.igUntreated) < 0) {
|
||||
if (mndResumeAllStreamTasks(pTrans, pMnode, pStream, pauseReq.igUntreated) < 0) {
|
||||
mError("stream:%s, failed to drop task since %s", pauseReq.name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
mndTransDrop(pTrans);
|
||||
|
@ -2534,8 +2592,12 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
|
|||
pReq->taskId = pTask->id.taskId;
|
||||
pReq->streamId = pTask->id.streamId;
|
||||
|
||||
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
|
||||
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
mndReleaseVgroup(pMnode, pVgObj);
|
||||
|
||||
STransAction action = {0};
|
||||
initTransAction(&action, pReq, sizeof(SVResetStreamTaskReq), TDMT_VND_STREAM_TASK_RESET, &pTask->info.epSet, 0);
|
||||
initTransAction(&action, pReq, sizeof(SVResetStreamTaskReq), TDMT_VND_STREAM_TASK_RESET, &epset, 0);
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
taosWUnLockLatch(&pStream->lock);
|
||||
|
|
|
@ -1229,6 +1229,21 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
|
|||
}
|
||||
} else {
|
||||
mInfo("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
|
||||
|
||||
for (int32_t action = 0; action < numOfActions; ++action) {
|
||||
STransAction *pAction = taosArrayGet(pArray, action);
|
||||
mDebug("trans:%d, %s:%d Sent:%d, Received:%d, errCode:0x%x, acceptableCode:0x%x, retryCode:0x%x",
|
||||
pTrans->id, mndTransStr(pAction->stage), pAction->id, pAction->msgSent, pAction->msgReceived,
|
||||
pAction->errCode, pAction->acceptableCode, pAction->retryCode);
|
||||
if (pAction->msgSent) {
|
||||
if (pAction->msgReceived) {
|
||||
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
|
||||
mndTransResetAction(pMnode, pTrans, pAction);
|
||||
mInfo("trans:%d, %s:%d reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "mndVgroup.h"
|
||||
#include "audit.h"
|
||||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
|
@ -26,7 +27,6 @@
|
|||
#include "mndTrans.h"
|
||||
#include "mndUser.h"
|
||||
#include "tmisce.h"
|
||||
#include "audit.h"
|
||||
|
||||
#define VGROUP_VER_NUMBER 1
|
||||
#define VGROUP_RESERVE_SIZE 64
|
||||
|
@ -296,8 +296,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
|
|||
|
||||
if (pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER) {
|
||||
pReplica = &createReq.replicas[createReq.replica];
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
pReplica = &createReq.learnerReplicas[createReq.learnerReplica];
|
||||
}
|
||||
|
||||
|
@ -316,8 +315,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
|
|||
if (pDnode->id == pVgid->dnodeId) {
|
||||
createReq.selfIndex = createReq.replica;
|
||||
}
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
if (pDnode->id == pVgid->dnodeId) {
|
||||
createReq.learnerSelfIndex = createReq.learnerReplica;
|
||||
}
|
||||
|
@ -325,8 +323,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
|
|||
|
||||
if (pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER) {
|
||||
createReq.replica++;
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
createReq.learnerReplica++;
|
||||
}
|
||||
}
|
||||
|
@ -338,10 +335,11 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
|
|||
|
||||
createReq.changeVersion = pVgroup->syncConfChangeVer;
|
||||
|
||||
mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
|
||||
mInfo(
|
||||
"vgId:%d, build create vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
|
||||
"changeVersion:%d",
|
||||
createReq.vgId, createReq.replica, createReq.selfIndex, createReq.learnerReplica,
|
||||
createReq.learnerReplica, createReq.strict, createReq.changeVersion);
|
||||
createReq.vgId, createReq.replica, createReq.selfIndex, createReq.learnerReplica, createReq.learnerReplica,
|
||||
createReq.strict, createReq.changeVersion);
|
||||
for (int32_t i = 0; i < createReq.replica; ++i) {
|
||||
mInfo("vgId:%d, replica:%d ep:%s:%u", createReq.vgId, i, createReq.replicas[i].fqdn, createReq.replicas[i].port);
|
||||
}
|
||||
|
@ -429,8 +427,7 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
if (pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER) {
|
||||
pReplica = &alterReq.replicas[alterReq.replica];
|
||||
alterReq.replica++;
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
pReplica = &alterReq.learnerReplicas[alterReq.learnerReplica];
|
||||
alterReq.learnerReplica++;
|
||||
}
|
||||
|
@ -448,24 +445,24 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
if (dnodeId == pVgid->dnodeId) {
|
||||
alterReq.selfIndex = v;
|
||||
}
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
if (dnodeId == pVgid->dnodeId) {
|
||||
alterReq.learnerSelfIndex = v;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
|
||||
mInfo(
|
||||
"vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
|
||||
"changeVersion:%d",
|
||||
alterReq.vgId, alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
|
||||
alterReq.learnerSelfIndex, alterReq.strict, alterReq.changeVersion);
|
||||
alterReq.vgId, alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica, alterReq.learnerSelfIndex,
|
||||
alterReq.strict, alterReq.changeVersion);
|
||||
for (int32_t i = 0; i < alterReq.replica; ++i) {
|
||||
mInfo("vgId:%d, replica:%d ep:%s:%u", alterReq.vgId, i, alterReq.replicas[i].fqdn, alterReq.replicas[i].port);
|
||||
}
|
||||
for (int32_t i = 0; i < alterReq.learnerReplica; ++i) {
|
||||
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", alterReq.vgId, i,
|
||||
alterReq.learnerReplicas[i].fqdn, alterReq.learnerReplicas[i].port);
|
||||
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", alterReq.vgId, i, alterReq.learnerReplicas[i].fqdn,
|
||||
alterReq.learnerReplicas[i].port);
|
||||
}
|
||||
|
||||
if (alterReq.selfIndex == -1 && alterReq.learnerSelfIndex == -1) {
|
||||
|
@ -507,8 +504,7 @@ static void *mndBuildCheckLearnCatchupReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
if (pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER) {
|
||||
pReplica = &req.replicas[req.replica];
|
||||
req.replica++;
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
pReplica = &req.learnerReplicas[req.learnerReplica];
|
||||
req.learnerReplica++;
|
||||
}
|
||||
|
@ -526,8 +522,7 @@ static void *mndBuildCheckLearnCatchupReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
if (dnodeId == pVgid->dnodeId) {
|
||||
req.selfIndex = v;
|
||||
}
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
if (dnodeId == pVgid->dnodeId) {
|
||||
req.learnerSelfIndex = v;
|
||||
}
|
||||
|
@ -535,14 +530,12 @@ static void *mndBuildCheckLearnCatchupReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
}
|
||||
|
||||
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
|
||||
req.vgId, req.replica, req.selfIndex, req.learnerReplica,
|
||||
req.learnerSelfIndex, req.strict);
|
||||
req.vgId, req.replica, req.selfIndex, req.learnerReplica, req.learnerSelfIndex, req.strict);
|
||||
for (int32_t i = 0; i < req.replica; ++i) {
|
||||
mInfo("vgId:%d, replica:%d ep:%s:%u", req.vgId, i, req.replicas[i].fqdn, req.replicas[i].port);
|
||||
}
|
||||
for (int32_t i = 0; i < req.learnerReplica; ++i) {
|
||||
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", req.vgId, i,
|
||||
req.learnerReplicas[i].fqdn, req.learnerReplicas[i].port);
|
||||
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", req.vgId, i, req.learnerReplicas[i].fqdn, req.learnerReplicas[i].port);
|
||||
}
|
||||
|
||||
if (req.selfIndex == -1 && req.learnerSelfIndex == -1) {
|
||||
|
@ -1350,7 +1343,8 @@ int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVg
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndRestoreAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SDnodeObj *pDnode) {
|
||||
int32_t mndRestoreAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
|
||||
SDnodeObj *pDnode) {
|
||||
STransAction action = {0};
|
||||
|
||||
action.epSet = mndGetDnodeEpset(pDnode);
|
||||
|
@ -1401,8 +1395,8 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndAddChangeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
|
||||
SVgObj *pOldVgroup, SVgObj *pNewVgroup, int32_t dnodeId) {
|
||||
int32_t mndAddChangeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pOldVgroup, SVgObj *pNewVgroup,
|
||||
int32_t dnodeId) {
|
||||
STransAction action = {0};
|
||||
action.epSet = mndGetVgroupEpset(pMnode, pNewVgroup);
|
||||
|
||||
|
@ -2206,8 +2200,7 @@ _OVER:
|
|||
return code;
|
||||
}
|
||||
|
||||
static void *mndBuildSForceBecomeFollowerReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dnodeId,
|
||||
int32_t *pContLen) {
|
||||
static void *mndBuildSForceBecomeFollowerReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dnodeId, int32_t *pContLen) {
|
||||
SForceBecomeFollowerReq balanceReq = {
|
||||
.vgId = pVgroup->vgId,
|
||||
};
|
||||
|
@ -2271,8 +2264,7 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
|
|||
|
||||
int32_t dnodeId = pVgroup->vnodeGid[0].dnodeId;
|
||||
|
||||
for(int i = 0; i < replica; i++)
|
||||
{
|
||||
for (int i = 0; i < replica; i++) {
|
||||
if (pVgroup->vnodeGid[i].syncState == TAOS_SYNC_STATE_LEADER) {
|
||||
dnodeId = pVgroup->vnodeGid[i].dnodeId;
|
||||
break;
|
||||
|
@ -2289,8 +2281,7 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
|
|||
mndReleaseDnode(pMnode, pDnode);
|
||||
}
|
||||
|
||||
if(exist && online)
|
||||
{
|
||||
if (exist && online) {
|
||||
mInfo("trans:%d, vgid:%d leader to dnode:%d", pTrans->id, vgid, dnodeId);
|
||||
|
||||
if (mndAddBalanceVgroupLeaderAction(pMnode, pTrans, pVgroup, dnodeId) != 0) {
|
||||
|
@ -2322,11 +2313,9 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
|
|||
return -1;
|
||||
}
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
}
|
||||
else
|
||||
{
|
||||
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d",
|
||||
pTrans->id, vgid, dnodeId, exist, online);
|
||||
} else {
|
||||
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d", pTrans->id, vgid, dnodeId, exist,
|
||||
online);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -2334,14 +2323,10 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
|
|||
|
||||
extern int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq);
|
||||
|
||||
int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) {
|
||||
return mndProcessVgroupBalanceLeaderMsgImp(pReq);
|
||||
}
|
||||
int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) { return mndProcessVgroupBalanceLeaderMsgImp(pReq); }
|
||||
|
||||
#ifndef TD_ENTERPRISE
|
||||
int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq) {
|
||||
return 0;
|
||||
}
|
||||
int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq) { return 0; }
|
||||
#endif
|
||||
|
||||
static int32_t mndCheckDnodeMemory(SMnode *pMnode, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pOldVgroup,
|
||||
|
@ -2407,8 +2392,7 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
|
|||
|
||||
// follower stage
|
||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
||||
return -1;
|
||||
if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0) return -1;
|
||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||
return -1;
|
||||
|
||||
|
@ -2478,8 +2462,8 @@ int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pO
|
|||
|
||||
mndTransSetSerial(pTrans);
|
||||
|
||||
mInfo("trans:%d, vgId:%d, alter vgroup, syncConfChangeVer:%d, version:%d, replica:%d",
|
||||
pTrans->id, pVgroup->vgId, pVgroup->syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
mInfo("trans:%d, vgId:%d, alter vgroup, syncConfChangeVer:%d, version:%d, replica:%d", pTrans->id, pVgroup->vgId,
|
||||
pVgroup->syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
|
||||
if (newVgroup.replica == 1 && pNewDb->cfg.replications == 3) {
|
||||
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
|
||||
|
@ -2494,23 +2478,25 @@ int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pO
|
|||
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER;
|
||||
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_LEARNER;
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
|
||||
mInfo("trans:%d, vgId:%d, add change config, syncConfChangeVer:%d, version:%d, replica:%d",
|
||||
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||
return -1;
|
||||
mInfo("trans:%d, vgId:%d, add change config, syncConfChangeVer:%d, version:%d, replica:%d", pTrans->id,
|
||||
pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
|
||||
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d",
|
||||
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d", pTrans->id, pVgroup->vgId,
|
||||
newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[2]) != 0) return -1;
|
||||
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d",
|
||||
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
|
||||
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d", pTrans->id, pVgroup->vgId,
|
||||
newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
|
||||
|
||||
// check learner
|
||||
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0) return -1;
|
||||
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[2].dnodeId) != 0) return -1;
|
||||
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
||||
return -1;
|
||||
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[2].dnodeId) != 0)
|
||||
return -1;
|
||||
|
||||
// change raft type
|
||||
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
|
@ -2543,7 +2529,8 @@ int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pO
|
|||
SVnodeGid del1 = {0};
|
||||
if (mndRemoveVnodeFromVgroupWithoutSave(pMnode, pTrans, &newVgroup, pArray, &del1) != 0) return -1;
|
||||
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||
return -1;
|
||||
|
||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
|
||||
|
||||
|
@ -2560,7 +2547,8 @@ int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pO
|
|||
SVnodeGid del2 = {0};
|
||||
if (mndRemoveVnodeFromVgroupWithoutSave(pMnode, pTrans, &newVgroup, pArray, &del2) != 0) return -1;
|
||||
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
|
||||
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||
return -1;
|
||||
|
||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
|
||||
|
||||
|
@ -2597,8 +2585,7 @@ int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
SVgObj newVgroup = {0};
|
||||
memcpy(&newVgroup, pVgroup, sizeof(SVgObj));
|
||||
|
||||
mInfo("db:%s, vgId:%d, restore vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
|
||||
pVgroup->vnodeGid[0].dnodeId);
|
||||
mInfo("db:%s, vgId:%d, restore vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId);
|
||||
|
||||
if (newVgroup.replica == 1) {
|
||||
int selected = 0;
|
||||
|
@ -2609,13 +2596,11 @@ int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
}
|
||||
}
|
||||
if (mndAddCreateVnodeAction(pMnode, pTrans, db, &newVgroup, &newVgroup.vnodeGid[selected]) != 0) return -1;
|
||||
}
|
||||
else if(newVgroup.replica == 3){
|
||||
} else if (newVgroup.replica == 3) {
|
||||
for (int i = 0; i < newVgroup.replica; i++) {
|
||||
if (newVgroup.vnodeGid[i].dnodeId == pDnode->id) {
|
||||
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_LEARNER;
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||
}
|
||||
}
|
||||
|
@ -2626,8 +2611,7 @@ int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
if (newVgroup.vnodeGid[i].dnodeId == pDnode->id) {
|
||||
}
|
||||
}
|
||||
if (mndRestoreAddAlterVnodeTypeAction(pMnode, pTrans, db, &newVgroup, pDnode) != 0)
|
||||
return -1;
|
||||
if (mndRestoreAddAlterVnodeTypeAction(pMnode, pTrans, db, &newVgroup, pDnode) != 0) return -1;
|
||||
}
|
||||
|
||||
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
|
||||
|
@ -2697,6 +2681,15 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
#if defined(USE_S3)
|
||||
extern int8_t tsS3Enabled;
|
||||
if (tsS3Enabled) {
|
||||
code = TSDB_CODE_OPS_NOT_SUPPORT;
|
||||
mError("vgId:%d, db:%s, s3 exists, split vgroup not allowed", pVgroup->vgId, pVgroup->dbName);
|
||||
goto _OVER;
|
||||
}
|
||||
#endif
|
||||
|
||||
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "split-vgroup");
|
||||
if (pTrans == NULL) goto _OVER;
|
||||
mndTransSetSerial(pTrans);
|
||||
|
|
|
@@ -71,6 +71,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition;

pStore->updateInfoInit = updateInfoInit;
pStore->updateInfoFillBlockData = updateInfoFillBlockData;
@@ -831,7 +831,7 @@ struct SDiskDataBuilder {
SBlkInfo bi;
};

typedef struct SLDataIter {
struct SLDataIter {
SRBTreeNode node;
SSttBlk *pSttBlk;
int64_t cid; // for debug purpose

@@ -845,7 +845,7 @@ typedef struct SLDataIter {
SSttBlockLoadInfo *pBlockLoadInfo;
bool ignoreEarlierTs;
struct SSttFileReader *pReader;
} SLDataIter;
};

#define tMergeTreeGetRow(_t) (&((_t)->pIter->rInfo.row))
@@ -1105,10 +1105,6 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
}
}

static void ddxx() {

}

// this function should be executed by only one thread, so we set an sentinel to protect this function
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont;

@@ -1155,7 +1151,9 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("s-task:%s continue exec scan-history(step1), original step1 startTs:%" PRId64 ", already elapsed:%.2fs",
id, pTask->execInfo.step1Start, pTask->execInfo.step1El);
} else {
tqDebug("s-task:%s already in step2, no need to scan-history data, step2 starTs:%"PRId64, id, pTask->execInfo.step2Start);
tqDebug("s-task:%s already in step2, no need to scan-history data, step2 startTs:%" PRId64, id,
pTask->execInfo.step2Start);

atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
streamMetaReleaseTask(pMeta, pTask);
return 0;
@@ -155,7 +155,7 @@ int32_t tqRestartStreamTasks(STQ* pTq) {
streamMetaInitBackend(pMeta);
int64_t el = taosGetTimestampMs() - st;

tqInfo("vgId:%d close&reload state elapsed time:%.3fms", vgId, el/1000.);
tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el/1000.);

code = streamMetaLoadAllTasks(pTq->pStreamMeta);
if (code != TSDB_CODE_SUCCESS) {

@@ -168,12 +168,15 @@ int32_t tqRestartStreamTasks(STQ* pTq) {
if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
tqResetStreamTaskStatus(pTq);

streamMetaWUnLock(pMeta);
tqStartStreamTasks(pTq);
} else {
streamMetaResetStartInfo(&pMeta->startInfo);
streamMetaWUnLock(pMeta);
tqInfo("vgId:%d, follower node not start stream tasks", vgId);
}

streamMetaWUnLock(pMeta);
code = terrno;
return code;
}
@@ -212,9 +212,7 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
taosMemoryFree(pWriter);
goto _err;
}

taosWUnLockLatch(&pTq->pStreamMeta->lock);

taosMemoryFree(pWriter);
return code;
@@ -409,7 +409,6 @@ static int32_t tsdbCommitFileSetBegin(SCommitter2 *committer) {
extern int32_t tsS3UploadDelaySec;
long s3Size(const char *object_name);
int32_t nlevel = tfsGetLevel(committer->tsdb->pVnode->pTfs);
committer->ctx->skipTsRow = false;
if (tsS3Enabled && nlevel > 1 && committer->ctx->fset) {
STFileObj *fobj = committer->ctx->fset->farr[TSDB_FTYPE_DATA];
if (fobj && fobj->f->did.level == nlevel - 1) {
@@ -879,14 +879,43 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {
continue;
}

bool skipMerge = false;
int32_t numFile = TARRAY2_SIZE(lvl->fobjArr);
if (numFile >= sttTrigger) {
// launch merge
{
extern int8_t tsS3Enabled;
extern int32_t tsS3UploadDelaySec;
long s3Size(const char *object_name);
int32_t nlevel = tfsGetLevel(fs->tsdb->pVnode->pTfs);
if (tsS3Enabled && nlevel > 1) {
STFileObj *fobj = fset->farr[TSDB_FTYPE_DATA];
if (fobj && fobj->f->did.level == nlevel - 1) {
// if exists on s3 or local mtime < committer->ctx->now - tsS3UploadDelay
const char *object_name = taosDirEntryBaseName((char *)fobj->fname);

if (taosCheckExistFile(fobj->fname)) {
int32_t now = taosGetTimestampSec();
int32_t mtime = 0;
taosStatFile(fobj->fname, NULL, &mtime, NULL);
if (mtime < now - tsS3UploadDelaySec) {
skipMerge = true;
}
} else if (s3Size(object_name) > 0) {
skipMerge = true;
}
}
// new fset can be written with ts data
}
}

if (!skipMerge) {
code = tsdbSchedMerge(fs->tsdb, fset->fid);
TSDB_CHECK_CODE(code, lino, _exit);
}
}

if (numFile >= sttTrigger * BLOCK_COMMIT_FACTOR) {
if (numFile >= sttTrigger * BLOCK_COMMIT_FACTOR && !skipMerge) {
tsdbFSSetBlockCommit(fset, true);
} else {
tsdbFSSetBlockCommit(fset, false);
@@ -50,6 +50,7 @@ void remove_file(const char *fname, bool last_level) {
long s3_size = tsS3Enabled ? s3Size(object_name) : 0;
if (!strncmp(fname + strlen(fname) - 5, ".data", 5) && s3_size > 0) {
s3DeleteObjects(&object_name, 1);
tsdbInfo("file:%s is removed from s3", fname);
} else {
tsdbError("file:%s remove failed", fname);
}
@@ -548,6 +548,36 @@ static int32_t tsdbMerge(void *arg) {

if (merger->fset == NULL) return 0;

bool skipMerge = false;
{
extern int8_t tsS3Enabled;
extern int32_t tsS3UploadDelaySec;
long s3Size(const char *object_name);
int32_t nlevel = tfsGetLevel(merger->tsdb->pVnode->pTfs);
if (tsS3Enabled && nlevel > 1) {
STFileObj *fobj = merger->fset->farr[TSDB_FTYPE_DATA];
if (fobj && fobj->f->did.level == nlevel - 1) {
// if exists on s3 or local mtime < committer->ctx->now - tsS3UploadDelay
const char *object_name = taosDirEntryBaseName((char *)fobj->fname);

if (taosCheckExistFile(fobj->fname)) {
int32_t now = taosGetTimestampSec();
int32_t mtime = 0;

taosStatFile(fobj->fname, NULL, &mtime, NULL);
if (mtime < now - tsS3UploadDelaySec) {
skipMerge = true;
}
} else if (s3Size(object_name) > 0) {
skipMerge = true;
}
}
// new fset can be written with ts data
}
}

if (skipMerge) return 0;

// do merge
tsdbDebug("vgId:%d merge begin, fid:%d", TD_VID(tsdb->pVnode), merger->fid);
code = tsdbDoMerge(merger);
@@ -524,7 +524,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN
int64_t st = taosGetTimestampUs();

// clear info for the new file
cleanupInfoFoxNextFileset(pReader->status.pTableMap);
cleanupInfoForNextFileset(pReader->status.pTableMap);

int32_t k = 0;
int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);

@@ -1444,7 +1444,9 @@ static bool nextRowFromSttBlocks(SLastBlockReader* pLastBlockReader, STableBlock
}
}

static void doPinSttBlock(SLastBlockReader* pLastBlockReader) { tMergeTreePinSttBlock(&pLastBlockReader->mergeTree); }
static void doPinSttBlock(SLastBlockReader* pLastBlockReader) {
tMergeTreePinSttBlock(&pLastBlockReader->mergeTree);
}

static void doUnpinSttBlock(SLastBlockReader* pLastBlockReader) {
tMergeTreeUnpinSttBlock(&pLastBlockReader->mergeTree);

@@ -1454,7 +1456,6 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
STableBlockScanInfo* pScanInfo, int64_t ts, STsdbReader* pReader,
bool* copied) {
int32_t code = TSDB_CODE_SUCCESS;

*copied = false;

// avoid the fetch next row replace the referenced stt block in buffer

@@ -1540,12 +1541,6 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
if (ps == NULL) {
return terrno;
}

int32_t code = tsdbRowMergerInit(pMerger, ps);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to init row merger, code:%s", tstrerror(code));
return code;
}
}

int64_t minKey = 0;

@@ -1688,7 +1683,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", ts:%" PRId64 " %s", pRow->pBlockData, pRow->iRow, pLastBlockReader->uid,
fRow.pBlockData->aTSKEY[fRow.iRow], pReader->idStr);

// only last block exists
// only stt block exists
if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) {
code = tryCopyDistinctRowFromSttBlock(&fRow, pLastBlockReader, pBlockScanInfo, tsLastBlock, pReader, &copied);
if (code) {

@@ -1767,12 +1762,6 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
if (ps == NULL) {
return terrno;
}

int32_t code = tsdbRowMergerInit(pMerger, ps);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to init row merger, code:%s", tstrerror(code));
return code;
}
}

if (hasDataInFileBlock(pBlockData, pDumpInfo)) {

@@ -1874,12 +1863,6 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
if (ps == NULL) {
return terrno;
}

code = tsdbRowMergerInit(pMerger, ps);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to init row merger, code:%s", tstrerror(code));
return code;
}
}

int64_t minKey = 0;

@@ -2202,12 +2185,6 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
if (ps == NULL) {
return terrno;
}

code = tsdbRowMergerInit(pMerger, ps);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to init row merger, code:%s", tstrerror(code));
return code;
}
}

if (copied) {
@@ -245,7 +245,7 @@ static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
pScanInfo->sttKeyInfo.status = STT_FILE_READER_UNINIT;
}

void cleanupInfoFoxNextFileset(SSHashObj* pTableMap) {
void cleanupInfoForNextFileset(SSHashObj* pTableMap) {
STableBlockScanInfo** p = NULL;

int32_t iter = 0;
@@ -248,7 +248,7 @@ SSHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf
void clearBlockScanInfo(STableBlockScanInfo* p);
void destroyAllBlockScanInfo(SSHashObj* pTableMap);
void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step);
void cleanupInfoFoxNextFileset(SSHashObj* pTableMap);
void cleanupInfoForNextFileset(SSHashObj* pTableMap);
int32_t ensureBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables);
void clearBlockScanInfoBuf(SBlockInfoBuf* pBuf);
void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index);
@@ -103,7 +103,7 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile

char fname[TSDB_FILENAME_LEN];
TdFilePtr fdFrom = NULL;
TdFilePtr fdTo = NULL;
// TdFilePtr fdTo = NULL;

tsdbTFileName(rtner->tsdb, to, fname);

@@ -359,7 +359,11 @@ static int32_t tsdbDoRetentionOnFileSet(SRTNer *rtner, STFileSet *fset) {
s3EvictCache(fobj->fname, fsize * 2);
}
*/
tsdbInfo("file:%s size: %" PRId64 " do migrate", fobj->fname, fobj->f->size);
if (fobj->f->did.level > did.level) {
continue;
}
tsdbInfo("file:%s size: %" PRId64 " do migrate from %d to %d", fobj->fname, fobj->f->size, fobj->f->did.level,
did.level);

code = tsdbDoMigrateFileObj(rtner, fobj, &did);
TSDB_CHECK_CODE(code, lino, _exit);
@@ -180,6 +180,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition;

pStore->updateInfoInit = updateInfoInit;
pStore->updateInfoFillBlockData = updateInfoFillBlockData;
@@ -21,6 +21,8 @@
#include "cos.h"
#include "vnode.h"
#include "vnodeInt.h"
#include "audit.h"
#include "tstrbuild.h"

static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);

@@ -1005,7 +1007,9 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
size_t len = 0;
char* keyJoined = taosStringBuilderGetResult(&sb, &len);

if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
auditRecord(pOriginRpc, clusterId, "createTable", name.dbname, "", keyJoined, len);
}

taosStringBuilderDestroy(&sb);
}

@@ -1227,7 +1231,9 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in
size_t len = 0;
char *keyJoined = taosStringBuilderGetResult(&sb, &len);

if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
auditRecord(pOriginRpc, clusterId, "dropTable", name.dbname, "", keyJoined, len);
}

taosStringBuilderDestroy(&sb);
}
@@ -610,6 +610,31 @@ typedef struct SStreamStateAggOperatorInfo {
bool recvGetAll;
} SStreamStateAggOperatorInfo;

typedef struct SStreamEventAggOperatorInfo {
SOptrBasicInfo binfo;
SStreamAggSupporter streamAggSup;
SExprSupp scalarSupp; // supporter for perform scalar function
SGroupResInfo groupResInfo;
int32_t primaryTsIndex; // primary timestamp slot id
STimeWindowAggSupp twAggSup;
SSDataBlock* pDelRes;
SSHashObj* pSeDeleted;
void* pDelIterator;
SArray* pChildren; // cache for children's result;
bool ignoreExpiredData;
bool ignoreExpiredDataSaved;
SArray* pUpdated;
SSHashObj* pSeUpdated;
SSHashObj* pAllUpdated;
int64_t dataVersion;
bool isHistoryOp;
SArray* historyWins;
bool reCkBlock;
SSDataBlock* pCheckpointRes;
SFilterInfo* pStartCondInfo;
SFilterInfo* pEndCondInfo;
} SStreamEventAggOperatorInfo;

typedef struct SStreamPartitionOperatorInfo {
SOptrBasicInfo binfo;
SPartitionBySupporter partitionSup;

@@ -768,9 +793,51 @@ void doClearBufferedBlocks(SStreamScanInfo* pInfo);
uint64_t calcGroupId(char* pData, int32_t len);
void streamOpReleaseState(struct SOperatorInfo* pOperator);
void streamOpReloadState(struct SOperatorInfo* pOperator);
void destroyStreamAggSupporter(SStreamAggSupporter* pSup);
void clearGroupResInfo(SGroupResInfo* pGroupResInfo);
int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SFunctionStateStore* pStore);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, int32_t numOfOutput, int64_t gap,
SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore,
SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr,
SStorageAPI* pApi);
void initDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type, int32_t tsColIndex,
STimeWindowAggSupp* pTwSup);
void getMaxTsWins(const SArray* pAllWins, SArray* pMaxWins);
void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
void getSessionHashKey(const SSessionKey* pKey, SSessionKey* pHashKey);
void deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate, SSHashObj* pMapDelete);
int32_t getAllSessionWindow(SSHashObj* pHashMap, SSHashObj* pStUpdated);
int32_t closeSessionWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SSHashObj* pClosed);
int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar);
int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2);
void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins);
int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult,
int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput,
struct SOperatorInfo* pOperator, int64_t winDelta);
int32_t setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo* pWinInfo);
int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo);
int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated);
void saveDeleteRes(SSHashObj* pStDelete, SSessionKey key);
void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SSHashObj* pResMap, SSessionKey* pKey);
void doBuildDeleteDataBlock(struct SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite);
void doBuildSessionResult(struct SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, SSDataBlock* pBlock);
void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SResultWindowInfo* pWinInfo);
void getNextSessionWinInfo(SStreamAggSupporter* pAggSup, SSHashObj* pStUpdated, SResultWindowInfo* pCurWin,
SResultWindowInfo* pNextWin);
void compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwAggSup,
SExecTaskInfo* pTaskInfo, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin,
SSHashObj* pStUpdated, SSHashObj* pStDeleted, bool addGap);
int32_t releaseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI);
void resetWinRange(STimeWindow* winRange);

int32_t encodeSSessionKey(void** buf, SSessionKey* key);
void* decodeSSessionKey(void* buf, SSessionKey* key);
int32_t encodeSResultWindowInfo(void** buf, SResultWindowInfo* key, int32_t outLen);
void* decodeSResultWindowInfo(void* buf, SResultWindowInfo* key, int32_t outLen);
int32_t encodeSTimeWindowAggSupp(void** buf, STimeWindowAggSupp* pTwAggSup);
void* decodeSTimeWindowAggSupp(void* buf, STimeWindowAggSupp* pTwAggSup);

void destroyOperatorParamValue(void* pValues);
int32_t mergeOperatorParams(SOperatorParam* pDst, SOperatorParam* pSrc);
int32_t buildTableScanOperatorParam(SOperatorParam** ppRes, SArray* pUidList, int32_t srcOpType, bool tableSeq);
@@ -152,6 +152,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys

SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);

SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);

SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);

SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo);
@@ -519,6 +519,8 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT == type) {
pOptr = createStreamEventAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN == type) {
@@ -1127,7 +1127,8 @@ static bool isSessionWindow(SStreamScanInfo* pInfo) {
}

static bool isStateWindow(SStreamScanInfo* pInfo) {
return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE ||
pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT;
}

static bool isIntervalWindow(SStreamScanInfo* pInfo) {

@@ -2243,6 +2244,7 @@ FETCH_NEXT_BLOCK:
blockDataCleanup(pSup->pScanBlock);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
pInfo->pUpdateRes->info.type = STREAM_DELETE_DATA;
printSpecDataBlock(pInfo->pUpdateRes, getStreamOpName(pOperator->operatorType), "rebuild", GET_TASKID(pTaskInfo));
return pInfo->pUpdateRes;
}
@ -0,0 +1,751 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "executorInt.h"
|
||||
#include "filter.h"
|
||||
#include "function.h"
|
||||
#include "functionMgt.h"
|
||||
#include "operator.h"
|
||||
#include "querytask.h"
|
||||
#include "tchecksum.h"
|
||||
#include "tcommon.h"
|
||||
#include "tcompare.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tfill.h"
|
||||
#include "tglobal.h"
|
||||
#include "tlog.h"
|
||||
#include "ttime.h"
|
||||
|
||||
#define IS_FINAL_EVENT_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_EVENT)
|
||||
#define STREAM_EVENT_OP_STATE_NAME "StreamEventHistoryState"
|
||||
#define STREAM_EVENT_OP_CHECKPOINT_NAME "StreamEventOperator_Checkpoint"
|
||||
|
||||
typedef struct SEventWinfowFlag {
|
||||
bool startFlag;
|
||||
bool endFlag;
|
||||
} SEventWinfowFlag;
|
||||
|
||||
typedef struct SEventWindowInfo {
|
||||
SResultWindowInfo winInfo;
|
||||
SEventWinfowFlag* pWinFlag;
|
||||
} SEventWindowInfo;
|
||||
|
||||
void destroyStreamEventOperatorInfo(void* param) {
|
||||
SStreamEventAggOperatorInfo* pInfo = (SStreamEventAggOperatorInfo*)param;
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
destroyStreamAggSupporter(&pInfo->streamAggSup);
|
||||
clearGroupResInfo(&pInfo->groupResInfo);
|
||||
cleanupExprSupp(&pInfo->scalarSupp);
|
||||
if (pInfo->pChildren != NULL) {
|
||||
int32_t size = taosArrayGetSize(pInfo->pChildren);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
|
||||
destroyOperator(pChild);
|
||||
}
|
||||
taosArrayDestroy(pInfo->pChildren);
|
||||
}
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
blockDataDestroy(pInfo->pDelRes);
|
||||
tSimpleHashCleanup(pInfo->pSeUpdated);
|
||||
tSimpleHashCleanup(pInfo->pAllUpdated);
|
||||
tSimpleHashCleanup(pInfo->pSeDeleted);
|
||||
pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
|
||||
taosArrayDestroy(pInfo->historyWins);
|
||||
blockDataDestroy(pInfo->pCheckpointRes);
|
||||
|
||||
if (pInfo->pStartCondInfo != NULL) {
|
||||
filterFreeInfo(pInfo->pStartCondInfo);
|
||||
pInfo->pStartCondInfo = NULL;
|
||||
}
|
||||
|
||||
if (pInfo->pEndCondInfo != NULL) {
|
||||
filterFreeInfo(pInfo->pEndCondInfo);
|
||||
pInfo->pEndCondInfo = NULL;
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
void setEventWindowFlag(SStreamAggSupporter* pAggSup, SEventWindowInfo* pWinInfo) {
|
||||
char* pFlagInfo = (char*)pWinInfo->winInfo.pStatePos->pRowBuff + (pAggSup->resultRowSize - pAggSup->stateKeySize);
|
||||
pWinInfo->pWinFlag = (SEventWinfowFlag*) pFlagInfo;
|
||||
}
|
||||
|
||||
void setEventWindowInfo(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SRowBuffPos* pPos, SEventWindowInfo* pWinInfo) {
|
||||
pWinInfo->winInfo.sessionWin = *pKey;
|
||||
pWinInfo->winInfo.pStatePos = pPos;
|
||||
setEventWindowFlag(pAggSup, pWinInfo);
|
||||
}
|
||||
|
||||
int32_t getEndCondIndex(bool* pEnd, int32_t start, int32_t rows) {
|
||||
for (int32_t i = start; i < rows; i++) {
|
||||
if (pEnd[i]) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
void setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd, int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t size = pAggSup->resultRowSize;
|
||||
TSKEY ts = pTs [index];
|
||||
bool start = pStart[index];
|
||||
bool end = pEnd[index];
|
||||
pCurWin->winInfo.sessionWin.groupId = groupId;
|
||||
pCurWin->winInfo.sessionWin.win.skey = ts;
|
||||
pCurWin->winInfo.sessionWin.win.ekey = ts;
|
||||
SStreamStateCur* pCur = pAggSup->stateStore.streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, &pCurWin->winInfo.sessionWin);
|
||||
SSessionKey leftWinKey = {.groupId = groupId};
|
||||
void* pVal = NULL;
|
||||
int32_t len = 0;
|
||||
code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &leftWinKey, &pVal, &len);
|
||||
if (code == TSDB_CODE_SUCCESS && inWinRange(&pAggSup->winRange, &leftWinKey.win) ) {
|
||||
bool inWin = isInTimeWindow(&leftWinKey.win, ts, 0);
|
||||
setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin);
|
||||
if(inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag) ) {
|
||||
pCurWin->winInfo.isOutput = true;
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
pAggSup->stateStore.streamStateFreeCur(pCur);
|
||||
pCur = pAggSup->stateStore.streamStateSessionSeekKeyNext(pAggSup->pState, &pCurWin->winInfo.sessionWin);
|
||||
SSessionKey rightWinKey = {.groupId = groupId};
|
||||
code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &rightWinKey, &pVal, &len);
|
||||
bool inWin = isInTimeWindow(&rightWinKey.win, ts, 0);
|
||||
if (code == TSDB_CODE_SUCCESS && inWinRange(&pAggSup->winRange, &rightWinKey.win) && (inWin || (start && !end))) {
|
||||
int32_t endi = getEndCondIndex(pEnd, index, rows);
|
||||
if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) {
|
||||
setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin);
|
||||
pCurWin->winInfo.isOutput = true;
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
|
||||
SSessionKey winKey = {.win.skey = ts, .win.ekey = ts, .groupId = groupId};
|
||||
pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len);
|
||||
setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin);
|
||||
pCurWin->pWinFlag->startFlag = start;
|
||||
pCurWin->pWinFlag->endFlag = end;
|
||||
pCurWin->winInfo.isOutput = false;
|
||||
|
||||
_end:
|
||||
pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pCur);
|
||||
pNextWinKey->groupId = groupId;
|
||||
code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, pNextWinKey, NULL, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
SET_SESSION_WIN_KEY_INVALID(pNextWinKey);
|
||||
}
|
||||
if(pCurWin->winInfo.pStatePos->needFree) {
|
||||
pAggSup->stateStore.streamStateSessionDel(pAggSup->pState, &pCurWin->winInfo.sessionWin);
|
||||
}
|
||||
pAggSup->stateStore.streamStateFreeCur(pCur);
|
||||
qDebug("===stream===set event next win buff. skey:%" PRId64 ", endkey:%" PRId64, pCurWin->winInfo.sessionWin.win.skey,
|
||||
pCurWin->winInfo.sessionWin.win.ekey);
|
||||
}
|
||||
|
||||
int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pWinInfo, SSessionKey* pNextWinKey,
|
||||
TSKEY* pTsData, bool* starts, bool* ends, int32_t rows, int32_t start, SSHashObj* pResultRows,
|
||||
SSHashObj* pStUpdated, SSHashObj* pStDeleted, bool* pRebuild) {
|
||||
*pRebuild = false;
|
||||
if (!pWinInfo->pWinFlag->startFlag && !(starts[start]) ) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
TSKEY maxTs = INT64_MAX;
|
||||
STimeWindow* pWin = &pWinInfo->winInfo.sessionWin.win;
|
||||
if (pWinInfo->pWinFlag->endFlag) {
|
||||
maxTs = pWin->ekey + 1;
|
||||
}
|
||||
if (!IS_INVALID_SESSION_WIN_KEY(*pNextWinKey)) {
|
||||
maxTs = TMIN(maxTs, pNextWinKey->win.skey);
|
||||
}
|
||||
|
||||
for (int32_t i = start; i < rows; ++i) {
|
||||
if (pTsData[i] >= maxTs) {
|
||||
return i - 1 - start;
|
||||
}
|
||||
|
||||
if (pWin->skey > pTsData[i]) {
|
||||
if (pStDeleted && pWinInfo->winInfo.isOutput) {
|
||||
saveDeleteRes(pStDeleted, pWinInfo->winInfo.sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pStUpdated, pResultRows, &pWinInfo->winInfo.sessionWin);
|
||||
pWin->skey = pTsData[i];
|
||||
pWinInfo->pWinFlag->startFlag = starts[i];
|
||||
} else if (pWin->skey == pTsData[i]) {
|
||||
pWinInfo->pWinFlag->startFlag |= starts[i];
|
||||
}
|
||||
|
||||
if (pWin->ekey < pTsData[i]) {
|
||||
pWin->ekey = pTsData[i];
|
||||
pWinInfo->pWinFlag->endFlag = ends[i];
|
||||
} else if (pWin->ekey == pTsData[i]) {
|
||||
pWinInfo->pWinFlag->endFlag |= ends[i];
|
||||
}
|
||||
|
||||
memcpy(pWinInfo->winInfo.pStatePos->pKey, &pWinInfo->winInfo.sessionWin, sizeof(SSessionKey));
|
||||
|
||||
if (ends[i]) {
|
||||
if (pWinInfo->pWinFlag->endFlag && pWin->skey <= pTsData[i] && pTsData[i] < pWin->ekey ) {
|
||||
*pRebuild = true;
|
||||
}
|
||||
return i + 1 - start;
|
||||
}
|
||||
}
|
||||
return rows - start;
|
||||
}
|
||||
|
||||
static int32_t compactEventWindow(SOperatorInfo* pOperator, SEventWindowInfo* pCurWin, SSHashObj* pStUpdated,
|
||||
SSHashObj* pStDeleted, bool addGap) {
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
|
||||
int32_t winNum = 0;
|
||||
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
|
||||
SResultRow* pCurResult = NULL;
|
||||
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
while (1) {
|
||||
if (!pCurWin->pWinFlag->startFlag || pCurWin->pWinFlag->endFlag) {
|
||||
break;
|
||||
}
|
||||
SEventWindowInfo nextWinInfo = {0};
|
||||
getNextSessionWinInfo(pAggSup, pStUpdated, &pCurWin->winInfo, &nextWinInfo.winInfo);
|
||||
if (!IS_VALID_SESSION_WIN(nextWinInfo.winInfo) || !inWinRange(&pAggSup->winRange, &nextWinInfo.winInfo.sessionWin.win)) {
|
||||
releaseOutputBuf(pAggSup->pState, nextWinInfo.winInfo.pStatePos, &pAggSup->pSessionAPI->stateStore);
|
||||
break;
|
||||
}
|
||||
setEventWindowFlag(pAggSup, &nextWinInfo);
|
||||
compactTimeWindow(pSup, pAggSup, &pInfo->twAggSup, pTaskInfo, &pCurWin->winInfo, &nextWinInfo.winInfo, pStUpdated, pStDeleted, false);
|
||||
pCurWin->pWinFlag->endFlag = nextWinInfo.pWinFlag->endFlag;
|
||||
winNum++;
|
||||
}
|
||||
return winNum;
|
||||
}
|
||||
|
||||
static bool isWindowIncomplete(SEventWindowInfo* pWinInfo) {
|
||||
return !(pWinInfo->pWinFlag->startFlag && pWinInfo->pWinFlag->endFlag);
|
||||
}
|
||||
|
||||
bool doDeleteEventWindow(SStreamAggSupporter* pAggSup, SSHashObj* pSeUpdated, SSessionKey* pKey) {
|
||||
pAggSup->stateStore.streamStateSessionDel(pAggSup->pState, pKey);
|
||||
removeSessionResult(pAggSup, pSeUpdated, pAggSup->pResultRows, pKey);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pSeUpdated,
|
||||
SSHashObj* pStDeleted) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
|
||||
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
|
||||
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
|
||||
uint64_t groupId = pSDataBlock->info.id.groupId;
|
||||
int64_t code = TSDB_CODE_SUCCESS;
|
||||
TSKEY* tsCols = NULL;
|
||||
SResultRow* pResult = NULL;
|
||||
int32_t winRows = 0;
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
SColumnInfoData* pColStart = NULL;
|
||||
SColumnInfoData* pColEnd = NULL;
|
||||
|
||||
pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);
|
||||
pAggSup->winRange = pTaskInfo->streamInfo.fillHistoryWindow;
|
||||
if (pAggSup->winRange.ekey <= 0) {
|
||||
pAggSup->winRange.ekey = INT64_MAX;
|
||||
}
|
||||
|
||||
if (pSDataBlock->pDataBlock != NULL) {
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
|
||||
tsCols = (int64_t*)pColDataInfo->pData;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
SFilterColumnParam paramStart = {.numOfCols = taosArrayGetSize(pSDataBlock->pDataBlock), .pDataBlock = pSDataBlock->pDataBlock};
|
||||
code = filterSetDataFromSlotId(pInfo->pStartCondInfo, ¶mStart);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("set data from start slotId error.");
|
||||
goto _end;
|
||||
}
|
||||
int32_t statusStart = 0;
|
||||
filterExecute(pInfo->pStartCondInfo, pSDataBlock, &pColStart, NULL, paramStart.numOfCols, &statusStart);
|
||||
|
||||
SFilterColumnParam paramEnd = {.numOfCols = taosArrayGetSize(pSDataBlock->pDataBlock), .pDataBlock = pSDataBlock->pDataBlock};
|
||||
code = filterSetDataFromSlotId(pInfo->pEndCondInfo, ¶mEnd);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("set data from end slotId error.");
|
||||
goto _end;
|
||||
}
|
||||
|
||||
int32_t statusEnd = 0;
|
||||
filterExecute(pInfo->pEndCondInfo, pSDataBlock, &pColEnd, NULL, paramEnd.numOfCols, &statusEnd);
|
||||
|
||||
int32_t rows = pSDataBlock->info.rows;
|
||||
blockDataEnsureCapacity(pAggSup->pScanBlock, rows);
|
||||
for (int32_t i = 0; i < rows; i += winRows) {
|
||||
if (pInfo->ignoreExpiredData && isOverdue(tsCols[i], &pInfo->twAggSup)) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
int32_t winIndex = 0;
|
||||
bool allEqual = true;
|
||||
SEventWindowInfo curWin = {0};
|
||||
SSessionKey nextWinKey = {0};
|
||||
setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin, &nextWinKey);
|
||||
setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo);
|
||||
bool rebuild = false;
|
||||
winRows = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData, rows, i,
|
||||
pAggSup->pResultRows, pSeUpdated, pStDeleted, &rebuild);
|
||||
ASSERT(winRows >= 1);
|
||||
if (rebuild) {
|
||||
uint64_t uid = 0;
|
||||
appendOneRowToStreamSpecialBlock(pAggSup->pScanBlock, &curWin.winInfo.sessionWin.win.skey,
|
||||
&curWin.winInfo.sessionWin.win.ekey, &uid, &groupId, NULL);
|
||||
tSimpleHashRemove(pSeUpdated, &curWin.winInfo.sessionWin, sizeof(SSessionKey));
|
||||
doDeleteEventWindow(pAggSup, pSeUpdated, &curWin.winInfo.sessionWin);
|
||||
releaseOutputBuf(pAggSup->pState, curWin.winInfo.pStatePos, &pAPI->stateStore);
|
||||
continue;
|
||||
}
|
||||
code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &curWin.winInfo, &pResult, i, winRows, rows, numOfOutput,
|
||||
pOperator, 0);
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
compactEventWindow(pOperator, &curWin, pInfo->pSeUpdated, pInfo->pSeDeleted, false);
|
||||
saveSessionOutputBuf(pAggSup, &curWin.winInfo);
|
||||
|
||||
if (pInfo->isHistoryOp) {
|
||||
saveResult(curWin.winInfo, pInfo->pAllUpdated);
|
||||
}
|
||||
|
||||
if (isWindowIncomplete(&curWin)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
|
||||
code = saveResult(curWin.winInfo, pSeUpdated);
|
||||
}
|
||||
|
||||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
SSessionKey key = {0};
|
||||
getSessionHashKey(&curWin.winInfo.sessionWin, &key);
|
||||
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
}
|
||||
|
||||
_end:
|
||||
colDataDestroy(pColStart);
|
||||
taosMemoryFree(pColStart);
|
||||
colDataDestroy(pColEnd);
|
||||
taosMemoryFree(pColEnd);
|
||||
}
|
||||
|
||||
int32_t doStreamEventEncodeOpState(void** buf, int32_t len, SOperatorInfo* pOperator) {
|
||||
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
|
||||
if (!pInfo) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
void* pData = (buf == NULL) ? NULL : *buf;
|
||||
|
||||
// 1.streamAggSup.pResultRows
|
||||
int32_t tlen = 0;
|
||||
int32_t mapSize = tSimpleHashGetSize(pInfo->streamAggSup.pResultRows);
|
||||
tlen += taosEncodeFixedI32(buf, mapSize);
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pInfo->streamAggSup.pResultRows, pIte, &iter)) != NULL) {
|
||||
void* key = tSimpleHashGetKey(pIte, &keyLen);
|
||||
tlen += encodeSSessionKey(buf, key);
|
||||
tlen += encodeSResultWindowInfo(buf, pIte, pInfo->streamAggSup.resultRowSize);
|
||||
}
|
||||
|
||||
// 2.twAggSup
|
||||
tlen += encodeSTimeWindowAggSupp(buf, &pInfo->twAggSup);
|
||||
|
||||
// 3.dataVersion
|
||||
tlen += taosEncodeFixedI32(buf, pInfo->dataVersion);
|
||||
|
||||
// 4.checksum
|
||||
if (buf) {
|
||||
uint32_t cksum = taosCalcChecksum(0, pData, len - sizeof(uint32_t));
|
||||
tlen += taosEncodeFixedU32(buf, cksum);
|
||||
} else {
|
||||
tlen += sizeof(uint32_t);
|
||||
}
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
void* doStreamEventDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperator) {
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  if (!pInfo) {
    return buf;
  }

  // 4.checksum
  int32_t dataLen = len - sizeof(uint32_t);
  void*   pCksum = POINTER_SHIFT(buf, dataLen);
  if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) {
    ASSERT(0);  // debug
    qError("stream event state is invalid");
    return buf;
  }

  // 1.streamAggSup.pResultRows
  int32_t mapSize = 0;
  buf = taosDecodeFixedI32(buf, &mapSize);
  for (int32_t i = 0; i < mapSize; i++) {
    SSessionKey       key = {0};
    SResultWindowInfo winfo = {0};
    buf = decodeSSessionKey(buf, &key);
    buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
    tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
  }

  // 2.twAggSup
  buf = decodeSTimeWindowAggSupp(buf, &pInfo->twAggSup);

  // 3.dataVersion
  buf = taosDecodeFixedI64(buf, &pInfo->dataVersion);
  return buf;
}

void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) {
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  int32_t                      len = doStreamEventEncodeOpState(NULL, 0, pOperator);
  void*                        buf = taosMemoryCalloc(1, len);
  void*                        pBuf = buf;
  len = doStreamEventEncodeOpState(&pBuf, len, pOperator);
  pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_EVENT_OP_CHECKPOINT_NAME,
                                                     strlen(STREAM_EVENT_OP_CHECKPOINT_NAME), buf, len);
  taosMemoryFree(buf);
}

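The checkpoint path above sizes the state with a first encode pass (buf == NULL), serializes into the allocated buffer on a second pass, appends a checksum over everything written before it, and the decoder verifies that checksum before touching the payload. Below is a minimal standalone sketch of that two-pass pattern; the encoding helpers and the FNV-1a checksum are illustrative stand-ins, not the TDengine serialization API.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Illustrative checksum (FNV-1a); the real code uses its own checksum routine.
static uint32_t fnv1a(const uint8_t* p, size_t len) {
  uint32_t h = 2166136261u;
  for (size_t i = 0; i < len; i++) { h ^= p[i]; h *= 16777619u; }
  return h;
}

// Pass 1: buf == NULL, only count bytes. Pass 2: write into *buf and advance it.
static int32_t encodeI32(uint8_t** buf, int32_t v) {
  if (buf) { memcpy(*buf, &v, sizeof(v)); *buf += sizeof(v); }
  return (int32_t)sizeof(v);
}

static int32_t encodeState(uint8_t** buf, int32_t version, int32_t rows, int32_t totalLen) {
  uint8_t* start = buf ? *buf : NULL;
  int32_t  tlen = 0;
  tlen += encodeI32(buf, version);
  tlen += encodeI32(buf, rows);
  if (buf) {  // trailing checksum covers every byte written so far (totalLen from the sizing pass)
    uint32_t cksum = fnv1a(start, totalLen - sizeof(uint32_t));
    memcpy(*buf, &cksum, sizeof(cksum));
    *buf += sizeof(cksum);
  }
  tlen += (int32_t)sizeof(uint32_t);
  return tlen;
}

static int decodeState(const uint8_t* buf, int32_t len, int32_t* version, int32_t* rows) {
  int32_t  dataLen = len - (int32_t)sizeof(uint32_t);
  uint32_t stored;
  memcpy(&stored, buf + dataLen, sizeof(stored));
  if (stored != fnv1a(buf, dataLen)) return -1;  // reject corrupted state before decoding
  memcpy(version, buf, sizeof(int32_t));
  memcpy(rows, buf + sizeof(int32_t), sizeof(int32_t));
  return 0;
}

int main(void) {
  int32_t  len = encodeState(NULL, 0, 0, 0);  // sizing pass
  uint8_t* blob = calloc(1, len);
  uint8_t* p = blob;
  encodeState(&p, 1, 42, len);                // writing pass
  int32_t version = 0, rows = 0;
  printf("rc=%d version=%d rows=%d\n", decodeState(blob, len, &version, &rows), version, rows);
  free(blob);
  return 0;
}
```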
static SSDataBlock* buildEventResult(SOperatorInfo* pOperator) {
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  SOptrBasicInfo*              pBInfo = &pInfo->binfo;
  SExecTaskInfo*               pTaskInfo = pOperator->pTaskInfo;

  doBuildDeleteDataBlock(pOperator, pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
  if (pInfo->pDelRes->info.rows > 0) {
    printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
    return pInfo->pDelRes;
  }

  doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes);
  if (pBInfo->pRes->info.rows > 0) {
    printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
    return pBInfo->pRes;
  }
  return NULL;
}

static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  }

  SExprSupp*                   pSup = &pOperator->exprSupp;
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  SOptrBasicInfo*              pBInfo = &pInfo->binfo;
  SExecTaskInfo*               pTaskInfo = pOperator->pTaskInfo;
  qDebug("===stream=== stream event agg");
  if (pOperator->status == OP_RES_TO_RETURN) {
    SSDataBlock* resBlock = buildEventResult(pOperator);
    if (resBlock != NULL) {
      return resBlock;
    }

    if (pInfo->reCkBlock) {
      pInfo->reCkBlock = false;
      printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
      return pInfo->pCheckpointRes;
    }

    setOperatorCompleted(pOperator);
    return NULL;
  }

  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (!pInfo->pUpdated) {
    pInfo->pUpdated = taosArrayInit(16, sizeof(SEventWindowInfo));
  }
  if (!pInfo->pSeUpdated) {
    _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
    pInfo->pSeUpdated = tSimpleHashInit(64, hashFn);
  }
  while (1) {
    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
    if (pBlock == NULL) {
      break;
    }
    printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo));

    if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
        pBlock->info.type == STREAM_CLEAR) {
      deleteSessionWinState(&pInfo->streamAggSup, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted);
      continue;
    } else if (pBlock->info.type == STREAM_GET_ALL) {
      getAllSessionWindow(pInfo->streamAggSup.pResultRows, pInfo->pSeUpdated);
      continue;
    } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
      return pBlock;
    } else if (pBlock->info.type == STREAM_CHECKPOINT) {
      pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
      doStreamEventSaveCheckpoint(pOperator);
      pInfo->reCkBlock = true;
      copyDataBlock(pInfo->pCheckpointRes, pBlock);
      continue;
    } else {
      ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type");
    }

    if (pInfo->scalarSupp.pExprInfo != NULL) {
      SExprSupp* pExprSup = &pInfo->scalarSupp;
      projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
    }
    // the pDataBlock are always the same one, no need to call this again
    setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
    doStreamEventAggImpl(pOperator, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted);
    pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
  }
  // restore the value
  pOperator->status = OP_RES_TO_RETURN;

  closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pInfo->pSeUpdated);
  copyUpdateResult(&pInfo->pSeUpdated, pInfo->pUpdated, sessionKeyCompareAsc);
  removeSessionDeleteResults(pInfo->pSeDeleted, pInfo->pUpdated);

  if (pInfo->isHistoryOp) {
    SArray* pHisWins = taosArrayInit(16, sizeof(SEventWindowInfo));
    copyUpdateResult(&pInfo->pAllUpdated, pHisWins, sessionKeyCompareAsc);
    getMaxTsWins(pHisWins, pInfo->historyWins);
    taosArrayDestroy(pHisWins);
  }

  initGroupResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated);
  pInfo->pUpdated = NULL;
  blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);

  SSDataBlock* resBlock = buildEventResult(pOperator);
  if (resBlock != NULL) {
    return resBlock;
  }
  setOperatorCompleted(pOperator);
  return NULL;
}

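doStreamEventAgg follows the pull-based pattern used by these stream operators: drain every block from the downstream child, fold each one into the window state, then flip to OP_RES_TO_RETURN and emit result blocks one at a time on later calls. A compact sketch of that control flow is shown below, with hypothetical stand-in types instead of the real executor structures.

```c
#include <stddef.h>

// Hypothetical stand-ins for the executor types; illustrative only.
typedef enum { OP_NOT_OPENED_SK, OP_RES_TO_RETURN_SK, OP_EXEC_DONE_SK } OpStatusSketch;
typedef struct Block Block;
typedef struct Operator {
  OpStatusSketch   status;
  struct Operator* child;
  Block* (*getNext)(struct Operator*);
} Operator;

static void   aggregateBlock(Block* b) { (void)b; /* fold rows into per-window state */ }
static Block* buildResult(void)        { return NULL; /* pop the next finished window */ }

// Two-phase pull: consume all input once, then emit buffered results.
static Block* doAgg(Operator* op) {
  if (op->status == OP_EXEC_DONE_SK) return NULL;

  if (op->status == OP_RES_TO_RETURN_SK) {   // emit phase: one result block per call
    Block* res = buildResult();
    if (res == NULL) op->status = OP_EXEC_DONE_SK;
    return res;
  }

  Block* in;
  while ((in = op->child->getNext(op->child)) != NULL) {
    aggregateBlock(in);                      // consume phase: update window aggregates
  }
  op->status = OP_RES_TO_RETURN_SK;          // input exhausted, switch to emit phase

  Block* res = buildResult();
  if (res == NULL) op->status = OP_EXEC_DONE_SK;
  return res;
}
```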
void streamEventReleaseState(SOperatorInfo* pOperator) {
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  int32_t                      winSize = taosArrayGetSize(pInfo->historyWins) * sizeof(SSessionKey);
  int32_t                      resSize = winSize + sizeof(TSKEY);
  char*                        pBuff = taosMemoryCalloc(1, resSize);
  memcpy(pBuff, pInfo->historyWins->pData, winSize);
  memcpy(pBuff + winSize, &pInfo->twAggSup.maxTs, sizeof(TSKEY));
  qDebug("===stream=== event window operator relase state. save result count:%d", (int32_t)taosArrayGetSize(pInfo->historyWins));
  pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_EVENT_OP_STATE_NAME,
                                                     strlen(STREAM_EVENT_OP_STATE_NAME), pBuff, resSize);
  pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
  taosMemoryFreeClear(pBuff);

  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (downstream->fpSet.releaseStreamStateFn) {
    downstream->fpSet.releaseStreamStateFn(downstream);
  }
}

void streamEventReloadState(SOperatorInfo* pOperator) {
  SStreamEventAggOperatorInfo* pInfo = pOperator->info;
  SStreamAggSupporter*         pAggSup = &pInfo->streamAggSup;
  resetWinRange(&pAggSup->winRange);

  SSessionKey seKey = {.win.skey = INT64_MIN, .win.ekey = INT64_MIN, .groupId = 0};
  int32_t     size = 0;
  void*       pBuf = NULL;
  int32_t     code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_EVENT_OP_STATE_NAME,
                                                            strlen(STREAM_EVENT_OP_STATE_NAME), &pBuf, &size);
  int32_t     num = (size - sizeof(TSKEY)) / sizeof(SSessionKey);
  qDebug("===stream=== event window operator reload state. get result count:%d", num);
  SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf;
  ASSERT(size == num * sizeof(SSessionKey) + sizeof(TSKEY));

  TSKEY ts = *(TSKEY*)((char*)pBuf + size - sizeof(TSKEY));
  pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, ts);
  pAggSup->stateStore.streamStateReloadInfo(pAggSup->pState, ts);

  if (!pInfo->pSeUpdated && num > 0) {
    _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
    pInfo->pSeUpdated = tSimpleHashInit(64, hashFn);
  }
  if (!pInfo->pSeDeleted && num > 0) {
    _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
    pInfo->pSeDeleted = tSimpleHashInit(64, hashFn);
  }
  for (int32_t i = 0; i < num; i++) {
    SEventWindowInfo curInfo = {0};
    qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", pSeKeyBuf[i].win.skey,
           pSeKeyBuf[i].groupId, i);
    getSessionWindowInfoByKey(pAggSup, pSeKeyBuf + i, &curInfo.winInfo);
    setEventWindowFlag(pAggSup, &curInfo);
    if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) {
      continue;
    }

    compactEventWindow(pOperator, &curInfo, pInfo->pSeUpdated, pInfo->pSeDeleted, false);
    qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, curInfo.winInfo.sessionWin.win.skey,
           curInfo.winInfo.sessionWin.groupId);

    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
      saveResult(curInfo.winInfo, pInfo->pSeUpdated);
    } else if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
      if (!isCloseWindow(&curInfo.winInfo.sessionWin.win, &pInfo->twAggSup)) {
        saveDeleteRes(pInfo->pSeDeleted, curInfo.winInfo.sessionWin);
      }
      SSessionKey key = {0};
      getSessionHashKey(&curInfo.winInfo.sessionWin, &key);
      tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curInfo.winInfo, sizeof(SResultWindowInfo));
    }

    if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
      saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
    }
  }
  taosMemoryFree(pBuf);

  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (downstream->fpSet.reloadStreamStateFn) {
    downstream->fpSet.reloadStreamStateFn(downstream);
  }
}

SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
                                                SExecTaskInfo* pTaskInfo, SReadHandle* pHandle) {
  SStreamEventWinodwPhysiNode* pEventNode = (SStreamEventWinodwPhysiNode*)pPhyNode;
  int32_t                      tsSlotId = ((SColumnNode*)pEventNode->window.pTspk)->slotId;
  int32_t                      code = TSDB_CODE_SUCCESS;
  SStreamEventAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamEventAggOperatorInfo));
  SOperatorInfo*               pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pInfo == NULL || pOperator == NULL) {
    code = TSDB_CODE_OUT_OF_MEMORY;
    goto _error;
  }

  initResultSizeInfo(&pOperator->resultInfo, 4096);
  if (pEventNode->window.pExprs != NULL) {
    int32_t    numOfScalar = 0;
    SExprInfo* pScalarExprInfo = createExprInfo(pEventNode->window.pExprs, NULL, &numOfScalar);
    code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
    if (code != TSDB_CODE_SUCCESS) {
      goto _error;
    }
  }

  pInfo->twAggSup = (STimeWindowAggSupp){
      .waterMark = pEventNode->window.watermark,
      .calTrigger = pEventNode->window.triggerType,
      .maxTs = INT64_MIN,
      .minTs = INT64_MAX,
  };

  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);

  SExprSupp*   pExpSup = &pOperator->exprSupp;
  int32_t      numOfCols = 0;
  SExprInfo*   pExprInfo = createExprInfo(pEventNode->window.pFuncs, NULL, &numOfCols);
  SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc);
  code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState,
                                sizeof(bool) + sizeof(bool), 0, &pTaskInfo->storageAPI.stateStore, pHandle,
                                &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  pInfo->primaryTsIndex = tsSlotId;
  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
  pInfo->pSeDeleted = tSimpleHashInit(64, hashFn);
  pInfo->pDelIterator = NULL;
  pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
  pInfo->pChildren = NULL;
  pInfo->ignoreExpiredData = pEventNode->window.igExpired;
  pInfo->ignoreExpiredDataSaved = false;
  pInfo->pUpdated = NULL;
  pInfo->pSeUpdated = NULL;
  pInfo->dataVersion = 0;
  pInfo->historyWins = taosArrayInit(4, sizeof(SSessionKey));
  if (!pInfo->historyWins) {
    goto _error;
  }
  if (pHandle) {
    pInfo->isHistoryOp = pHandle->fillHistory;
  }

  if (pInfo->isHistoryOp) {
    _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
    pInfo->pAllUpdated = tSimpleHashInit(64, hashFn);
  } else {
    pInfo->pAllUpdated = NULL;
  }

  pInfo->pCheckpointRes = createSpecialDataBlock(STREAM_CHECKPOINT);
  pInfo->reCkBlock = false;

  // for stream
  void*   buff = NULL;
  int32_t len = 0;
  int32_t res =
      pInfo->streamAggSup.stateStore.streamStateGetInfo(pInfo->streamAggSup.pState, STREAM_EVENT_OP_CHECKPOINT_NAME,
                                                        strlen(STREAM_EVENT_OP_CHECKPOINT_NAME), &buff, &len);
  if (res == TSDB_CODE_SUCCESS) {
    doStreamEventDecodeOpState(buff, len, pOperator);
    taosMemoryFree(buff);
  }

  setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,
                  pInfo, pTaskInfo);
  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamEventAgg, NULL, destroyStreamEventOperatorInfo,
                                         optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
  setOperatorStreamStateFn(pOperator, streamEventReleaseState, streamEventReloadState);
  initDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, &pInfo->twAggSup);
  code = appendDownstream(pOperator, &downstream, 1);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  code = filterInitFromNode((SNode*)pEventNode->pStartCond, &pInfo->pStartCondInfo, 0);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }
  code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  return pOperator;

_error:
  destroyStreamEventOperatorInfo(pInfo);
  taosMemoryFreeClear(pOperator);
  pTaskInfo->code = code;
  return NULL;
}

@ -29,7 +29,7 @@

#define IS_FINAL_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL)
#define IS_FINAL_SESSION_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION)
#define DEAULT_DELETE_MARK (1000LL * 60LL * 60LL * 24LL * 365LL * 10LL);
#define DEAULT_DELETE_MARK INT64_MAX
#define STREAM_INTERVAL_OP_STATE_NAME "StreamIntervalHistoryState"
#define STREAM_SESSION_OP_STATE_NAME "StreamSessionHistoryState"
#define STREAM_STATE_OP_STATE_NAME "StreamStateHistoryState"

@ -146,7 +146,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
  return TSDB_CODE_SUCCESS;
}

static int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) {
int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) {
  winInfo.sessionWin.win.ekey = winInfo.sessionWin.win.skey;
  return tSimpleHashPut(pStUpdated, &winInfo.sessionWin, sizeof(SSessionKey), &winInfo, sizeof(SResultWindowInfo));
}

@ -853,6 +853,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
|
|||
pSup->rowEntryInfoOffset, &pInfo->aggSup, &pInfo->stateStore);
|
||||
pResult = (SResultRow*)pResPos->pRowBuff;
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
qError("%s set interval output buff error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
if (IS_FINAL_INTERVAL_OP(pOperator)) {
|
||||
|
@ -1082,7 +1083,6 @@ void doStreamIntervalDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
|
|||
int32_t dataLen = len - sizeof(uint32_t);
|
||||
void* pCksum = POINTER_SHIFT(buf, dataLen);
|
||||
if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) {
|
||||
ASSERT(0); // debug
|
||||
qError("stream interval state is invalid");
|
||||
return;
|
||||
}
|
||||
|
@ -1165,7 +1165,7 @@ static SSDataBlock* buildIntervalResult(SOperatorInfo* pOperator) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar) {
|
||||
int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar) {
|
||||
void* pIte = NULL;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(*ppWinUpdated, pIte, &iter)) != NULL) {
|
||||
|
@ -1243,7 +1243,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
tSimpleHashCleanup(pInfo->pUpdatedMap);
|
||||
pInfo->pUpdatedMap = NULL;
|
||||
}
|
||||
|
||||
qInfo("%s task is killed, code %s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
|
||||
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
|
||||
}
|
||||
|
||||
|
@ -1774,8 +1774,9 @@ int32_t reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SSHashObj* pResMap, SSessionKey key) {
|
||||
key.win.ekey = key.win.skey;
|
||||
void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SSHashObj* pResMap, SSessionKey* pKey) {
|
||||
SSessionKey key = {0};
|
||||
getSessionHashKey(pKey, &key);
|
||||
void* pVal = tSimpleHashGet(pHashMap, &key, sizeof(SSessionKey));
|
||||
if (pVal) {
|
||||
releaseOutputBuf(pAggSup->pState, *(void**)pVal, &pAggSup->pSessionAPI->stateStore);
|
||||
|
@ -1784,12 +1785,12 @@ static void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMa
|
|||
tSimpleHashRemove(pResMap, &key, sizeof(SSessionKey));
|
||||
}
|
||||
|
||||
static void getSessionHashKey(const SSessionKey* pKey, SSessionKey* pHashKey) {
|
||||
void getSessionHashKey(const SSessionKey* pKey, SSessionKey* pHashKey) {
|
||||
*pHashKey = *pKey;
|
||||
pHashKey->win.ekey = pKey->win.skey;
|
||||
}
|
||||
|
||||
static void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins) {
|
||||
void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins) {
|
||||
if (tSimpleHashGetSize(pHashMap) == 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -1803,7 +1804,7 @@ static void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins) {
|
|||
}
|
||||
}
|
||||
|
||||
static void removeSessionResults(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SArray* pWins) {
|
||||
void removeSessionResults(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SArray* pWins) {
|
||||
if (tSimpleHashGetSize(pHashMap) == 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -1832,7 +1833,7 @@ int32_t updateSessionWindowInfo(SStreamAggSupporter* pAggSup, SResultWindowInfo*
|
|||
if (pStDeleted && pWinInfo->isOutput) {
|
||||
saveDeleteRes(pStDeleted, pWinInfo->sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pStUpdated, pResultRows, pWinInfo->sessionWin);
|
||||
removeSessionResult(pAggSup, pStUpdated, pResultRows, &pWinInfo->sessionWin);
|
||||
pWinInfo->sessionWin.win.skey = pStartTs[i];
|
||||
}
|
||||
pWinInfo->sessionWin.win.ekey = TMAX(pWinInfo->sessionWin.win.ekey, pStartTs[i]);
|
||||
|
@ -1854,7 +1855,7 @@ static int32_t initSessionOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pR
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult,
|
||||
int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult,
|
||||
int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput,
|
||||
SOperatorInfo* pOperator, int64_t winDelta) {
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
|
@ -1876,7 +1877,7 @@ static bool doDeleteSessionWindow(SStreamAggSupporter* pAggSup, SSessionKey* pKe
|
|||
return true;
|
||||
}
|
||||
|
||||
static int32_t setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo* pWinInfo) {
|
||||
int32_t setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo* pWinInfo) {
|
||||
void* pVal = tSimpleHashGet(pStUpdated, &pWinInfo->sessionWin, sizeof(SSessionKey));
|
||||
if (pVal) {
|
||||
SResultWindowInfo* pWin = pVal;
|
||||
|
@ -1900,6 +1901,34 @@ void getNextSessionWinInfo(SStreamAggSupporter* pAggSup, SSHashObj* pStUpdated,
|
|||
pAggSup->stateStore.streamStateFreeCur(pCur);
|
||||
}
|
||||
|
||||
void compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwAggSup,
|
||||
SExecTaskInfo* pTaskInfo, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin,
|
||||
SSHashObj* pStUpdated, SSHashObj* pStDeleted, bool addGap) {
|
||||
SResultRow* pCurResult = NULL;
|
||||
int32_t numOfOutput = pSup->numOfExprs;
|
||||
initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
SResultRow* pWinResult = NULL;
|
||||
initSessionOutputBuf(pNextWin, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, pNextWin->sessionWin.win.ekey);
|
||||
memcpy(pCurWin->pStatePos->pKey, &pCurWin->sessionWin, sizeof(SSessionKey));
|
||||
|
||||
int64_t winDelta = 0;
|
||||
if (addGap) {
|
||||
winDelta = pAggSup->gap;
|
||||
}
|
||||
updateTimeWindowInfo(&pTwAggSup->timeWindowData, &pCurWin->sessionWin.win, winDelta);
|
||||
compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pTwAggSup->timeWindowData);
|
||||
tSimpleHashRemove(pStUpdated, &pNextWin->sessionWin, sizeof(SSessionKey));
|
||||
if (pNextWin->isOutput && pStDeleted) {
|
||||
qDebug("===stream=== save delete window info %" PRId64 ", %" PRIu64, pNextWin->sessionWin.win.skey,
|
||||
pNextWin->sessionWin.groupId);
|
||||
saveDeleteRes(pStDeleted, pNextWin->sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pStUpdated, pAggSup->pResultRows, &pNextWin->sessionWin);
|
||||
doDeleteSessionWindow(pAggSup, &pNextWin->sessionWin);
|
||||
releaseOutputBuf(pAggSup->pState, pNextWin->pStatePos, &pAggSup->pSessionAPI->stateStore);
|
||||
}
|
||||
|
||||
static int32_t compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCurWin, SSHashObj* pStUpdated,
|
||||
SSHashObj* pStDeleted, bool addGap) {
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
|
@ -1911,7 +1940,7 @@ static int32_t compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo*
|
|||
SResultRow* pCurResult = NULL;
|
||||
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
// initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
// Just look for the window behind StartIndex
|
||||
while (1) {
|
||||
SResultWindowInfo winInfo = {0};
|
||||
|
@ -1921,23 +1950,7 @@ static int32_t compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo*
|
|||
releaseOutputBuf(pAggSup->pState, winInfo.pStatePos, &pAggSup->pSessionAPI->stateStore);
|
||||
break;
|
||||
}
|
||||
SResultRow* pWinResult = NULL;
|
||||
initSessionOutputBuf(&winInfo, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, winInfo.sessionWin.win.ekey);
|
||||
memcpy(pCurWin->pStatePos->pKey, &pCurWin->sessionWin, sizeof(SSessionKey));
|
||||
int64_t winDelta = 0;
|
||||
if (addGap) {
|
||||
winDelta = pAggSup->gap;
|
||||
}
|
||||
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, winDelta);
|
||||
compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
|
||||
tSimpleHashRemove(pStUpdated, &winInfo.sessionWin, sizeof(SSessionKey));
|
||||
if (winInfo.isOutput && pStDeleted) {
|
||||
saveDeleteRes(pStDeleted, winInfo.sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pStUpdated, pAggSup->pResultRows, winInfo.sessionWin);
|
||||
doDeleteSessionWindow(pAggSup, &winInfo.sessionWin);
|
||||
releaseOutputBuf(pAggSup->pState, winInfo.pStatePos, &pAggSup->pSessionAPI->stateStore);
|
||||
compactTimeWindow(pSup, pAggSup, &pInfo->twAggSup, pTaskInfo, pCurWin, &winInfo, pStUpdated, pStDeleted, true);
|
||||
winNum++;
|
||||
}
|
||||
return winNum;
|
||||
|
@ -2023,6 +2036,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
|
|||
code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &winInfo, &pResult, i, winRows, rows, numOfOutput,
|
||||
pOperator, winDelta);
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
qError("%s do stream session aggregate impl error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
compactSessionWindow(pOperator, &winInfo, pStUpdated, pStDeleted, addGap);
|
||||
|
@ -2031,6 +2045,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
|
|||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) {
|
||||
code = saveResult(winInfo, pStUpdated);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s do stream session aggregate impl, set result error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
@ -2044,7 +2059,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
|
|||
}
|
||||
}
|
||||
|
||||
static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* result) {
|
||||
void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* result) {
|
||||
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
TSKEY* startDatas = (TSKEY*)pStartTsCol->pData;
|
||||
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
|
@ -2066,7 +2081,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
|
|||
}
|
||||
}
|
||||
|
||||
static inline int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2) {
|
||||
inline int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2) {
|
||||
SResultWindowInfo* pWinInfo1 = (SResultWindowInfo*)pKey1;
|
||||
SResultWindowInfo* pWinInfo2 = (SResultWindowInfo*)pKey2;
|
||||
SSessionKey* pWin1 = &pWinInfo1->sessionWin;
|
||||
|
@ -2496,8 +2511,7 @@ void* doStreamSessionDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
|
|||
int32_t dataLen = len - sizeof(uint32_t);
|
||||
void* pCksum = POINTER_SHIFT(buf, dataLen);
|
||||
if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) {
|
||||
ASSERT(0); // debug
|
||||
qError("stream interval state is invalid");
|
||||
qError("stream session state is invalid");
|
||||
return buf;
|
||||
}
|
||||
}
|
||||
|
@ -2565,6 +2579,12 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
return opRes;
|
||||
}
|
||||
|
||||
if (pInfo->reCkBlock) {
|
||||
pInfo->reCkBlock = false;
|
||||
printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
|
||||
return pInfo->pCheckpointRes;
|
||||
}
|
||||
|
||||
if (pInfo->recvGetAll) {
|
||||
pInfo->recvGetAll = false;
|
||||
resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows);
|
||||
|
@ -2637,6 +2657,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
SOperatorInfo* pChildOp =
|
||||
createStreamFinalSessionAggOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0, NULL);
|
||||
if (!pChildOp) {
|
||||
qError("%s create stream child of final session error", GET_TASKID(pTaskInfo));
|
||||
T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
taosArrayPush(pInfo->pChildren, &pChildOp);
|
||||
|
@ -2903,6 +2924,14 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
|
|||
pInfo->streamAggSup.stateStore.streamStateSessionClear(pInfo->streamAggSup.pState);
|
||||
}
|
||||
|
||||
void deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate, SSHashObj* pMapDelete) {
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
|
||||
doDeleteTimeWindows(pAggSup, pBlock, pWins);
|
||||
removeSessionResults(pAggSup, pMapUpdate, pWins);
|
||||
copyDeleteWindowInfo(pWins, pMapDelete);
|
||||
taosArrayDestroy(pWins);
|
||||
}
|
||||
|
||||
static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
||||
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
|
||||
SOptrBasicInfo* pBInfo = &pInfo->binfo;
|
||||
|
@ -2929,6 +2958,11 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
if (pInfo->reCkBlock) {
|
||||
pInfo->reCkBlock = false;
|
||||
printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
|
||||
return pInfo->pCheckpointRes;
|
||||
}
|
||||
clearFunctionContext(&pOperator->exprSupp);
|
||||
// semi session operator clear disk buffer
|
||||
clearStreamSessionOperator(pInfo);
|
||||
|
@ -2957,11 +2991,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
|||
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
|
||||
pBlock->info.type == STREAM_CLEAR) {
|
||||
// gap must be 0
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
|
||||
doDeleteTimeWindows(pAggSup, pBlock, pWins);
|
||||
removeSessionResults(pAggSup, pInfo->pStUpdated, pWins);
|
||||
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
|
||||
taosArrayDestroy(pWins);
|
||||
deleteSessionWinState(pAggSup, pBlock, pInfo->pStUpdated, pInfo->pStDeleted);
|
||||
pInfo->clearState = true;
|
||||
break;
|
||||
} else if (pBlock->info.type == STREAM_GET_ALL) {
|
||||
|
@ -3254,7 +3284,7 @@ int32_t updateStateWindowInfo(SStreamAggSupporter* pAggSup, SStateWindowInfo* pW
|
|||
if (pSeDeleted && pWinInfo->winInfo.isOutput) {
|
||||
saveDeleteRes(pSeDeleted, pWinInfo->winInfo.sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pSeUpdated, pResultRows, pWinInfo->winInfo.sessionWin);
|
||||
removeSessionResult(pAggSup, pSeUpdated, pResultRows, &pWinInfo->winInfo.sessionWin);
|
||||
pWinInfo->winInfo.sessionWin.win.skey = pTs[i];
|
||||
}
|
||||
pWinInfo->winInfo.sessionWin.win.ekey = TMAX(pWinInfo->winInfo.sessionWin.win.ekey, pTs[i]);
|
||||
|
@ -3324,6 +3354,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &curWin.winInfo, &pResult, i, winRows, rows, numOfOutput,
|
||||
pOperator, 0);
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
qError("%s do one window aggregate impl error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
saveSessionOutputBuf(pAggSup, &curWin.winInfo);
|
||||
|
@ -3331,6 +3362,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
|
||||
code = saveResult(curWin.winInfo, pSeUpdated);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s do stream state aggregate impl, set result error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
@ -3402,8 +3434,7 @@ void* doStreamStateDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
int32_t dataLen = len - sizeof(uint32_t);
|
||||
void* pCksum = POINTER_SHIFT(buf, dataLen);
|
||||
if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) {
|
||||
ASSERT(0); // debug
|
||||
qError("stream interval state is invalid");
|
||||
qError("stream state_window state is invalid");
|
||||
return buf;
|
||||
}
|
||||
}
|
||||
|
@ -3481,6 +3512,12 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
return resBlock;
|
||||
}
|
||||
|
||||
if (pInfo->reCkBlock) {
|
||||
pInfo->reCkBlock = false;
|
||||
printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
|
||||
return pInfo->pCheckpointRes;
|
||||
}
|
||||
|
||||
if (pInfo->recvGetAll) {
|
||||
pInfo->recvGetAll = false;
|
||||
resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows);
|
||||
|
@ -3507,11 +3544,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
|
||||
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
|
||||
pBlock->info.type == STREAM_CLEAR) {
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
|
||||
doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, pWins);
|
||||
removeSessionResults(&pInfo->streamAggSup, pInfo->pSeUpdated, pWins);
|
||||
copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
|
||||
taosArrayDestroy(pWins);
|
||||
deleteSessionWinState(&pInfo->streamAggSup, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted);
|
||||
continue;
|
||||
} else if (pBlock->info.type == STREAM_GET_ALL) {
|
||||
pInfo->recvGetAll = true;
|
||||
|
@ -3521,7 +3554,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
return pBlock;
|
||||
} else if (pBlock->info.type == STREAM_CHECKPOINT) {
|
||||
pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
|
||||
doStreamSessionSaveCheckpoint(pOperator);
|
||||
doStreamStateSaveCheckpoint(pOperator);
|
||||
copyDataBlock(pInfo->pCheckpointRes, pBlock);
|
||||
continue;
|
||||
} else {
|
||||
|
@ -3583,29 +3616,8 @@ static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCur
|
|||
SSHashObj* pStUpdated, SSHashObj* pStDeleted) {
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
|
||||
|
||||
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
|
||||
SResultRow* pCurResult = NULL;
|
||||
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
SResultRow* pWinResult = NULL;
|
||||
initSessionOutputBuf(pNextWin, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset);
|
||||
pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, pNextWin->sessionWin.win.ekey);
|
||||
memcpy(pCurWin->pStatePos->pKey, &pCurWin->sessionWin, sizeof(SSessionKey));
|
||||
|
||||
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, 1);
|
||||
compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
|
||||
tSimpleHashRemove(pStUpdated, &pNextWin->sessionWin, sizeof(SSessionKey));
|
||||
if (pNextWin->isOutput && pStDeleted) {
|
||||
qDebug("===stream=== save delete window info %" PRId64 ", %" PRIu64, pNextWin->sessionWin.win.skey,
|
||||
pNextWin->sessionWin.groupId);
|
||||
saveDeleteRes(pStDeleted, pNextWin->sessionWin);
|
||||
}
|
||||
removeSessionResult(pAggSup, pStUpdated, pAggSup->pResultRows, pNextWin->sessionWin);
|
||||
doDeleteSessionWindow(pAggSup, &pNextWin->sessionWin);
|
||||
releaseOutputBuf(pAggSup->pState, pNextWin->pStatePos, &pAggSup->pSessionAPI->stateStore);
|
||||
compactTimeWindow(pSup, &pInfo->streamAggSup, &pInfo->twAggSup, pTaskInfo, pCurWin, pNextWin, pStUpdated, pStDeleted, false);
|
||||
}
|
||||
|
||||
void streamStateReloadState(SOperatorInfo* pOperator) {
|
||||
|
@ -3777,12 +3789,6 @@ _error:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void cleanupAfterGroupResultGen(SMergeAlignedIntervalAggOperatorInfo* pMiaInfo, SSDataBlock* pRes) {
|
||||
pRes->info.id.groupId = pMiaInfo->groupId;
|
||||
pMiaInfo->curTs = INT64_MIN;
|
||||
pMiaInfo->groupId = 0;
|
||||
}
|
||||
|
||||
static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) {
|
||||
for (int i = 0; i < num; i++) {
|
||||
if (type == STREAM_INVERT) {
|
||||
|
|
|
@ -7363,7 +7363,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
  SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
  if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type ||
      !isTimeLineQuery(pStmt->pQuery) || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
      crossTableWithUdaf(pSelect) || isEventWindowQuery(pSelect) || hasJsonTypeProjection(pSelect)) {
      crossTableWithUdaf(pSelect) || hasJsonTypeProjection(pSelect)) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
  }
  if (NULL != pSelect->pSubtable && TSDB_DATA_TYPE_VARCHAR != ((SExprNode*)pSelect->pSubtable)->resType.type) {

@ -2784,11 +2784,44 @@ static int32_t splitCacheLastFuncOptCreateAggLogicNode(SAggLogicNode** pNewAgg,
|
|||
pNew->hasGroup = pAgg->hasGroup;
|
||||
pNew->node.pChildren = nodesCloneList(pAgg->node.pChildren);
|
||||
|
||||
SNode* pNode = NULL;
|
||||
FOREACH(pNode, pNew->node.pChildren) {
|
||||
int32_t code = 0;
|
||||
SNode* pNode = nodesListGetNode(pNew->node.pChildren, 0);
|
||||
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
|
||||
OPTIMIZE_FLAG_CLEAR_MASK(((SScanLogicNode*)pNode)->node.optimizedFlag, OPTIMIZE_FLAG_SCAN_PATH);
|
||||
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
|
||||
SNodeList* pOldScanCols = NULL;
|
||||
TSWAP(pScan->pScanCols, pOldScanCols);
|
||||
nodesDestroyList(pScan->pScanPseudoCols);
|
||||
pScan->pScanPseudoCols = NULL;
|
||||
nodesDestroyList(pScan->node.pTargets);
|
||||
pScan->node.pTargets = NULL;
|
||||
SNodeListNode* list = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
list->pNodeList = pFunc;
|
||||
code = nodesCollectColumnsFromNode((SNode*)list, NULL, COLLECT_COL_TYPE_COL, &pScan->pScanCols);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
nodesFree(list);
|
||||
bool found = false;
|
||||
FOREACH(pNode, pScan->pScanCols) {
|
||||
if (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pNode)->colId) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
FOREACH(pNode, pOldScanCols) {
|
||||
if (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pNode)->colId) {
|
||||
nodesListMakeStrictAppend(&pScan->pScanCols, nodesCloneNode(pNode));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
nodesDestroyList(pOldScanCols);
|
||||
code = createColumnByRewriteExprs(pScan->pScanCols, &pScan->node.pTargets);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
OPTIMIZE_FLAG_CLEAR_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_SCAN_PATH);
|
||||
}
|
||||
|
||||
*pNewAgg = pNew;
|
||||
|
|
|
@ -1083,6 +1083,15 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
|
|||
memmove(fraction, fraction + TSDB_TIME_PRECISION_SEC_DIGITS, TSDB_TIME_PRECISION_SEC_DIGITS);
|
||||
}
|
||||
|
||||
// trans current timezone's unix ts to dest timezone
|
||||
// offset = delta from dest timezone to zero
|
||||
// delta from zero to current timezone = 3600 * (cur)tsTimezone
|
||||
int64_t offset = 0;
|
||||
if (0 != offsetOfTimezone(tz, &offset)) {
|
||||
goto _end;
|
||||
}
|
||||
timeVal -= offset + 3600 * ((int64_t)tsTimezone);
|
||||
|
||||
struct tm tmInfo;
|
||||
int32_t len = 0;
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ int32_t schHandleOpBeginEvent(int64_t jobId, SSchJob** job, SCH_OP_TYPE type, SS
|
|||
SSchJob* pJob = schAcquireJob(jobId);
|
||||
if (NULL == pJob) {
|
||||
qDebug("Acquire sch job failed, may be dropped, jobId:0x%" PRIx64, jobId);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_JOB_NOT_EXISTS);
|
||||
}
|
||||
|
||||
*job = pJob;
|
||||
|
|
|
@ -122,7 +122,7 @@ int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** p
|
|||
int32_t streamDefaultDel_rocksdb(SStreamState* pState, const void* key);
|
||||
int32_t streamDefaultIterGet_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
|
||||
void* streamDefaultIterCreate_rocksdb(SStreamState* pState);
|
||||
int32_t streamDefaultIterValid_rocksdb(void* iter);
|
||||
bool streamDefaultIterValid_rocksdb(void* iter);
|
||||
void streamDefaultIterSeek_rocksdb(void* iter, const char* key);
|
||||
void streamDefaultIterNext_rocksdb(void* iter);
|
||||
char* streamDefaultIterKey_rocksdb(void* iter, int32_t* len);
|
||||
|
|
|
@ -2859,9 +2859,12 @@ void* streamDefaultIterCreate_rocksdb(SStreamState* pState) {
  pCur->number = pState->number;
  return pCur;
}
int32_t streamDefaultIterValid_rocksdb(void* iter) {
bool streamDefaultIterValid_rocksdb(void* iter) {
  if (iter) {
    return false;
  }
  SStreamStateCur* pCur = iter;
  return rocksdb_iter_valid(pCur->iter) ? 1 : 0;
  return (rocksdb_iter_valid(pCur->iter) && !iterValueIsStale(pCur->iter)) ? true : false;
}
void streamDefaultIterSeek_rocksdb(void* iter, const char* key) {
  SStreamStateCur* pCur = iter;

@ -1039,10 +1039,33 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) {
  return 0;
}

static void dispatchDataInFuture(void* param, void* tmrId) {
  SStreamTask* pTask = param;
  if (streamTaskShouldStop(pTask)) {
    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
    stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
    return;
  }

  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
  if (status == TASK_STATUS__CK) {
    stDebug("s-task:%s in checkpoint status, wait for 500ms to dispatch data downstream", pTask->id.idStr);
    taosTmrReset(dispatchDataInFuture, 500, pTask, streamEnv.timer, &pTask->msgInfo.pTimer);
  } else {
    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
    stDebug("s-task:%s start to dispatch data, jump out of timer, ref:%d", pTask->id.idStr, ref);
    streamDispatchStreamBlock(pTask);
  }
}

// this message has been sent successfully, let's try next one.
static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
  destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));

  bool delayDispatch = (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);

  pTask->msgInfo.pData = NULL;
  pTask->msgInfo.dispatchMsgType = 0;

  int64_t el = taosGetTimestampMs() - pTask->msgInfo.startTs;

@ -1059,7 +1082,18 @@ static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId
  atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);

  // otherwise, continue dispatch the first block to down stream task in pipeline
  if (delayDispatch) {
    int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
    stDebug("s-task:%s in checkpoint status, add in timer, try dispatch data in 500ms, ref:%d", pTask->id.idStr, ref);
    if (pTask->msgInfo.pTimer != NULL) {
      taosTmrReset(dispatchDataInFuture, 500, pTask, streamEnv.timer, &pTask->msgInfo.pTimer);
    } else {
      pTask->msgInfo.pTimer = taosTmrStart(dispatchDataInFuture, 500, pTask, streamEnv.timer);
    }
  } else {
    streamDispatchStreamBlock(pTask);
  }

  return 0;
}
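When the just-acknowledged message was a checkpoint trigger, the task does not push the next block immediately; it arms a 500 ms timer and keeps re-arming it until the task leaves TASK_STATUS__CK, so ordinary data never interleaves with an in-flight checkpoint. The sketch below renders the same back-off in a synchronous form, with placeholder predicates standing in for the real task-state API.

```c
#include <stdbool.h>
#include <time.h>

// Placeholder predicates; the real code reads the task's state machine.
static bool taskShouldStop(void)    { return false; }
static bool taskInCheckpoint(void)  { static int pending = 3; return pending-- > 0; }
static void dispatchNextBlock(void) { /* push the queued block downstream */ }

// Synchronous rendering of the timer-based retry: poll every 500 ms until the
// checkpoint finishes (or the task is asked to stop), then dispatch.
// POSIX nanosleep is used here purely for illustration.
static void dispatchWhenCheckpointDone(void) {
  const struct timespec delay = {.tv_sec = 0, .tv_nsec = 500L * 1000 * 1000};
  for (;;) {
    if (taskShouldStop()) return;     // mirrors the early-exit branch in the timer callback
    if (!taskInCheckpoint()) break;   // leave the wait loop once status != TASK_STATUS__CK
    nanosleep(&delay, NULL);          // the real code re-arms a 500 ms timer instead
  }
  dispatchNextBlock();
}
```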
|
||||
|
||||
|
@ -1106,7 +1140,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
|||
taosArrayPush(pTask->msgInfo.pRetryList, &pRsp->downstreamNodeId);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
stError("s-task:%s inputQ of downstream task:0x%x(vgId:%d) is full, wait for %dms and retry dispatch data", id,
|
||||
stWarn("s-task:%s inputQ of downstream task:0x%x(vgId:%d) is full, wait for %dms and retry dispatch", id,
|
||||
pRsp->downstreamTaskId, pRsp->downstreamNodeId, DISPATCH_RETRY_INTERVAL_MS);
|
||||
} else if (pRsp->inputStatus == TASK_INPUT_STATUS__REFUSED) {
|
||||
stError("s-task:%s downstream task:0x%x(vgId:%d) refused the dispatch msg, treat it as success", id,
|
||||
|
@ -1147,8 +1181,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
|||
}
|
||||
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
stDebug("s-task:%s failed to dispatch msg to downstream code:%s, add timer to retry in %dms, ref:%d",
|
||||
pTask->id.idStr, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||
stDebug("s-task:%s failed to dispatch msg to downstream, add into timer to retry in %dms, ref:%d",
|
||||
pTask->id.idStr, DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||
|
||||
streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
||||
} else { // this message has been sent successfully, let's try next one.
|
||||
|
|
|
@ -72,7 +72,7 @@ bool inSessionWindow(SSessionKey* pKey, TSKEY ts, int64_t gap) {
|
|||
return false;
|
||||
}
|
||||
|
||||
static SRowBuffPos* addNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, SSessionKey* pKey) {
|
||||
static SRowBuffPos* addNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, const SSessionKey* pKey) {
|
||||
SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
|
||||
ASSERT(pNewPos->pRowBuff);
|
||||
memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
|
||||
|
@ -80,7 +80,7 @@ static SRowBuffPos* addNewSessionWindow(SStreamFileState* pFileState, SArray* pW
|
|||
return pNewPos;
|
||||
}
|
||||
|
||||
static SRowBuffPos* insertNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, SSessionKey* pKey, int32_t index) {
|
||||
static SRowBuffPos* insertNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, const SSessionKey* pKey, int32_t index) {
|
||||
SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
|
||||
ASSERT(pNewPos->pRowBuff);
|
||||
memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
|
||||
|
@ -270,6 +270,51 @@ int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffP
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur, const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen) {
|
||||
SRowBuffPos* pNewPos = NULL;
|
||||
SSHashObj* pSessionBuff = getRowStateBuff(pFileState);
|
||||
void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t));
|
||||
SArray* pWinStates = NULL;
|
||||
if (!ppBuff) {
|
||||
pWinStates = taosArrayInit(16, POINTER_BYTES);
|
||||
tSimpleHashPut(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES);
|
||||
} else {
|
||||
pWinStates = (SArray*)(*ppBuff);
|
||||
}
|
||||
if (!pCur) {
|
||||
pNewPos = addNewSessionWindow(pFileState, pWinStates, pWinKey);
|
||||
goto _end;
|
||||
}
|
||||
|
||||
int32_t size = taosArrayGetSize(pWinStates);
|
||||
if (pCur->buffIndex >= 0) {
|
||||
if (pCur->buffIndex >= size) {
|
||||
pNewPos = insertNewSessionWindow(pFileState, pWinStates, pWinKey, size);
|
||||
goto _end;
|
||||
}
|
||||
pNewPos = insertNewSessionWindow(pFileState, pWinStates, pWinKey, pCur->buffIndex);
|
||||
goto _end;
|
||||
} else {
|
||||
if (size > 0) {
|
||||
SRowBuffPos* pPos = taosArrayGetP(pWinStates, 0);
|
||||
if (sessionWinKeyCmpr(pWinKey, pPos->pKey) >=0) {
|
||||
// pCur is invalid
|
||||
SSessionKey pTmpKey = *pWinKey;
|
||||
int32_t code = getSessionWinResultBuff(pFileState, &pTmpKey, 0, (void**)&pNewPos, pVLen);
|
||||
ASSERT(code == TSDB_CODE_FAILED);
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
pNewPos = getNewRowPosForWrite(pFileState);
|
||||
pNewPos->needFree = true;
|
||||
}
|
||||
|
||||
_end:
|
||||
memcpy(pNewPos->pKey, pWinKey, sizeof(SSessionKey));
|
||||
(*ppVal) = pNewPos;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void sessionWinStateClear(SStreamFileState* pFileState) {
|
||||
int32_t buffSize = getRowStateRowSize(pFileState);
|
||||
void* pIte = NULL;
|
||||
|
|
|
@ -134,7 +134,7 @@ int32_t streamReExecScanHistoryFuture(SStreamTask* pTask, int32_t idleDuration)
|
|||
pTask->schedHistoryInfo.numOfTicks = numOfTicks;
|
||||
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
stDebug("s-task:%s scan-history start in %.2fs, ref:%d", pTask->id.idStr, numOfTicks*0.1, ref);
|
||||
stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks*0.1, ref);
|
||||
|
||||
if (pTask->schedHistoryInfo.pTimer == NULL) {
|
||||
pTask->schedHistoryInfo.pTimer = taosTmrStart(doReExecScanhistory, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamEnv.timer);
|
||||
|
|
|
@ -735,6 +735,14 @@ int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void
|
|||
#endif
|
||||
}
|
||||
|
||||
int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur, const SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return allocSessioncWinBuffByNextPosition(pState->pFileState, pCur, pKey, pVal, pVLen);
|
||||
#else
|
||||
return TSDB_CODE_FAILED;
|
||||
#endif
|
||||
}
|
||||
|
||||
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return getSessionFlushedBuff(pState->pFileState, key, pVal, pVLen);
|
||||
|
|
|
@ -321,6 +321,10 @@ void popUsedBuffs(SStreamFileState* pFileState, SStreamSnapshot* pFlushList, uin
|
|||
while ((pNode = tdListNext(&iter)) != NULL && i < max) {
|
||||
SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
|
||||
if (pPos->beUsed == used) {
|
||||
if (used && !pPos->pRowBuff) {
|
||||
ASSERT(pPos->needFree == true);
|
||||
continue;
|
||||
}
|
||||
tdListAppend(pFlushList, &pPos);
|
||||
pFileState->flushMark = TMAX(pFileState->flushMark, pFileState->getTs(pPos->pKey));
|
||||
pFileState->stateBuffRemoveByPosFn(pFileState, pPos);
|
||||
|
|
|
@ -293,6 +293,15 @@ int32_t httpSendQuit() {
|
|||
|
||||
static int32_t taosSendHttpReportImpl(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
|
||||
EHttpCompFlag flag) {
|
||||
if (server == NULL || uri == NULL) {
|
||||
tError("http-report failed to report to invalid addr");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pCont == NULL || contLen == 0) {
|
||||
tError("http-report failed to report empty packet");
|
||||
return -1;
|
||||
}
|
||||
SHttpModule* load = taosAcquireRef(httpRefMgt, httpRef);
|
||||
if (load == NULL) {
|
||||
tError("http-report already released");
|
||||
|
|
|
@ -504,6 +504,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SCH_STATUS_ERROR, "scheduler status erro
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_TIMEOUT_ERROR, "Task timeout")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_IS_DROPPING, "Job is dropping")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_NOT_EXISTS, "Job no longer exist")
|
||||
|
||||
// parser
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYNTAX_ERROR, "syntax error near")
|
||||
|
|
|
@ -243,6 +243,7 @@ e
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupRep3.py -N 3
|
||||
,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/delete_check.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py
|
||||
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3
|
||||
|
|
|
@ -102,7 +102,7 @@ ulimit -c unlimited
|
|||
TMP_DIR=$WORKDIR/tmp
|
||||
SOURCEDIR=$WORKDIR/src
|
||||
MOUNT_DIR=""
|
||||
packageName="TDengine-server-3.0.1.0-Linux-x64.tar.gz"
|
||||
# packageName="TDengine-server-3.0.1.0-Linux-x64.tar.gz"
|
||||
rm -rf ${TMP_DIR}/thread_volume/$thread_no/sim
|
||||
mkdir -p $SOURCEDIR
|
||||
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/sim/tsim
|
||||
|
@ -114,9 +114,9 @@ if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then
|
|||
cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/
|
||||
fi
|
||||
|
||||
if [ ! -f "${SOURCEDIR}/${packageName}" ]; then
|
||||
wget -P ${SOURCEDIR} https://taosdata.com/assets-download/3.0/${packageName}
|
||||
fi
|
||||
# if [ ! -f "${SOURCEDIR}/${packageName}" ]; then
|
||||
# wget -P ${SOURCEDIR} https://taosdata.com/assets-download/3.0/${packageName}
|
||||
# fi
|
||||
|
||||
MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir"
|
||||
echo "$thread_no -> ${exec_dir}:$cmd"
|
||||
|
|
|
@ -15,6 +15,8 @@ sql create table t1 using st tags(1,1,1);
|
|||
sql create table t2 using st tags(2,2,2);
|
||||
sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart, count(*) c1, sum(a) from st interval(10s);
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791213000,1,2,3,1.0);
|
||||
|
||||
sql insert into t2 values(1648791213001,2,2,3,1.1);
|
||||
|
|
|
@ -0,0 +1,322 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 50
|
||||
sql connect
|
||||
|
||||
print step1
|
||||
print =============== create database
|
||||
sql create database test vgroups 1;
|
||||
sql use test;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart as s, count(*) c1, sum(b), max(c) from t1 event_window start with a = 0 end with a = 9;
|
||||
sleep 1000
|
||||
sql insert into t1 values(1648791213000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791223001,9,2,2,1.1);
|
||||
sql insert into t1 values(1648791213009,0,3,3,1.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop0:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt;
|
||||
sql select * from streamt;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data02 != 6 then
|
||||
print ======data02=$data02
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data03 != 3 then
|
||||
print ======data03=$data03
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791243006,1,1,1,1.1);
|
||||
sql insert into t1 values(1648791253000,2,2,2,1.1);
|
||||
|
||||
|
||||
$loop_count = 0
|
||||
loop1:
|
||||
|
||||
sleep 300
|
||||
print 2 sql select * from streamt;
|
||||
sql select * from streamt;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791243000,0,3,3,1.1);
|
||||
|
||||
$loop_count = 0
|
||||
loop2:
|
||||
|
||||
sleep 300
|
||||
print 3 sql select * from streamt;
|
||||
sql select * from streamt;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791253009,9,4,4,1.1);
|
||||
|
||||
$loop_count = 0
|
||||
loop3:
|
||||
|
||||
sleep 300
|
||||
print 4 sql select * from streamt;
|
||||
sql select * from streamt;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $rows != 2 then
|
||||
print ======rows=$rows
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data02 != 6 then
|
||||
print ======data02=$data02
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data03 != 3 then
|
||||
print ======data03=$data03
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 4 then
|
||||
print ======data11=$data11
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data12 != 10 then
|
||||
print ======data12=$data12
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
if $data13 != 4 then
|
||||
print ======data13=$data13
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
print step2
|
||||
print =============== create database test2
|
||||
sql create database test2 vgroups 1;
|
||||
sql use test2;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart as s, count(*) c1, sum(b), max(c) from t1 event_window start with a = 0 end with b = 9;
|
||||
sleep 1000
|
||||
sql insert into t1 values(1648791213000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791213009,1,2,2,2.1);
|
||||
sql insert into t1 values(1648791223000,0,9,9,9.0);
|
||||
|
||||
|
||||
sql insert into t1 values(1648791233000,0,9,9,9.0);
|
||||
|
||||
|
||||
$loop_count = 0
|
||||
loop4:
|
||||
|
||||
sleep 300
|
||||
print sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $rows != 2 then
|
||||
print ======rows=$rows
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data11 != 1 then
|
||||
print ======data11=$data11
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
print step3
|
||||
print =============== create database test3
|
||||
sql create database test3 vgroups 1;
|
||||
sql use test3;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _wstart as s, count(*) c1, sum(b), max(c) from t1 event_window start with a = 0 end with b = 9;
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791233009,1,2,2,2.1);
|
||||
|
||||
sql insert into t1 values(1648791233000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791243000,0,9,9,9.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop5:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt3;
|
||||
sql select * from streamt3;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop5
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop5
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791223000,0,9,9,9.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop6:
|
||||
|
||||
sleep 300
|
||||
print 2 sql select * from streamt3;
|
||||
sql select * from streamt3;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 2 then
|
||||
print ======rows=$rows
|
||||
goto loop6
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 1 then
|
||||
print ======data01=$data01
|
||||
goto loop6
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 3 then
|
||||
print ======data11=$data11
|
||||
goto loop6
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791213000,0,1,1,1.0);
|
||||
|
||||
sleep 300
|
||||
|
||||
sql insert into t1 values(1648791213001,1,9,9,9.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop7:
|
||||
|
||||
sleep 300
|
||||
print 3 sql select * from streamt3;
|
||||
sql select * from streamt3;
|
||||
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print $data20 $data21 $data22 $data23
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 3 then
|
||||
print ======rows=$rows
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 2 then
|
||||
print ======data01=$data01
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 1 then
|
||||
print ======data11=$data11
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
# row 2
|
||||
if $data21 != 3 then
|
||||
print ======data21=$data21
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
print event0 end
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
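The new test above repeatedly exercises one stream definition pattern; as a reference, a minimal sketch of that statement (stream, table, and column names are the test's own, not a new API) is:

```sql
-- sketch of the event_window stream exercised by the test above
CREATE STREAM streams1 TRIGGER AT_ONCE IGNORE EXPIRED 0 IGNORE UPDATE 0
  INTO streamt AS
  SELECT _wstart AS s, COUNT(*) c1, SUM(b), MAX(c)
  FROM t1
  EVENT_WINDOW START WITH a = 0 END WITH a = 9;
```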
@ -0,0 +1,332 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 50
|
||||
sql connect
|
||||
|
||||
print step1
|
||||
print =============== create database test1
|
||||
sql create database test1 vgroups 1;
|
||||
sql use test1;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1 as select _wstart as s, count(*) c1, sum(b), max(c) from t1 event_window start with a = 0 end with b = 9;
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791233000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791243000,1,9,9,9.0);
|
||||
|
||||
|
||||
sql insert into t1 values(1648791223000,3,3,3,3.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop0:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt1;
|
||||
sql select * from streamt1;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 2 then
|
||||
print ======data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
print step2
|
||||
print =============== create database test2
|
||||
sql create database test2 vgroups 1;
|
||||
sql use test2;
|
||||
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart as s, count(*) c1, sum(b), max(c) from t1 event_window start with a = 0 end with b = 9;
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791223000,0,3,3,3.0);
|
||||
|
||||
sql insert into t1 values(1648791233000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791243000,1,9,2,2.0);
|
||||
|
||||
|
||||
$loop_count = 0
|
||||
loop1:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
|
||||
sql insert into t1 values(1648791223000,1,1,4,4.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop2:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 2 then
|
||||
print ======data01=$data01
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791243000,1,1,5,5.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop3:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 0 then
|
||||
print ======rows=$rows
|
||||
goto loop3
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791253000,1,9,6,6.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop4:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
sql delete from t1 where ts = 1648791253000;
|
||||
|
||||
$loop_count = 0
|
||||
loop5:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 0 then
|
||||
print ======rows=$rows
|
||||
goto loop5
|
||||
endi
|
||||
|
||||
sql insert into t1 values(1648791263000,1,9,7,7.0);
|
||||
sql delete from t1 where ts = 1648791243000;
|
||||
|
||||
$loop_count = 0
|
||||
loop6:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt2;
|
||||
sql select * from streamt2;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop6
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 2 then
|
||||
print ======data01=$data01
|
||||
goto loop6
|
||||
endi
|
||||
|
||||
print step3
|
||||
print =============== create database test3
|
||||
sql create database test3 vgroups 1;
|
||||
sql use test3;
|
||||
|
||||
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
|
||||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
|
||||
sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _wstart as s, count(*) c1, sum(b), max(c) from st partition by tbname event_window start with a = 0 end with b = 9;
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791223000,0,3,3,3.0);
|
||||
|
||||
sql insert into t1 values(1648791233000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791243000,1,9,2,2.0);
|
||||
|
||||
sql insert into t2 values(1648791223000,0,3,3,3.0);
|
||||
|
||||
sql insert into t2 values(1648791233000,0,1,1,1.0);
|
||||
sql insert into t2 values(1648791243000,1,9,2,2.0);
|
||||
|
||||
|
||||
$loop_count = 0
|
||||
loop7:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt3;
|
||||
sql select * from streamt3;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print $data20 $data21 $data22 $data23
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 2 then
|
||||
print ======rows=$rows
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
if $data01 != 3 then
|
||||
print ======data01=$data01
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
if $data11 != 3 then
|
||||
print ======data11=$data11
|
||||
goto loop7
|
||||
endi
|
||||
|
||||
print update data
|
||||
sql insert into t1 values(1648791243000,1,3,3,3.0);
|
||||
sql insert into t2 values(1648791243000,1,3,3,3.0);
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791253000,1,9,3,3.0);
|
||||
sql insert into t2 values(1648791253000,1,9,3,3.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop8:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt3;
|
||||
sql select * from streamt3;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03
|
||||
print $data10 $data11 $data12 $data13
|
||||
print $data20 $data21 $data22 $data23
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 2 then
|
||||
print ======rows=$rows
|
||||
goto loop8
|
||||
endi
|
||||
|
||||
if $data01 != 4 then
|
||||
print ======data01=$data01
|
||||
goto loop8
|
||||
endi
|
||||
|
||||
if $data11 != 4 then
|
||||
print ======data11=$data11
|
||||
goto loop8
|
||||
endi
|
||||
|
||||
print event1 end
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,84 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 50
|
||||
sql connect
|
||||
|
||||
print step1
|
||||
print =============== create database test
|
||||
sql create database test vgroups 1;
|
||||
sql use test;
|
||||
|
||||
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
|
||||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
sql create table t3 using st tags(3,3,3);
|
||||
sql create table t4 using st tags(3,3,3);
|
||||
|
||||
sql insert into t1 values(1648791223000,0,1,1,1.0);
|
||||
sql insert into t1 values(1648791233000,0,2,2,2.0);
|
||||
sql insert into t1 values(1648791243000,1,3,3,3.0);
|
||||
|
||||
sql insert into t2 values(1648791223000,0,1,4,3.0);
|
||||
sql insert into t2 values(1648791233000,0,2,5,1.0);
|
||||
sql insert into t2 values(1648791243000,1,3,6,2.0);
|
||||
|
||||
sql insert into t3 values(1648791223000,1,1,7,3.0);
|
||||
sql insert into t3 values(1648791233000,1,2,8,1.0);
|
||||
sql insert into t3 values(1648791243000,1,3,9,2.0);
|
||||
|
||||
sql insert into t4 values(1648791223000,1,1,10,3.0);
|
||||
sql insert into t4 values(1648791233000,0,2,11,1.0);
|
||||
sql insert into t4 values(1648791243000,1,9,12,2.0);
|
||||
|
||||
sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 fill_history 1 into streamt0 as select _wstart as s, count(*) c1, sum(b), max(c), _wend as e from st partition by tbname event_window start with a = 0 end with b = 9;
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791253000,1,9,13,2.0);
|
||||
sql insert into t2 values(1648791253000,1,9,14,2.0);
|
||||
sql insert into t3 values(1648791253000,1,9,15,2.0);
|
||||
sql insert into t4 values(1648791253000,1,9,16,2.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop0:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt0 order by 1, 2, 3, 4;
|
||||
sql select * from streamt0 order by 1, 2, 3, 4;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03 $data04
|
||||
print $data10 $data11 $data12 $data13 $data14
|
||||
print $data20 $data21 $data22 $data23 $data24
|
||||
print $data30 $data31 $data32 $data33 $data34
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 3 then
|
||||
print ======rows=$rows
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data01 != 4 then
|
||||
print ======data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data11 != 4 then
|
||||
print ======data11=$data11
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data21 != 2 then
|
||||
print ======data21=$data21
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
|
||||
print event1 end
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,61 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 50
|
||||
sql connect
|
||||
|
||||
print step1
|
||||
print =============== create database test
|
||||
sql create database test vgroups 1;
|
||||
sql use test;
|
||||
|
||||
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
|
||||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
sql create table t3 using st tags(3,3,3);
|
||||
sql create table t4 using st tags(3,3,3);
|
||||
|
||||
|
||||
sql insert into t4 values(1648791223000,1,1,10,3.0);
|
||||
sql insert into t4 values(1648791233000,0,2,11,1.0);
|
||||
sql insert into t4 values(1648791243000,1,9,12,2.0);
|
||||
|
||||
sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 fill_history 1 into streamt0 as select _wstart as s, count(*) c1, sum(b), max(c), _wend as e from st partition by tbname event_window start with a = 0 end with b = 9;
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t4 values(1648791253000,1,9,16,2.0);
|
||||
|
||||
$loop_count = 0
|
||||
loop0:
|
||||
|
||||
sleep 300
|
||||
print 1 sql select * from streamt0 order by 1, 2, 3, 4;
|
||||
sql select * from streamt0 order by 1, 2, 3, 4;
|
||||
|
||||
print
|
||||
print $data00 $data01 $data02 $data03 $data04
|
||||
print $data10 $data11 $data12 $data13 $data14
|
||||
print $data20 $data21 $data22 $data23 $data24
|
||||
print $data30 $data31 $data32 $data33 $data34
|
||||
print
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $rows != 1 then
|
||||
print ======rows=$rows
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
if $data01 != 2 then
|
||||
print ======data01=$data01
|
||||
goto loop0
|
||||
endi
|
||||
|
||||
|
||||
|
||||
print event1 end
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
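The two fill_history cases above create the same partitioned event-window stream; a minimal sketch of that statement (names taken from the tests) is:

```sql
-- sketch of the partitioned, history-filling event_window stream used by the tests above
CREATE STREAM streams0 TRIGGER AT_ONCE IGNORE EXPIRED 0 IGNORE UPDATE 0 FILL_HISTORY 1
  INTO streamt0 AS
  SELECT _wstart AS s, COUNT(*) c1, SUM(b), MAX(c), _wend AS e
  FROM st PARTITION BY tbname
  EVENT_WINDOW START WITH a = 0 END WITH b = 9;
```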
@ -23,6 +23,8 @@ sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 i
|
|||
sql create stream stream_t2 trigger at_once watermark 1d IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamtST2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s);
|
||||
sleep 1000
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791210000,1,2,3,1.0);
|
||||
sql insert into t1 values(1648791216000,2,2,3,1.1);
|
||||
sql insert into t1 values(1648791220000,3,2,3,2.1);
|
||||
|
@ -314,6 +316,8 @@ sql create stream streams11 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 i
|
|||
sql create stream streams12 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s);
|
||||
sleep 1000
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791213000,1,2,3,1.0);
|
||||
sql insert into t1 values(1648791223001,2,2,3,1.1);
|
||||
sql insert into t1 values(1648791233002,3,2,3,2.1);
|
||||
|
@ -448,6 +452,8 @@ sql create stream streams21 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 i
|
|||
sql create stream streams22 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt22 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s);
|
||||
sleep 1000
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1,1.0);
|
||||
sql insert into t1 values(1648791223001,2,2,2,1.1);
|
||||
sql insert into t1 values(1648791233002,3,3,3,2.1);
|
||||
|
@ -712,6 +718,8 @@ sql create table t2 using st tags(2,2,2);
|
|||
sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _wstart as ts, count(*),min(a) c1 from st interval(10s) sliding(5s);
|
||||
sleep 1000
|
||||
|
||||
sleep 1000
|
||||
|
||||
sql insert into t1 values(1648791213000,1,1,1,1.0);
|
||||
sql insert into t1 values(1648791243000,2,1,1,1.0);
|
||||
|
||||
|
|
|
@ -0,0 +1,222 @@
|
|||
import subprocess
|
||||
import random
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
from util.common import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
"""This test case is used to veirfy hot refresh configurations
|
||||
"""
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.configration_dic = {
|
||||
"cli": [
|
||||
{
|
||||
"name": "keepAliveIdle",
|
||||
"values": [1, 100, 7200000],
|
||||
"except_values": [0, 7200001]
|
||||
},
|
||||
{
|
||||
"name": "queryPolicy",
|
||||
"values": [1, 2, 4],
|
||||
"except_values": [0, 5]
|
||||
},
|
||||
{
|
||||
"name": "numOfLogLines",
|
||||
"values": [1000, 2000, 2000000000],
|
||||
"except_values": [999, 2000000001]
|
||||
},
|
||||
{
|
||||
"name": "logKeepDays",
|
||||
"values": [-365000, 2000, 365000],
|
||||
"except_values": [-365001, 365001]
|
||||
}
|
||||
],
|
||||
"svr": [
|
||||
{
|
||||
"name": "keepAliveIdle",
|
||||
"alias": "tsKeepAliveIdle",
|
||||
"values": [1, 100, 7200000],
|
||||
"except_values": [0, 7200001]
|
||||
},
|
||||
{
|
||||
"name": "mndSdbWriteDelta",
|
||||
"alias": "tsMndSdbWriteDelta",
|
||||
"values": [20, 1000, 10000],
|
||||
"except_values": [19, 10001]
|
||||
},
|
||||
{
|
||||
"name": "enableWhiteList",
|
||||
"alias": "tsEnableWhiteList",
|
||||
"values": [0, 1],
|
||||
"except_values": [-1, 2]
|
||||
},
|
||||
{
|
||||
"name": "audit",
|
||||
"alias": "tsEnableAudit",
|
||||
"values": [0, 1],
|
||||
"except_values": [-1, 2]
|
||||
},
|
||||
{
|
||||
"name": "telemetryReporting",
|
||||
"alias": "tsEnableTelem",
|
||||
"values": [0, 1],
|
||||
"except_values": [-1, 2]
|
||||
},
|
||||
{
|
||||
"name": "cacheLazyLoadThreshold",
|
||||
"alias": "tsCacheLazyLoadThreshold",
|
||||
"values": [0, 200, 100000],
|
||||
"except_values": [-1, 100001]
|
||||
},
|
||||
{
|
||||
"name": "queryRspPolicy",
|
||||
"alias": "tsQueryRspPolicy",
|
||||
"values": [0, 1],
|
||||
"except_values": [-1, 2]
|
||||
},
|
||||
{
|
||||
"name": "ttlFlushThreshold",
|
||||
"alias": "tsTtlFlushThreshold",
|
||||
"values": [-1, 200, 1000000],
|
||||
"except_values": [-2, 1000001]
|
||||
},
|
||||
{
|
||||
"name": "timeseriesThreshold",
|
||||
"alias": "tsTimeSeriesThreshold",
|
||||
"values": [0, 200, 2000],
|
||||
"except_values": [-2, 2001]
|
||||
},
|
||||
{
|
||||
"name": "minDiskFreeSize",
|
||||
"alias": "tsMinDiskFreeSize",
|
||||
"values": ["51200K", "100M", "1G"],
|
||||
"except_values": ["1024K", "1.1G", "1T"]
|
||||
},
|
||||
{
|
||||
"name": "tmqMaxTopicNum",
|
||||
"alias": "tmqMaxTopicNum",
|
||||
"values": [1, 1000, 10000],
|
||||
"except_values": [0, 10001]
|
||||
},
|
||||
{
|
||||
"name": "transPullupInterval",
|
||||
"alias": "tsTransPullupInterval",
|
||||
"values": [1, 1000, 10000],
|
||||
"except_values": [0, 10001]
|
||||
},
|
||||
{
|
||||
"name": "mqRebalanceInterval",
|
||||
"alias": "tsMqRebalanceInterval",
|
||||
"values": [1, 1000, 10000],
|
||||
"except_values": [0, 10001]
|
||||
},
|
||||
{
|
||||
"name": "checkpointInterval",
|
||||
"alias": "tsStreamCheckpointInterval",
|
||||
"values": [60, 1000, 1200],
|
||||
"except_values": [59, 1201]
|
||||
},
|
||||
{
|
||||
"name": "trimVDbIntervalSec",
|
||||
"alias": "tsTrimVDbIntervalSec",
|
||||
"values": [1, 1000, 100000],
|
||||
"except_values": [0, 100001]
|
||||
},
|
||||
{
|
||||
"name": "disableStream",
|
||||
"alias": "tsDisableStream",
|
||||
"values": [0, 1],
|
||||
"except_values": [-1]
|
||||
},
|
||||
{
|
||||
"name": "maxStreamBackendCache",
|
||||
"alias": "tsMaxStreamBackendCache",
|
||||
"values": [16, 512, 1024],
|
||||
"except_values": [15, 1025]
|
||||
},
|
||||
{
|
||||
"name": "numOfLogLines",
|
||||
"alias": "tsNumOfLogLines",
|
||||
"values": [1000, 50000, 2000000000],
|
||||
"except_values": [999, 2000000001]
|
||||
},
|
||||
{
|
||||
"name": "logKeepDays",
|
||||
"alias": "tsLogKeepDays",
|
||||
"values": [-365000, 10, 365000],
|
||||
"except_values": [-365001, 365001]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
def get_param_value_with_gdb(self, config_name, process_name):
|
||||
res = subprocess.Popen("gdb -q -nx -p `pidof {}` --batch -ex 'set height 0' -ex 'p {}'".format(process_name, config_name), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
r_lines = res.stdout.read()
|
||||
if r_lines:
|
||||
for line in r_lines.decode().split("\n"):
|
||||
if "$1 = " in line:
|
||||
tdLog.debug("gdb result: {}".format(line))
|
||||
return line.split(" = ")[1]
|
||||
|
||||
def cli_check(self, name, values, except_values=False):
|
||||
if not except_values:
|
||||
for v in values:
|
||||
tdLog.debug("Set {} to {}".format(name, v))
|
||||
tdSql.execute(f'alter local "{name} {v}";')
|
||||
else:
|
||||
for v in values:
|
||||
tdLog.debug("Set {} to {}".format(name, v))
|
||||
tdSql.error(f'alter local "{name} {v}";')
|
||||
|
||||
def svr_check(self, name, alias, values, except_values=False):
|
||||
p_list = ["dnode 1", "all dnodes"]
|
||||
# check bool param value
|
||||
if len(values) == 2 and [0, 1] == values and name != "queryRspPolicy":
|
||||
is_bool = True
|
||||
else:
|
||||
is_bool = False
|
||||
tdLog.debug(f"{name} is_bool: {is_bool}")
|
||||
if not except_values:
|
||||
for v in values:
|
||||
dnode = random.choice(p_list)
|
||||
tdSql.execute(f'alter {dnode} "{name} {v}";')
|
||||
value = self.get_param_value_with_gdb(alias, "taosd")
|
||||
if value:
|
||||
tdLog.debug(f"value: {value}")
|
||||
assert(value == str(bool(v)).lower() if is_bool else str(v))
|
||||
else:
|
||||
for v in values:
|
||||
dnode = random.choice(p_list)
|
||||
tdSql.error(f'alter {dnode} "{name} {v}";')
|
||||
|
||||
def run(self):
|
||||
for key in self.configration_dic:
|
||||
if "cli" == key:
|
||||
for item in self.configration_dic[key]:
|
||||
self.cli_check(item["name"], item["values"])
|
||||
if "except_values" in item:
|
||||
self.cli_check(item["name"], item["except_values"], True)
|
||||
elif "svr" == key:
|
||||
for item in self.configration_dic[key]:
|
||||
self.svr_check(item["name"], item["alias"], item["values"])
|
||||
if "except_values" in item:
|
||||
self.svr_check(item["name"], item["alias"], item["except_values"], True)
|
||||
else:
|
||||
raise Exception(f"unknown key: {key}")
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
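For context, the hot-refresh case above drives plain ALTER statements; a short sketch with illustrative values (the test iterates the full value and except-value lists defined in `configration_dic`):

```sql
-- client-side (cli) parameter, applied via the local connection
ALTER LOCAL "keepAliveIdle 100";
-- server-side parameter on a single dnode
ALTER DNODE 1 "keepAliveIdle 100";
-- server-side parameter on every dnode
ALTER ALL DNODES "cacheLazyLoadThreshold 200";
```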
@ -0,0 +1,105 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/home/ben/github/taosdata/3.0/TDinternal/dnodes/dnode1/cfg",
|
||||
"host": "localhost",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"connection_pool_size": 8,
|
||||
"thread_count": 20,
|
||||
"create_table_thread_count": 7,
|
||||
"result_file": "/root/insert_rsma_10w_1vnode.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 100,
|
||||
"num_of_records_per_req": 100,
|
||||
"prepared_rand": 10000,
|
||||
"chinese": "no",
|
||||
"databases": [
|
||||
{
|
||||
"dbinfo": {
|
||||
"name": "db_replica",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"duration": 10,
|
||||
"vgroups": 2,
|
||||
"precision": "ms",
|
||||
"keep": 3650,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp": 2,
|
||||
"wal_Level": 1,
|
||||
"wal_retention_period": 0,
|
||||
"retentions": "-:7d, 20s:8d, 30s:9d"
|
||||
},
|
||||
"super_tables": [
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists": "no",
|
||||
"childtable_count": 1000,
|
||||
"childtable_prefix": "ct",
|
||||
"escape_character": "yes",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"non_stop_mode": "no",
|
||||
"line_protocol": "line",
|
||||
"insert_rows": 5000,
|
||||
"childtable_limit": 10,
|
||||
"childtable_offset": 100,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval": 0,
|
||||
"partial_col_num": 0,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1000,
|
||||
"start_timestamp": "now",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"use_sample_ts": "no",
|
||||
"tags_file": "",
|
||||
"rollup": "max",
|
||||
"columns": [
|
||||
{
|
||||
"type": "FLOAT",
|
||||
"name": "current",
|
||||
"count": 1,
|
||||
"max": 12,
|
||||
"min": 8
|
||||
},
|
||||
{
|
||||
"type": "INT",
|
||||
"name": "voltage",
|
||||
"max": 225,
|
||||
"min": 215
|
||||
},
|
||||
{
|
||||
"type": "FLOAT",
|
||||
"name": "phase",
|
||||
"max": 1,
|
||||
"min": 0
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"type": "INT",
|
||||
"name": "tag1",
|
||||
"max": 10,
|
||||
"min": 1
|
||||
},
|
||||
{
|
||||
"name": "location",
|
||||
"type": "BINARY",
|
||||
"len": 16,
|
||||
"values": [
|
||||
"beijing",
|
||||
"shanghai"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,111 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "",
|
||||
"host": "localhost",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"connection_pool_size": 8,
|
||||
"thread_count": 20,
|
||||
"create_table_thread_count": 7,
|
||||
"result_file": "",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 100,
|
||||
"num_of_records_per_req": 100,
|
||||
"prepared_rand": 10000,
|
||||
"chinese": "no",
|
||||
"databases": [
|
||||
{
|
||||
"dbinfo": {
|
||||
"name": "db_update",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"duration": 10,
|
||||
"vgroups": 4,
|
||||
"precision": "ms",
|
||||
"keep": 3650,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp": 2,
|
||||
"wal_Level": 1,
|
||||
"wal_retention_period": 0,
|
||||
"stt_trigger": 1,
|
||||
"retentions": "-:7d, 1m:30d, 1h:60d"
|
||||
},
|
||||
"super_tables": [
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists": "no",
|
||||
"childtable_count": 100,
|
||||
"childtable_prefix": "ct",
|
||||
"escape_character": "yes",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"non_stop_mode": "no",
|
||||
"line_protocol": "line",
|
||||
"insert_rows": 3600,
|
||||
"childtable_limit": 10,
|
||||
"childtable_offset": 100,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval": 0,
|
||||
"partial_col_num": 0,
|
||||
"disorder_ratio": 5,
|
||||
"disorder_range": 1000,
|
||||
"update_ratio": 5,
|
||||
"delete_ratio": 1,
|
||||
"disorder_fill_interval": 300,
|
||||
"update_fill_interval": 25,
|
||||
"generate_row_rule": 2,
|
||||
"timestamp_step": 1000,
|
||||
"start_timestamp": "2023-11-12 00:01:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"use_sample_ts": "no",
|
||||
"tags_file": "",
|
||||
"rollup": "max",
|
||||
"columns": [
|
||||
{
|
||||
"type": "FLOAT",
|
||||
"name": "current",
|
||||
"count": 1,
|
||||
"max": 12,
|
||||
"min": 8
|
||||
},
|
||||
{
|
||||
"type": "INT",
|
||||
"name": "voltage",
|
||||
"max": 225,
|
||||
"min": 215
|
||||
},
|
||||
{
|
||||
"type": "FLOAT",
|
||||
"name": "phase",
|
||||
"max": 1,
|
||||
"min": 0
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"type": "INT",
|
||||
"name": "tag1",
|
||||
"max": 10,
|
||||
"min": 1
|
||||
},
|
||||
{
|
||||
"name": "location",
|
||||
"type": "BINARY",
|
||||
"len": 16,
|
||||
"values": [
|
||||
"beijing",
|
||||
"shanghai"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -57,12 +57,16 @@ class TDTestCase:
|
|||
"create database db1 retentions 1s:1d",
|
||||
"create database db1 retentions 1s:1d,2s:2d",
|
||||
"create database db1 retentions 1s:1d,2s:2d,3s:3d",
|
||||
"create database db1 retentions 1s:1d,2s:2d,3s:3d,4s:4d",
|
||||
"create database db1 retentions -:1d,2s:2d,3s:3d,4s:4d",
|
||||
"create database db1 retentions -:-",
|
||||
"create database db1 retentions --:1d",
|
||||
"create database db1 retentions -:-:1d",
|
||||
"create database db1 retentions 1d:-",
|
||||
"create database db1 retentions -:-",
|
||||
"create database db1 retentions +:1d",
|
||||
"create database db1 retentions :1d",
|
||||
"create database db1 retentions -:1d,-:2d",
|
||||
"create database db1 retentions -:1d,-:2d,-:3d",
|
||||
"create database db1 retentions -:1d,1s:-",
|
||||
"create database db1 retentions -:1d,15s:2d,-:3d",
|
||||
|
||||
|
@ -76,6 +80,7 @@ class TDTestCase:
|
|||
"create database db1 retentions -:1d,1s:86400000a",
|
||||
"create database db1 retentions -:1d,1s:86400000000u",
|
||||
"create database db1 retentions -:1d,1s:86400000000000b",
|
||||
"create database db1 retentions -:1s,1s:2s",
|
||||
"create database db1 retentions -:1d,1s:1w",
|
||||
"create database db1 retentions -:1d,1s:1n",
|
||||
"create database db1 retentions -:1d,1s:1y",
|
||||
|
@ -97,6 +102,8 @@ class TDTestCase:
|
|||
"create database db5 retentions -:1d,2s:2d,2s:3d",
|
||||
"create database db5 retentions -:1d,3s:2d,2s:3d",
|
||||
"create database db1 retentions -:1d,2s:3d,3s:2d",
|
||||
"create database db1 retentions -:1d,2s:3d,1s:2d",
|
||||
|
||||
]
|
||||
|
||||
@property
|
||||
|
|
|
@ -0,0 +1,146 @@
|
|||
from datetime import datetime
|
||||
import time
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
from util.common import *
|
||||
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c_int"
|
||||
BINT_COL = "c_bint"
|
||||
SINT_COL = "c_sint"
|
||||
TINT_COL = "c_tint"
|
||||
FLOAT_COL = "c_float"
|
||||
DOUBLE_COL = "c_double"
|
||||
BOOL_COL = "c_bool"
|
||||
TINT_UN_COL = "c_utint"
|
||||
SINT_UN_COL = "c_usint"
|
||||
BINT_UN_COL = "c_ubint"
|
||||
INT_UN_COL = "c_uint"
|
||||
BINARY_COL = "c_binary"
|
||||
NCHAR_COL = "c_nchar"
|
||||
TS_COL = "c_ts"
|
||||
|
||||
INT_TAG = "t_int"
|
||||
|
||||
TAG_COL = [INT_TAG]
|
||||
|
||||
## insert data args:
|
||||
TIME_STEP = 10000
|
||||
NOW = int(datetime.timestamp(datetime.now()) * 1000)
|
||||
|
||||
# init db/table
|
||||
DBNAME = "db"
|
||||
DB1 = "db1"
|
||||
DB2 = "db2"
|
||||
DB3 = "db3"
|
||||
DB4 = "db4"
|
||||
STBNAME = "stb1"
|
||||
CTBNAME = "ct1"
|
||||
NTBNAME = "nt1"
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
|
||||
tsql.execute("use %s" %dbName)
|
||||
pre_create = "create table"
|
||||
sql = pre_create
|
||||
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
|
||||
for i in range(ctbNum):
|
||||
tagValue = 'beijing'
|
||||
if (i % 2 == 0):
|
||||
tagValue = 'shanghai'
|
||||
sql += " %s%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
|
||||
if (i > 0) and (i%100 == 0):
|
||||
tsql.execute(sql)
|
||||
sql = pre_create
|
||||
if sql != pre_create:
|
||||
tsql.execute(sql)
|
||||
|
||||
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
|
||||
return
|
||||
|
||||
def getPath(self, tool="taosBenchmark"):
|
||||
if (platform.system().lower() == 'windows'):
|
||||
tool = tool + ".exe"
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
paths = []
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ((tool) in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
paths.append(os.path.join(root, tool))
|
||||
break
|
||||
if (len(paths) == 0):
|
||||
tdLog.exit("taosBenchmark not found!")
|
||||
return
|
||||
else:
|
||||
tdLog.info("taosBenchmark found in %s" % paths[0])
|
||||
return paths[0]
|
||||
|
||||
|
||||
def run(self):
|
||||
binPath = self.getPath()
|
||||
|
||||
self.rows = 10
|
||||
tdLog.printNoPrefix("==========step0:all check")
|
||||
dbname='d0'
|
||||
tdSql.execute(f"create database {dbname} retentions -:363d,1m:364d,1h:365d STT_TRIGGER 2 vgroups 6;")
|
||||
tdSql.execute(f"create stable if not exists {dbname}.st_min (ts timestamp, c1 int) tags (proid int,city binary(20)) rollup(min) watermark 0s,1s max_delay 1m,180s;;")
|
||||
tdSql.execute(f"create stable if not exists {dbname}.st_avg (ts timestamp, c1 double) tags (city binary(20),district binary(20)) rollup(min) watermark 0s,1s max_delay 1m,180s;;")
|
||||
self.create_ctable(tdSql, dbname, 'st_min', 'ct_min', 1000)
|
||||
tdLog.printNoPrefix("==========step4:after wal, all check again ")
|
||||
datetime1 = datetime.now()
|
||||
print(datetime1)
|
||||
tdSql.execute(f"INSERT INTO {dbname}.ct_min7 VALUES ('2023-11-07 10:01:00.001',797029643) ('2023-11-07 10:01:00.001',797029643);")
|
||||
tdSql.query(f"select * from {dbname}.st_min where ts>now-363d;")
|
||||
tdSql.checkData(0, 0, '2023-11-07 10:01:00.001')
|
||||
sleep(6)
|
||||
tdSql.query(f"select * from {dbname}.st_min where ts>now-364d;")
|
||||
tdSql.checkData(0, 0, '2023-11-07 10:01:00.000')
|
||||
tdSql.query(f"select * from {dbname}.st_min where ts>now-365d;")
|
||||
tdSql.checkData(0, 0, '2023-11-07 10:00:00.000')
|
||||
|
||||
#bug
|
||||
os.system(f"{binPath} -f ./1-insert/benchmark-tbl-rsma-alter.json")
|
||||
tdSql.execute(f"alter database db_replica replica 3;")
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=3,db_name="db_replica",count_number=240)
|
||||
tdSql.execute(f"alter database db_replica replica 1;")
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=1,db_name="db_replica",count_number=240)
|
||||
tdSql.execute(f"alter database db_replica replica 3;")
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=3,db_name="db_replica",count_number=240)
|
||||
tdSql.execute(f"alter database db_replica replica 1;")
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=1,db_name="db_replica",count_number=240)
|
||||
|
||||
#bug
|
||||
# os.system(f"{binPath} -f ./1-insert/benchmark-tbl-rsma.json")
|
||||
# tdSql.query(f" select count(*) from db_update.stb1;")
|
||||
# tdSql.checkData(0, 0, 360000)
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -30,17 +30,16 @@ class TDTestCase:
|
|||
tdSql.query(f'select to_iso8601(ts) from {self.ntbname}')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{time_zone}')
|
||||
timezone_list = ['+0000','+0100','+0200','+0300','+0330','+0400','+0500','+0530','+0600','+0700','+0800','+0900','+1000','+1100','+1200',\
|
||||
'+00','+01','+02','+03','+04','+05','+06','+07','+08','+09','+10','+11','+12',\
|
||||
'+00:00','+01:00','+02:00','+03:00','+03:30','+04:00','+05:00','+05:30','+06:00','+07:00','+08:00','+09:00','+10:00','+11:00','+12:00',\
|
||||
'-0000','-0100','-0200','-0300','-0400','-0500','-0600','-0700','-0800','-0900','-1000','-1100','-1200',\
|
||||
'-00','-01','-02','-03','-04','-05','-06','-07','-08','-09','-10','-11','-12',\
|
||||
'-00:00','-01:00','-02:00','-03:00','-04:00','-05:00','-06:00','-07:00','-08:00','-09:00','-10:00','-11:00','-12:00',\
|
||||
'z','Z']
|
||||
for j in timezone_list:
|
||||
tdSql.query(f'select to_iso8601(ts,"{j}") from {self.ntbname}')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{j}')
|
||||
|
||||
tz_list = ['+0000','+0530', '+00', '+06', '+00:00', '+12:00', '-0000', '-0900', '-00', '-05', '-00:00', '-03:00','z', 'Z']
|
||||
res_list = ['2021-12-31T16:00:00.000+0000', '2021-12-31T21:30:00.000+0530', '2021-12-31T16:00:00.000+00', '2021-12-31T22:00:00.000+06',\
|
||||
'2021-12-31T16:00:00.000+00:00', '2022-01-01T04:00:00.000+12:00','2021-12-31T16:00:00.000-0000','2021-12-31T07:00:00.000-0900',\
|
||||
'2021-12-31T16:00:00.000-00', '2021-12-31T11:00:00.000-05','2021-12-31T16:00:00.000-00:00','2021-12-31T13:00:00.000-03:00',\
|
||||
'2021-12-31T16:00:00.000z', '2021-12-31T16:00:00.000Z']
|
||||
for i in range(len(tz_list)):
|
||||
tdSql.query(f'select to_iso8601(ts,"{tz_list[i]}") from {self.ntbname} where c1==1')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],res_list[i])
|
||||
|
||||
error_param_list = [0,100.5,'a','!']
|
||||
for i in error_param_list:
|
||||
tdSql.error(f'select to_iso8601(ts,"{i}") from {self.ntbname}')
|
||||
|
|
|
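The timezone-suffix form of to_iso8601 checked above can also be tried directly; a small sketch, assuming a table tb1 whose ts column holds 2022-01-01 00:00:00.000 in the +08:00 server timezone implied by the expected values above:

```sql
SELECT to_iso8601(ts, "+08:00") FROM tb1;  -- 2022-01-01T00:00:00.000+08:00
SELECT to_iso8601(ts, "Z") FROM tb1;       -- 2021-12-31T16:00:00.000Z
```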
@ -338,11 +338,38 @@ class TDTestCase:
|
|||
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
#
|
||||
def checkVGroups(self):
|
||||
|
||||
# db2
|
||||
tdSql.execute("create database db2 vgroups 2;")
|
||||
tdSql.execute("use db2;")
|
||||
tdSql.execute("create table st(ts timestamp, age int) tags(area int);")
|
||||
tdSql.execute("create table t1 using st tags(1);")
|
||||
tdSql.query("select distinct(tbname) from st limit 1 offset 100;")
|
||||
tdSql.checkRows(0)
|
||||
tdLog.info("check db2 vgroups 2 limit 1 offset 100 successfully!")
|
||||
|
||||
|
||||
# db1
|
||||
tdSql.execute("create database db1 vgroups 1;")
|
||||
tdSql.execute("use db1;")
|
||||
tdSql.execute("create table st(ts timestamp, age int) tags(area int);")
|
||||
tdSql.execute("create table t1 using st tags(1);")
|
||||
tdSql.query("select distinct(tbname) from st limit 1 offset 100;")
|
||||
tdSql.checkRows(0)
|
||||
tdLog.info("check db1 vgroups 1 limit 1 offset 100 successfully!")
|
||||
|
||||
|
||||
def run(self):
|
||||
# tdSql.prepare()
|
||||
self.prepareTestEnv()
|
||||
self.tmqCase1()
|
||||
|
||||
# one vgroup diff more than one vgroup check
|
||||
self.checkVGroups()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
|
File diff suppressed because it is too large
|
@ -244,7 +244,7 @@ class ClusterComCheck:
|
|||
else:
|
||||
tdLog.exit(f"vgroup number of {db_name} is not correct")
|
||||
if self.db_replica == 1 :
|
||||
if tdSql.queryResult[0][4] == 'leader' and tdSql.queryResult[1][4] == 'leader' and tdSql.queryResult[last_number][4] == 'leader':
|
||||
if tdSql.queryResult[0][4] == 'leader' and tdSql.queryResult[last_number][4] == 'leader':
|
||||
tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';")
|
||||
print("db replica :",tdSql.queryResult[0][0])
|
||||
if tdSql.queryResult[0][0] == db_replica:
|
||||
|
|
|
@ -15,6 +15,9 @@ from util.common import *
|
|||
from util.cluster import *
|
||||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
class TDTestCase:
|
||||
def __init__(self):
|
||||
|
@ -195,6 +198,9 @@ class TDTestCase:
|
|||
for i in range(len(topicNameList)):
|
||||
tdSql.query("drop topic %s"%topicNameList[i])
|
||||
|
||||
if deleteWal == True:
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
|
||||
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -15,6 +15,9 @@ from util.common import *
|
|||
from util.cluster import *
|
||||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
class TDTestCase:
|
||||
def __init__(self):
|
||||
|
@ -196,6 +199,9 @@ class TDTestCase:
|
|||
for i in range(len(topicNameList)):
|
||||
tdSql.query("drop topic %s"%topicNameList[i])
|
||||
|
||||
if deleteWal == True:
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
|
||||
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -16,6 +16,11 @@ from util.cluster import *
|
|||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def __init__(self):
|
||||
self.vgroups = 1
|
||||
|
@ -194,6 +199,9 @@ class TDTestCase:
|
|||
for i in range(len(topicNameList)):
|
||||
tdSql.query("drop topic %s"%topicNameList[i])
|
||||
|
||||
if deleteWal == True:
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
|
||||
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -16,6 +16,13 @@ from util.cluster import *
|
|||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def __init__(self):
|
||||
self.vgroups = 1
|
||||
|
@ -185,7 +192,7 @@ class TDTestCase:
|
|||
expectRows = 1
|
||||
resultList = tmqCom.selectConsumeResult(expectRows)
|
||||
|
||||
if expectrowcnt / 2 >= resultList[0] or expectrowcnt < resultList[0]:
|
||||
if expectrowcnt / 2 >= resultList[0]:
|
||||
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
|
||||
tdLog.exit("%d tmq consume rows error!"%consumerId)
|
||||
|
||||
|
@ -195,6 +202,8 @@ class TDTestCase:
|
|||
for i in range(len(topicNameList)):
|
||||
tdSql.query("drop topic %s"%topicNameList[i])
|
||||
|
||||
if deleteWal == True:
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -16,6 +16,11 @@ from util.cluster import *
|
|||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
class TDTestCase:
|
||||
def __init__(self):
|
||||
self.vgroups = 1
|
||||
|
@ -195,6 +200,8 @@ class TDTestCase:
|
|||
for i in range(len(topicNameList)):
|
||||
tdSql.query("drop topic %s"%topicNameList[i])
|
||||
|
||||
if deleteWal == True:
|
||||
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
|
||||
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -80,6 +80,7 @@ typedef struct {
|
|||
#ifdef WEBSOCKET
|
||||
bool restful;
|
||||
bool cloud;
|
||||
bool local;
|
||||
char* dsn;
|
||||
int32_t timeout;
|
||||
#endif
|
||||
|
|
|
@ -410,7 +410,7 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) {
|
|||
shellInitArgs(argc, argv);
|
||||
shell.info.clientVersion =
|
||||
"Welcome to the %s Command Line Interface, Client Version:%s\r\n"
|
||||
"Copyright (c) 2022 by %s, all rights reserved.\r\n\r\n";
|
||||
"Copyright (c) 2023 by %s, all rights reserved.\r\n\r\n";
|
||||
#ifdef CUS_NAME
|
||||
strcpy(shell.info.cusName, CUS_NAME);
|
||||
#else
|
||||
|
|
|
@ -83,6 +83,11 @@ SWords shellCommands[] = {
|
|||
{"alter local \"asynclog\" \"1\";", 0, 0, NULL},
|
||||
{"alter topic", 0, 0, NULL},
|
||||
{"alter user <user_name> <user_actions> <anyword> ;", 0, 0, NULL},
|
||||
#ifdef TD_ENTERPRISE
|
||||
{"balance vgroup;", 0, 0, NULL},
|
||||
{"balance vgroup leader <vgroup_id>", 0, 0, NULL},
|
||||
#endif
|
||||
|
||||
// 20
|
||||
{"create table <anyword> using <stb_name> tags(", 0, 0, NULL},
|
||||
{"create database <anyword> <db_options> <anyword> <db_options> <anyword> <db_options> <anyword> <db_options> "
|
||||
|
@ -127,9 +132,12 @@ SWords shellCommands[] = {
|
|||
{"kill query ", 0, 0, NULL},
|
||||
{"kill transaction ", 0, 0, NULL},
|
||||
#ifdef TD_ENTERPRISE
|
||||
{"merge vgroup ", 0, 0, NULL},
|
||||
{"merge vgroup <vgroup_id> <vgroup_id>", 0, 0, NULL},
|
||||
#endif
|
||||
{"pause stream <stream_name> ;", 0, 0, NULL},
|
||||
#ifdef TD_ENTERPRISE
|
||||
{"redistribute vgroup <vgroup_id> dnode <dnode_id> ;", 0, 0, NULL},
|
||||
#endif
|
||||
{"resume stream <stream_name> ;", 0, 0, NULL},
|
||||
{"reset query cache;", 0, 0, NULL},
|
||||
{"restore dnode <dnode_id> ;", 0, 0, NULL},
|
||||
|
@ -187,7 +195,7 @@ SWords shellCommands[] = {
|
|||
{"show consumers;", 0, 0, NULL},
|
||||
{"show grants;", 0, 0, NULL},
|
||||
#ifdef TD_ENTERPRISE
|
||||
{"split vgroup ", 0, 0, NULL},
|
||||
{"split vgroup <vgroup_id>", 0, 0, NULL},
|
||||
#endif
|
||||
{"insert into <tb_name> values(", 0, 0, NULL},
|
||||
{"insert into <tb_name> using <stb_name> tags(", 0, 0, NULL},
|
||||
|
@ -268,7 +276,9 @@ char* db_options[] = {"keep ",
|
|||
"wal_retention_size ",
|
||||
"wal_segment_size "};
|
||||
|
||||
char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "cachesize ", "wal_fsync_period ", "wal_level "};
|
||||
char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "stt_trigger ",
|
||||
"wal_retention_period ", "wal_retention_size ", "cachesize ",
|
||||
"wal_fsync_period ", "buffer ", "pages " ,"wal_level "};
|
||||
|
||||
char* data_types[] = {"timestamp", "int",
|
||||
"int unsigned", "varchar(16)",
|
||||
|
@ -312,26 +322,27 @@ bool waitAutoFill = false;
|
|||
#define WT_VAR_TOPIC 5
|
||||
#define WT_VAR_STREAM 6
|
||||
#define WT_VAR_UDFNAME 7
|
||||
#define WT_VAR_VGROUPID 8
|
||||
|
||||
#define WT_FROM_DB_MAX 7 // max get content from db
|
||||
#define WT_FROM_DB_MAX 8 // max get content from db
|
||||
#define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1)
|
||||
|
||||
#define WT_VAR_ALLTABLE 8
|
||||
#define WT_VAR_FUNC 9
|
||||
#define WT_VAR_KEYWORD 10
|
||||
#define WT_VAR_TBACTION 11
|
||||
#define WT_VAR_DBOPTION 12
|
||||
#define WT_VAR_ALTER_DBOPTION 13
|
||||
#define WT_VAR_DATATYPE 14
|
||||
#define WT_VAR_KEYTAGS 15
|
||||
#define WT_VAR_ANYWORD 16
|
||||
#define WT_VAR_TBOPTION 17
|
||||
#define WT_VAR_USERACTION 18
|
||||
#define WT_VAR_KEYSELECT 19
|
||||
#define WT_VAR_SYSTABLE 20
|
||||
#define WT_VAR_LANGUAGE 21
|
||||
#define WT_VAR_ALLTABLE 9
|
||||
#define WT_VAR_FUNC 10
|
||||
#define WT_VAR_KEYWORD 11
|
||||
#define WT_VAR_TBACTION 12
|
||||
#define WT_VAR_DBOPTION 13
|
||||
#define WT_VAR_ALTER_DBOPTION 14
|
||||
#define WT_VAR_DATATYPE 15
|
||||
#define WT_VAR_KEYTAGS 16
|
||||
#define WT_VAR_ANYWORD 17
|
||||
#define WT_VAR_TBOPTION 18
|
||||
#define WT_VAR_USERACTION 19
|
||||
#define WT_VAR_KEYSELECT 20
|
||||
#define WT_VAR_SYSTABLE 21
|
||||
#define WT_VAR_LANGUAGE 22
|
||||
|
||||
#define WT_VAR_CNT 22
|
||||
#define WT_VAR_CNT 23
|
||||
|
||||
|
||||
#define WT_TEXT 0xFF
|
||||
|
@ -345,11 +356,11 @@ TdThread* threads[WT_FROM_DB_CNT];
|
|||
// obtain var name with sql from server
|
||||
char varTypes[WT_VAR_CNT][64] = {
|
||||
"<db_name>", "<stb_name>", "<tb_name>", "<dnode_id>", "<user_name>", "<topic_name>", "<stream_name>",
|
||||
"<udf_name>", "<all_table>", "<function>", "<keyword>", "<tb_actions>", "<db_options>", "<alter_db_options>",
|
||||
"<udf_name>", "<vgroup_id>", "<all_table>", "<function>", "<keyword>", "<tb_actions>", "<db_options>", "<alter_db_options>",
|
||||
"<data_types>", "<key_tags>", "<anyword>", "<tb_options>", "<user_actions>", "<key_select>", "<sys_table>", "<udf_language>"};
|
||||
|
||||
char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;",
|
||||
"show users;", "show topics;", "show streams;", "show functions;"};
|
||||
"show users;", "show topics;", "show streams;", "show functions;", "show vgroups;"};
|
||||
|
||||
// var words current cursor, if user press any one key except tab, cursorVar can be reset to -1
|
||||
int cursorVar = -1;
|
||||
|
@ -520,7 +531,10 @@ void showHelp() {
|
|||
printf(
|
||||
"\n\n\
|
||||
----- special commands on enterprise version ----- \n\
|
||||
balance vgroup; \n\
|
||||
balance vgroup leader <vgroup_id> \n\
|
||||
compact database <db_name>; \n\
|
||||
redistribute vgroup <vgroup_id> dnode <dnode_id> ;\n\
|
||||
split vgroup <vgroup_id>;");
|
||||
#endif
|
||||
|
||||
|
@ -675,9 +689,9 @@ bool shellAutoInit() {
|
|||
// generate varType
|
||||
GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_TBACTION, tb_actions, sizeof(tb_actions) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_DBOPTION, db_options, sizeof(db_options) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_ALTER_DBOPTION, alter_db_options, sizeof(alter_db_options) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_TBACTION, tb_actions, sizeof(tb_actions) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_DATATYPE, data_types, sizeof(data_types) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_KEYTAGS, key_tags, sizeof(key_tags) / sizeof(char*));
|
||||
GenerateVarType(WT_VAR_TBOPTION, tb_options, sizeof(tb_options) / sizeof(char*));
|
||||
|
|
|
@ -62,6 +62,8 @@ static void shellCleanup(void *arg);
|
|||
static void *shellCancelHandler(void *arg);
|
||||
static void *shellThreadLoop(void *arg);
|
||||
|
||||
static bool shellCmdkilled = false;
|
||||
|
||||
bool shellIsEmptyCommand(const char *cmd) {
|
||||
for (char c = *cmd++; c != 0; c = *cmd++) {
|
||||
if (c != ' ' && c != '\t' && c != ';') {
|
||||
|
@ -72,6 +74,8 @@ bool shellIsEmptyCommand(const char *cmd) {
|
|||
}
|
||||
|
||||
int32_t shellRunSingleCommand(char *command) {
|
||||
shellCmdkilled = false;
|
||||
|
||||
if (shellIsEmptyCommand(command)) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -257,7 +261,8 @@ void shellRunSingleCommandImp(char *command) {
|
|||
if (error_no == 0) {
|
||||
printf("Query OK, %"PRId64 " row(s) in set (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
|
||||
} else {
|
||||
printf("Query interrupted (%s), %"PRId64 " row(s) in set (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
|
||||
terrno = error_no;
|
||||
printf("Query interrupted (%s), %"PRId64 " row(s) in set (%.6fs)\r\n", taos_errstr(NULL), numOfRows, (et - st) / 1E6);
|
||||
}
|
||||
taos_free_result(pSql);
|
||||
} else {
|
||||
|
@ -952,7 +957,11 @@ void shellDumpResultCallback(void *param, TAOS_RES *tres, int num_of_rows) {
|
|||
}
|
||||
}
|
||||
dump_info->numOfAllRows += num_of_rows;
|
||||
if (!shellCmdkilled) {
|
||||
taos_fetch_rows_a(tres, shellDumpResultCallback, param);
|
||||
} else {
|
||||
tsem_post(&dump_info->sem);
|
||||
}
|
||||
} else {
|
||||
if (num_of_rows < 0) {
|
||||
printf("\033[31masync retrieve failed, code: %d\033[0m\n", num_of_rows);
|
||||
|
@ -967,13 +976,15 @@ int64_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool ver
|
|||
num_of_rows = shellDumpResultToFile(fname, tres);
|
||||
} else {
|
||||
tsDumpInfo dump_info;
|
||||
if (!shellCmdkilled) {
|
||||
init_dump_info(&dump_info, tres, sql, vertical);
|
||||
taos_fetch_rows_a(tres, shellDumpResultCallback, &dump_info);
|
||||
tsem_wait(&dump_info.sem);
|
||||
num_of_rows = dump_info.numOfAllRows;
|
||||
}
|
||||
}
|
||||
|
||||
*error_no = taos_errno(tres);
|
||||
*error_no = shellCmdkilled ? TSDB_CODE_TSC_QUERY_KILLED : taos_errno(tres);
|
||||
return num_of_rows;
|
||||
}
|
||||
|
||||
|
@ -1197,6 +1208,7 @@ void *shellCancelHandler(void *arg) {
|
|||
} else {
|
||||
#endif
|
||||
if (shell.conn) {
|
||||
shellCmdkilled = true;
|
||||
taos_kill_query(shell.conn);
|
||||
}
|
||||
#ifdef WEBSOCKET
|
||||
|
|
|
@ -47,6 +47,7 @@ int main(int argc, char *argv[]) {
|
|||
#ifdef WEBSOCKET
|
||||
shell.args.timeout = SHELL_WS_TIMEOUT;
|
||||
shell.args.cloud = true;
|
||||
shell.args.local = false;
|
||||
#endif
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
|
|
|
@ -130,11 +130,21 @@ void shellCheckConnectMode() {
|
|||
}
|
||||
if (shell.args.cloud) {
|
||||
shell.args.dsn = getenv("TDENGINE_CLOUD_DSN");
|
||||
if (shell.args.dsn) {
|
||||
if (shell.args.dsn && strlen(shell.args.dsn) > 4) {
|
||||
shell.args.cloud = true;
|
||||
shell.args.local = false;
|
||||
shell.args.restful = false;
|
||||
return;
|
||||
}
|
||||
|
||||
shell.args.dsn = getenv("TDENGINE_DSN");
|
||||
if (shell.args.dsn && strlen(shell.args.dsn) > 4) {
|
||||
shell.args.cloud = true;
|
||||
shell.args.local = true;
|
||||
shell.args.restful = false;
|
||||
return;
|
||||
}
|
||||
|
||||
if (shell.args.restful) {
|
||||
if (!shell.args.host) {
|
||||
shell.args.host = "localhost";
|
||||
|
|
Some files were not shown because too many files have changed in this diff