other:merge 3.0

commit 5a2c583c32
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "3.0.1.2")
+SET(TD_VER_NUMBER "3.0.1.3")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
 GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-GIT_TAG f03c09a
+GIT_TAG 70f5a1c
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -37,7 +37,8 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
 - All the data in `tag_set` will be converted to nchar type automatically .
 - Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
 - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
+- You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
 :::
 
 For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
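Illustration of the smlChildTableName note added above (hypothetical setup: smlChildTableName=tname configured in taos.cfg and a row such as st,tname=cpu1,t1=4 c1=3 1626006833639000000 already written): the automatically created child table can then be queried either directly or through its super table, for example:

select c1 from cpu1;
select tbname, c1 from st;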
@@ -64,3 +65,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
 <CLine />
 </TabItem>
 </Tabs>
+
+## Query Examples
+If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=2
@@ -31,7 +31,7 @@ For example:
 ```txt
 meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
 ```
+- The defult child table name is generated by rules.You can configure smlChildTableName in taos.cfg to specify chile table names, for example, `smlChildTableName=tname`. You can insert `meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
 
 ## Examples
@@ -79,3 +79,6 @@ taos> select tbname, * from `meters.current`;
 t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.005399s)
 ```
+## Query Examples
+If you want query the data of `location=California.LosAngeles groupid=3`,here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
@@ -48,7 +48,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
 
 - In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
 - Only data in array format is accepted and so an array must be used even if there is only one row.
+- The defult child table name is generated by rules.You can configure smlChildTableName in taos.cfg to specify chile table names, for example, `smlChildTableName=tname`. You can insert `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 :::
 
 ## Examples
@@ -94,3 +94,6 @@ taos> select * from `meters.current`;
 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.004076s)
 ```
+## Query Examples
+If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
@@ -867,6 +867,7 @@ INTERP(expr)
 - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
 - Interpolation is performed based on `FILL` parameter.
 - `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
+- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
 
 ### LAST
 
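An illustrative use of the `_irowts` pseudo column described above (the table, column, and time range here are assumptions, not taken from the commit): each returned row carries the interpolated value together with the timestamp of its interpolation point.

select _irowts, interp(current) from meters partition by tbname range('2022-03-28 09:56:51', '2022-03-28 09:57:00') every(1s) fill(linear);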
@@ -11,7 +11,15 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide acce
 4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems.
 5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables.
 
-Note: SHOW statements are still supported for the convenience of existing users.
+:::info
+
+- SHOW statements are still supported for the convenience of existing users.
+- Some columns in the system table may be keywords, and you need to use the escape character '\`' when querying, for example, to query the VGROUPS in the database `test`:
+```sql
+select `vgroups` from ins_databases where name = 'test';
+```
+
+:::
 
 This document introduces the tables of INFORMATION_SCHEMA and their structure.
 
@@ -102,7 +110,11 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
 | 24 | wal_retention_period | INT | WAL retention period |
 | 25 | wal_retention_size | INT | Maximum WAL size |
 | 26 | wal_roll_period | INT | WAL rotation period |
-| 27 | wal_segment_size | WAL file size |
+| 27 | wal_segment_size | BIGINT | WAL file size |
+| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging |
+| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name |
+| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name |
+| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB |
 
 ## INS_FUNCTIONS
 
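The columns added to INS_DATABASES above can be read back with an ordinary query on the system table; an illustrative example (column names taken from the table above, back-quoted in case they collide with keywords):

select name, `stt_trigger`, `table_prefix`, `table_suffix`, `tsdb_pagesize` from information_schema.ins_databases;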
@@ -177,12 +177,21 @@ The parameters described in this document by the effect that they have on the sy
 ### maxNumOfDistinctRes
 
 | Attribute | Description |
-| -------- | -------------------------------- | --- |
+| -------- | -------------------------------- |
 | Applicable | Server Only |
 | Meaning | The maximum number of distinct rows returned |
 | Value Range | [100,000 - 100,000,000] |
 | Default Value | 100,000 |
 
+### keepColumnName
+
+| Attribute | Description |
+| -------- | -------------------------------- |
+| Applicable | Client only |
+| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
+| Value Range | 0 means including the function name, 1 means not including the function name. |
+| Default Value | 0 |
+
 ## Locale Parameters
 
 ### timezone
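A sketch of what the new keepColumnName parameter changes, following the semantics stated in the table above (the query and table name are illustrative): per that table, with the default value 0 the result column keeps the function name, e.g. last(current), while 1 returns it simply as current.

select last(current) from meters;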
@@ -47,9 +47,8 @@ In the schemaless writing data line protocol, each data item in the field_set ne
 
 - `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
 
-For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label
-is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
-is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
+For example, the following data rows write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
+as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
 
 ```json
 st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -69,7 +68,7 @@ Schemaless writes process row data according to the following principles.
 
 Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
 The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
-You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 
 2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
 3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
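To see the automatically generated child-table names described above (the fixed "t_" prefix followed by the MD5 value), they can be listed through the super table; an illustrative query using the `st` super table from the example:

select tbname, * from st;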
@@ -78,7 +77,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
 NULL.
 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
 7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
-8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
+8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
 
 :::tip
 All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
@@ -6,6 +6,10 @@ description: TDengine release history, Release Notes and download links.
 
 import Release from "/components/ReleaseV3";
 
+## 3.0.1.3
+
+<Release type="tdengine" version="3.0.1.3" />
+
 ## 3.0.1.2
 
 <Release type="tdengine" version="3.0.1.2" />
@@ -6,6 +6,10 @@ description: taosTools release history, Release Notes, download links.
 
 import Release from "/components/ReleaseV3";
 
+## 2.2.3
+
+<Release type="tools" version="2.2.3" />
+
 ## 2.2.2
 
 <Release type="tools" version="2.2.2" />
@@ -37,7 +37,9 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
 - tag_set 中的所有的数据自动转化为 nchar 数据类型;
 - field_set 中的每个数据项都需要对自身的数据类型进行描述, 比如 1.2f32 代表 float 类型的数值 1.2, 如果不带类型后缀会被当作 double 处理;
 - timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度。
+- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3之后的版本 smlDataFormat 默认为 false) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
 :::
 
 要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
@@ -64,3 +66,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
 <CLine />
 </TabItem>
 </Tabs>
+
+## 查询示例
+比如查询 location=California.LosAngeles,groupid=2 子表的数据可以通过如下sql:
+select * from meters where location=California.LosAngeles and groupid=2
@@ -32,6 +32,8 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
 meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
 ```
 
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
 参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
 
 ## 示例代码
@@ -79,3 +81,6 @@ taos> select tbname, * from `meters.current`;
 t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.005399s)
 ```
+## 查询示例:
+想要查询 location=California.LosAngeles groupid=3 的数据,可以通过如下sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
@@ -48,7 +48,8 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
 
 - 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将将转为 nchar 类型, 数值将同样转换为 double 类型。
 - TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 "tags": { "host": "web02","dc": "lga","tname":"cpu1"} 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
 :::
 
 ## 示例代码
|
||||||
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
|
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
|
||||||
Query OK, 2 row(s) in set (0.004076s)
|
Query OK, 2 row(s) in set (0.004076s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## 查询示例
|
||||||
|
想要查询"tags": {"location": "California.LosAngeles", "groupid": 1} 的数据,可以通过如下sql:
|
||||||
|
select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
|
||||||
|
|
|
@@ -868,6 +868,7 @@ INTERP(expr)
 - INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
 - INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
 - INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
+- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。
 
 ### LAST
 
@@ -12,7 +12,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
 4. TDengine 在后续演进中可以灵活的添加已有 INFORMATION_SCHEMA 中表的列,而不用担心对既有业务系统造成影响
 5. 与其他数据库系统更具互操作性。例如,Oracle 数据库用户熟悉查询 Oracle 数据字典中的表
 
-Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
+:::info
+
+- 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
+- 系统表中的一些列可能是关键字,在查询时需要使用转义符'\`',例如查询数据库 test 有几个 VGROUP:
+```sql
+select `vgroups` from ins_databases where name = 'test';
+```
+
+:::
 
 本章将详细介绍 `INFORMATION_SCHEMA` 这个内置元数据库中的表和表结构。
 
@@ -103,7 +111,11 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
 | 24 | wal_retention_period | INT | WAL 的保存时长 |
 | 25 | wal_retention_size | INT | WAL 的保存上限 |
 | 26 | wal_roll_period | INT | wal 文件切换时长 |
-| 27 | wal_segment_size | wal 单个文件大小 |
+| 27 | wal_segment_size | BIGINT | wal 单个文件大小 |
+| 28 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数 |
+| 29 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度 |
+| 30 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度 |
+| 31 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小 |
 
 ## INS_FUNCTIONS
 
@@ -177,12 +177,21 @@ taos --dump-config
 ### maxNumOfDistinctRes
 
 | 属性 | 说明 |
-| -------- | -------------------------------- | --- |
+| -------- | -------------------------------- |
 | 适用范围 | 仅服务端适用 |
 | 含义 | 允许返回的 distinct 结果最大行数 |
 | 取值范围 | 默认值为 10 万,最大值 1 亿 |
 | 缺省值 | 10 万 |
 
+### keepColumnName
+
+| 属性 | 说明 |
+| -------- | -------------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
+| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
+| 缺省值 | 0 |
+
 ## 区域相关
 
 ### timezone
@@ -67,9 +67,12 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
 "measurement,tag_key1=tag_value1,tag_key2=tag_value2"
 ```
 
+:::tip
 需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
 排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
-为了让用户可以指定生成的表名,可以通过配置 smlChildTableName 来指定(比如 配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一次自动建表时指定的 tag_set,其他的会忽略)。
+:::tip
+为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
 
 2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
 3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
@@ -6,6 +6,10 @@ description: TDengine 发布历史、Release Notes 及下载链接
 
 import Release from "/components/ReleaseV3";
 
+## 3.0.1.3
+
+<Release type="tdengine" version="3.0.1.3" />
+
 ## 3.0.1.2
 
 <Release type="tdengine" version="3.0.1.2" />
@@ -6,6 +6,10 @@ description: taosTools 的发布历史、Release Notes 和下载链接
 
 import Release from "/components/ReleaseV3";
 
+## 2.2.3
+
+<Release type="tools" version="2.2.3" />
+
 ## 2.2.2
 
 <Release type="tools" version="2.2.2" />
@@ -55,11 +55,10 @@ extern int32_t tMsgDict[];
 
 #define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
 #define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
 #define TMSG_INFO(TYPE) \
-((TYPE) >= 0 && ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || \
-(TYPE) < TDMT_SCH_MAX_MSG || (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || \
-(TYPE) < TDMT_SYNC_MAX_MSG)) \
-? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
+((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
+(TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) \
+? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
 : 0
 #define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
 
@@ -239,6 +239,7 @@ typedef enum ENodeType {
 QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
 QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
 QUERY_NODE_PHYSICAL_PLAN_FILL,
+QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
 QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
 QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
 QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
@@ -464,6 +464,8 @@ typedef struct SFillPhysiNode {
 EOrder inputTsOrder;
 } SFillPhysiNode;
 
+typedef SFillPhysiNode SStreamFillPhysiNode;
+
 typedef struct SMultiTableIntervalPhysiNode {
 SIntervalPhysiNode interval;
 SNodeList* pPartitionKeys;
@@ -31,6 +31,7 @@ typedef struct {
 TDB* db;
 TTB* pStateDb;
 TTB* pFuncStateDb;
+TTB* pFillStateDb; // todo refactor
 TXN txn;
 } SStreamState;
 
@@ -51,15 +52,22 @@ int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
 int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
 int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
 int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
 
+int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
+
 int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
 int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
 void streamFreeVal(void* val);
 
 SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
-SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
-SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
+SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key);
 void streamStateFreeCur(SStreamStateCur* pCur);
 
+int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
 int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
 
 int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
@@ -1,22 +0,0 @@
-[Unit]
-Description=Nginx For TDengine Service
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=forking
-PIDFile=/usr/local/nginxd/logs/nginx.pid
-ExecStart=/usr/local/nginxd/sbin/nginx
-ExecStop=/usr/local/nginxd/sbin/nginx -s stop
-TimeoutStopSec=1000000s
-LimitNOFILE=infinity
-LimitNPROC=infinity
-LimitCORE=infinity
-TimeoutStartSec=0
-StandardOutput=null
-Restart=always
-StartLimitBurst=3
-StartLimitInterval=60s
-
-[Install]
-WantedBy=multi-user.target
@@ -38,8 +38,6 @@ temp_version=""
 fin_result=""
 
 service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
 
 # Color setting
 RED='\033[0;31m'
@@ -132,10 +130,7 @@ function check_main_path() {
 check_file ${install_main_dir} $i
 done
 if [ "$verMode" == "cluster" ]; then
-nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
-for i in "${nginx_main_dir[@]}";do
-check_file ${nginx_dir} $i
-done
+check_file ${install_main_dir} "share/admin"
 fi
 echo -e "Check main path:\033[32mOK\033[0m!"
 }
@@ -150,9 +145,6 @@ function check_bin_path() {
 for i in "${lbin_dir[@]}";do
 check_link ${bin_link_dir}/$i
 done
-if [ "$verMode" == "cluster" ]; then
-check_file ${nginx_dir}/sbin nginx
-fi
 echo -e "Check bin path:\033[32mOK\033[0m!"
 }
 
@@ -50,8 +50,7 @@ install_main_dir=${installDir}
 bin_dir="${installDir}/bin"
 
 service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
+web_port=6041
 
 # Color setting
 RED='\033[0;31m'
@@ -182,7 +181,7 @@ function install_main_path() {
 ${csudo}mkdir -p ${install_main_dir}/include
 # ${csudo}mkdir -p ${install_main_dir}/init.d
 if [ "$verMode" == "cluster" ]; then
-${csudo}mkdir -p ${nginx_dir}
+${csudo}mkdir -p ${install_main_dir}/share
 fi
 
 if [[ -e ${script_dir}/email ]]; then
@@ -218,12 +217,6 @@ function install_bin() {
 [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
 [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
 [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-
-if [ "$verMode" == "cluster" ]; then
-${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/*
-${csudo}mkdir -p ${nginx_dir}/logs
-${csudo}chmod 777 ${nginx_dir}/sbin/nginx
-fi
 }
 
 function install_lib() {
@@ -574,6 +567,13 @@ function install_examples() {
 fi
 }
 
+function install_web() {
+if [ -d "${script_dir}/share" ]; then
+${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share
+fi
+}
+
+
 function clean_service_on_sysvinit() {
 if pidof ${serverName} &>/dev/null; then
 ${csudo}service ${serverName} stop || :
@@ -654,16 +654,6 @@ function clean_service_on_systemd() {
 fi
 ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
 ${csudo}rm -f ${tarbitratord_service_config}
-
-if [ "$verMode" == "cluster" ]; then
-nginx_service_config="${service_config_dir}/nginxd.service"
-if systemctl is-active --quiet nginxd; then
-echo "Nginx for ${productName} is running, stopping it..."
-${csudo}systemctl stop nginxd &>/dev/null || echo &>/dev/null
-fi
-${csudo}systemctl disable nginxd &>/dev/null || echo &>/dev/null
-${csudo}rm -f ${nginx_service_config}
-fi
 }
 
 function install_service_on_systemd() {
@@ -677,19 +667,6 @@ function install_service_on_systemd() {
 ${csudo}systemctl enable ${serverName}
 
 ${csudo}systemctl daemon-reload
-
-if [ "$verMode" == "cluster" ]; then
-[ -f ${script_dir}/cfg/nginxd.service ] &&
-${csudo}cp ${script_dir}/cfg/nginxd.service \
-${service_config_dir}/ || :
-${csudo}systemctl daemon-reload
-
-if ! ${csudo}systemctl enable nginxd &>/dev/null; then
-${csudo}systemctl daemon-reexec
-${csudo}systemctl enable nginxd
-fi
-${csudo}systemctl start nginxd
-fi
 }
 
 function install_adapter_service() {
@@ -793,19 +770,6 @@ function updateProduct() {
 sleep 1
 fi
-
-if [ "$verMode" == "cluster" ]; then
-if pidof nginx &>/dev/null; then
-if ((${service_mod} == 0)); then
-${csudo}systemctl stop nginxd || :
-elif ((${service_mod} == 1)); then
-${csudo}service nginxd stop || :
-else
-kill_process nginx
-fi
-sleep 1
-fi
-fi
 
 install_main_path
 
 install_log
@@ -817,6 +781,7 @@ function updateProduct() {
 fi
 
 install_examples
+install_web
 if [ -z $1 ]; then
 install_bin
 install_service
@@ -825,18 +790,6 @@ function updateProduct() {
 install_adapter_config
 
 openresty_work=false
-if [ "$verMode" == "cluster" ]; then
-# Check if openresty is installed
-# Check if nginx is installed successfully
-if type curl &>/dev/null; then
-if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
-echo -e "\033[44;32;1mNginx for ${productName} is updated successfully!${NC}"
-openresty_work=true
-else
-echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
-fi
-fi
-fi
 
 echo
 echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
@@ -857,7 +810,7 @@ function updateProduct() {
 fi
 
 if [ ${openresty_work} = 'true' ]; then
-echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
 else
 echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell${NC}"
 fi
@@ -906,6 +859,7 @@ function installProduct() {
 install_connector
 fi
 install_examples
+install_web
 
 if [ -z $1 ]; then # install service and client
 # For installing new
@@ -915,17 +869,6 @@ function installProduct() {
 install_adapter_config
 
 openresty_work=false
-if [ "$verMode" == "cluster" ]; then
-# Check if nginx is installed successfully
-if type curl &>/dev/null; then
-if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
-echo -e "\033[44;32;1mNginx for ${productName} is installed successfully!${NC}"
-openresty_work=true
-else
-echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
-fi
-fi
-fi
 
 install_config
 
@@ -151,6 +151,7 @@ function install_main_path() {
 ${csudo}mkdir -p ${install_main_dir}/driver
 ${csudo}mkdir -p ${install_main_dir}/examples
 ${csudo}mkdir -p ${install_main_dir}/include
+${csudo}mkdir -p ${install_main_dir}/share
 # ${csudo}mkdir -p ${install_main_dir}/init.d
 else
 ${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
@@ -161,6 +162,7 @@ function install_main_path() {
 ${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
 ${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
 ${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
+${csudo}mkdir -p ${install_main_dir}/share || ${csudo}mkdir -p ${install_main_2_dir}/share
 fi
 }
 
@@ -469,6 +471,16 @@ function install_examples() {
 fi
 }
 
+function install_web() {
+if [ -d "${binary_dir}/build/share" ]; then
+if [ "$osType" != "Darwin" ]; then
+${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || :
+else
+${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_2_dir}/share || :
+fi
+fi
+}
+
 function clean_service_on_sysvinit() {
 if pidof ${serverName} &>/dev/null; then
 ${csudo}service ${serverName} stop || :
@@ -596,6 +608,7 @@ function update_TDengine() {
 install_lib
 # install_connector
 install_examples
+install_web
 install_bin
 
 install_service
@@ -107,7 +107,7 @@ else
 fi
 
 install_files="${script_dir}/install.sh"
-nginx_dir="${top_dir}/../enterprise/src/plugins/web"
+web_dir="${top_dir}/../enterprise/src/plugins/web"
 
 init_file_deb=${script_dir}/../deb/taosd
 init_file_rpm=${script_dir}/../rpm/taosd
@@ -132,10 +132,6 @@ if [ -f "${cfg_dir}/${serverName}.service" ]; then
 cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
 fi
 
-if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
-cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
-fi
-
 mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
 mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
 mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
@@ -222,16 +218,6 @@ if [ "$verMode" == "cluster" ]; then
 sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
 mv remove_temp.sh ${install_dir}/bin/remove.sh
 
-mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
-cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
-rm -rf ${install_dir}/nginxd/png
-
-if [ "$cpuType" == "aarch64" ]; then
-cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
-elif [ "$cpuType" == "aarch32" ]; then
-cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
-fi
-rm -rf ${install_dir}/nginxd/sbin/arm
 fi
 
 cd ${install_dir}
@@ -288,6 +274,13 @@ if [[ $dbName == "taos" ]]; then
 cp -r ${examples_dir}/C# ${install_dir}/examples
 mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
 fi
+
+# Add web files
+if [ -d "${web_dir}/admin" ]; then
+mkdir -p ${install_dir}/share/
+cp ${web_dir}/admin ${install_dir}/share/ -r
+cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
+fi
 fi
 
 # Copy driver
@@ -27,13 +27,11 @@ local_bin_link_dir="/usr/local/bin"
 lib_link_dir="/usr/lib"
 lib64_link_dir="/usr/lib64"
 inc_link_dir="/usr/include"
-install_nginxd_dir="/usr/local/nginxd"
 
 service_config_dir="/etc/systemd/system"
 taos_service_name=${serverName}
 taosadapter_service_name="taosadapter"
 tarbitrator_service_name="tarbitratord"
-nginx_service_name="nginxd"
 csudo=""
 if command -v sudo >/dev/null; then
 csudo="sudo "
@@ -153,18 +151,6 @@ function clean_service_on_systemd() {
 fi
 ${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
 ${csudo}rm -f ${tarbitratord_service_config}
-
-if [ "$verMode" == "cluster" ]; then
-nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
-if [ -d ${install_nginxd_dir} ]; then
-if systemctl is-active --quiet ${nginx_service_name}; then
-echo "Nginx for ${productName} is running, stopping it..."
-${csudo}systemctl stop ${nginx_service_name} &>/dev/null || echo &>/dev/null
-fi
-${csudo}systemctl disable ${nginx_service_name} &>/dev/null || echo &>/dev/null
-${csudo}rm -f ${nginx_service_config}
-fi
-fi
 }
 
 function clean_service_on_sysvinit() {
@@ -239,7 +225,6 @@ clean_config
 ${csudo}rm -rf ${data_link_dir} || :
 
 ${csudo}rm -rf ${install_main_dir}
-${csudo}rm -rf ${install_nginxd_dir}
 if [[ -e /etc/os-release ]]; then
 osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
 else
@@ -19,6 +19,7 @@
 #include "functionMgt.h"
 #include "os.h"
 #include "query.h"
+#include "qworker.h"
 #include "scheduler.h"
 #include "tcache.h"
 #include "tglobal.h"
@@ -27,7 +28,6 @@
 #include "trpc.h"
 #include "tsched.h"
 #include "ttime.h"
-#include "qworker.h"
 
 #define TSC_VAR_NOT_RELEASE 1
 #define TSC_VAR_RELEASED 0
@@ -70,11 +70,10 @@ static void deregisterRequest(SRequestObj *pRequest) {
 int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
 int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
 
-int64_t nowUs = taosGetTimestampUs();
-int64_t duration = nowUs - pRequest->metric.start;
-tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
-" ms, current:%d, app current:%d",
-pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
+int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
+tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%.2f ms, "
+"current:%d, app current:%d",
+pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
 
 if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
 tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
@@ -85,11 +84,12 @@ static void deregisterRequest(SRequestObj *pRequest) {
 atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
 } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
 tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
-"us, planner:%" PRId64 "us, exec:%" PRId64 "us",
+"us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%"PRIx64,
 duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
 pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
 pRequest->metric.planEnd - pRequest->metric.semanticEnd,
-pRequest->metric.resultReady - pRequest->metric.planEnd);
+pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
 
 atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
 }
 
@@ -483,8 +483,8 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) {
 
 int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SArray* pDbVgList) {
 SArray* nodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
-char *policy = (tsQueryPolicy == QUERY_POLICY_VNODE) ? "vnode" : "client";
+char* policy = (tsQueryPolicy == QUERY_POLICY_VNODE) ? "vnode" : "client";
 
 int32_t dbNum = taosArrayGetSize(pDbVgList);
 for (int32_t i = 0; i < dbNum; ++i) {
 SArray* pVg = taosArrayGetP(pDbVgList, i);
@ -815,7 +815,7 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
|
||||||
|
|
||||||
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
||||||
if (NULL == pRequest->body.resInfo.execRes.res) {
|
if (NULL == pRequest->body.resInfo.execRes.res) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return pRequest->code;
|
||||||
}
|
}
|
||||||
|
|
||||||
SCatalog* pCatalog = NULL;
|
SCatalog* pCatalog = NULL;
|
||||||
|
@ -868,10 +868,12 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//todo refacto the error code mgmt
|
||||||
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
||||||
SRequestObj* pRequest = (SRequestObj*)param;
|
SRequestObj* pRequest = (SRequestObj*)param;
|
||||||
pRequest->code = code;
|
STscObj* pTscObj = pRequest->pTscObj;
|
||||||
|
|
||||||
|
pRequest->code = code;
|
||||||
pRequest->metric.resultReady = taosGetTimestampUs();
|
pRequest->metric.resultReady = taosGetTimestampUs();
|
||||||
|
|
||||||
if (pResult) {
|
if (pResult) {
|
||||||
|
@ -879,33 +881,32 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
||||||
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
|
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type ||
|
int32_t type = pRequest->type;
|
||||||
TDMT_VND_CREATE_TABLE == pRequest->type) {
|
if (TDMT_VND_SUBMIT == type || TDMT_VND_DELETE == type || TDMT_VND_CREATE_TABLE == type) {
|
||||||
if (pResult) {
|
if (pResult) {
|
||||||
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
|
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
|
||||||
if (TDMT_VND_SUBMIT == pRequest->type) {
|
|
||||||
STscObj* pTscObj = pRequest->pTscObj;
|
// record the insert rows
|
||||||
|
if (TDMT_VND_SUBMIT == type) {
|
||||||
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
|
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
|
||||||
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
|
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
||||||
|
|
||||||
pRequest->metric.execEnd = taosGetTimestampUs();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
taosMemoryFree(pResult);
|
taosMemoryFree(pResult);
|
||||||
|
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
|
||||||
|
pRequest->requestId);
|
||||||
|
|
||||||
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
|
|
||||||
tstrerror(code), pRequest->requestId);
|
|
||||||
|
|
||||||
STscObj* pTscObj = pRequest->pTscObj;
|
|
||||||
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
|
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
|
||||||
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
|
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64,
|
||||||
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
|
pRequest->self, tstrerror(code), pRequest->retry, pRequest->requestId);
|
||||||
pRequest->prevCode = code;
|
pRequest->prevCode = code;
|
||||||
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
||||||
|
qDestroyQuery(pRequest->pQuery);
|
||||||
|
pRequest->pQuery = NULL;
|
||||||
doAsyncQuery(pRequest, true);
|
doAsyncQuery(pRequest, true);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -915,7 +916,11 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
||||||
removeMeta(pTscObj, pRequest->targetTableList);
|
removeMeta(pTscObj, pRequest->targetTableList);
|
||||||
}
|
}
|
||||||
|
|
||||||
handleQueryExecRsp(pRequest);
|
pRequest->metric.execEnd = taosGetTimestampUs();
|
||||||
|
int32_t code1 = handleQueryExecRsp(pRequest);
|
||||||
|
if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) {
|
||||||
|
pRequest->code = code1;
|
||||||
|
}
|
||||||
|
|
||||||
// return to client
|
// return to client
|
||||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||||
|
|
|
@@ -20,13 +20,13 @@
 #include "functionMgt.h"
 #include "os.h"
 #include "query.h"
+#include "qworker.h"
 #include "scheduler.h"
 #include "tglobal.h"
 #include "tmsg.h"
 #include "tref.h"
 #include "trpc.h"
 #include "version.h"
-#include "qworker.h"

 #define TSC_VAR_NOT_RELEASE 1
 #define TSC_VAR_RELEASED 0
@@ -700,6 +700,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
   SQuery *pQuery = pRequest->pQuery;

   pRequest->metric.ctgEnd = taosGetTimestampUs();
+  qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);

   if (code == TSDB_CODE_SUCCESS) {
     code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
@@ -723,13 +724,16 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {

     destorySqlParseWrapper(pWrapper);

-    tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, reqId:0x%" PRIx64, pRequest->self,
-             pRequest->requestId);
+    double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd)/1000.0;
+    tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64,
+             pRequest->self, el, pRequest->requestId);

     launchAsyncQuery(pRequest, pQuery, pResultMeta);
   } else {
     destorySqlParseWrapper(pWrapper);
     qDestroyQuery(pRequest->pQuery);
     pRequest->pQuery = NULL;

     if (NEED_CLIENT_HANDLE_ERROR(code)) {
       tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
                pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
@@ -85,8 +85,7 @@ uint16_t tsTelemPort = 80;
 char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
 char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
                                                     // If set to empty system will generate table name using MD5 hash.
-bool tsSmlDataFormat =
-    true; // true means that the name and order of cols in each line are the same(only for influx protocol)
+bool tsSmlDataFormat = false; // true means that the name and order of cols in each line are the same(only for influx protocol)

 // query
 int32_t tsQueryPolicy = 1;
@@ -206,7 +205,9 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input
     tstrncpy(cfgFile, cfgDir, sizeof(cfgDir));
   }

-  if (apolloUrl == NULL || apolloUrl[0] == '\0') cfgGetApollUrl(envCmd, envFile, apolloUrl);
+  if (apolloUrl != NULL && apolloUrl[0] == '\0') {
+    cfgGetApollUrl(envCmd, envFile, apolloUrl);
+  }

   if (cfgLoad(pCfg, CFG_STYPE_APOLLO_URL, apolloUrl) != 0) {
     uError("failed to load from apollo url:%s since %s", apolloUrl, terrstr());
@@ -1132,11 +1133,20 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi

   if (tsc) {
     tsLogEmbedded = 0;
-    if (taosAddClientLogCfg(pCfg) != 0) return -1;
+    if (taosAddClientLogCfg(pCfg) != 0) {
+      cfgCleanup(pCfg);
+      return -1;
+    }
   } else {
     tsLogEmbedded = 1;
-    if (taosAddClientLogCfg(pCfg) != 0) return -1;
-    if (taosAddServerLogCfg(pCfg) != 0) return -1;
+    if (taosAddClientLogCfg(pCfg) != 0) {
+      cfgCleanup(pCfg);
+      return -1;
+    }
+    if (taosAddServerLogCfg(pCfg) != 0) {
+      cfgCleanup(pCfg);
+      return -1;
+    }
   }

   if (taosLoadCfg(pCfg, envCmd, cfgDir, envFile, apolloUrl) != 0) {
@@ -87,6 +87,7 @@ int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
       return 0;
     default:
       terrno = TSDB_CODE_INVALID_PARA;
+      taosFreeQitem(pMsg);
       return -1;
   }
 }
@@ -135,12 +135,14 @@ _OVER:
   if (content != NULL) taosMemoryFree(content);
   if (root != NULL) cJSON_Delete(root);
   if (pFile != NULL) taosCloseFile(&pFile);
+  if (*ppCfgs == NULL && pCfgs != NULL) taosMemoryFree(pCfgs);

   terrno = code;
   return code;
 }

 int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
+  int32_t ret = 0;
   char file[PATH_MAX] = {0};
   char realfile[PATH_MAX] = {0};
   snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
@@ -161,13 +163,16 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
   char *content = taosMemoryCalloc(1, maxLen + 1);
   if (content == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return -1;
+    ret = -1;
+    goto _OVER;
   }

   len += snprintf(content + len, maxLen - len, "{\n");
   len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n");
   for (int32_t i = 0; i < numOfVnodes; ++i) {
     SVnodeObj *pVnode = pVnodes[i];
+    if (pVnode == NULL) continue;

     len += snprintf(content + len, maxLen - len, " {\n");
     len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", pVnode->vgId);
     len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pVnode->dropped);
@@ -180,12 +185,13 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
   }
   len += snprintf(content + len, maxLen - len, " ]\n");
   len += snprintf(content + len, maxLen - len, "}\n");
+  terrno = 0;

+_OVER:
   taosWriteFile(pFile, content, len);
   taosFsyncFile(pFile);
   taosCloseFile(&pFile);
   taosMemoryFree(content);
-  terrno = 0;

   for (int32_t i = 0; i < numOfVnodes; ++i) {
     SVnodeObj *pVnode = pVnodes[i];
@@ -196,6 +202,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
     taosMemoryFree(pVnodes);
   }

+  if (ret != 0) return -1;
+
   dDebug("successed to write %s, numOfVnodes:%d", realfile, numOfVnodes);
   return taosRenameFile(file, realfile);
 }
@@ -175,7 +175,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
   pCfg->hashSuffix = pCreate->hashSuffix;
   pCfg->tsdbPageSize = pCreate->tsdbPageSize * 1024;

-  pCfg->standby = pCfg->standby;
+  pCfg->standby = 0;
   pCfg->syncCfg.myIndex = pCreate->selfIndex;
   pCfg->syncCfg.replicaNum = pCreate->replica;
   memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
@@ -58,11 +58,14 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {

   if (pVnode->path == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
+    taosMemoryFree(pVnode);
     return -1;
   }

   if (vmAllocQueue(pMgmt, pVnode) != 0) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
+    taosMemoryFree(pVnode->path);
+    taosMemoryFree(pVnode);
     return -1;
   }

@@ -221,6 +224,7 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
   SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);

   for (int32_t i = 0; i < numOfVnodes; ++i) {
+    if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
     vmCloseVnode(pMgmt, ppVnodes[i]);
   }

@@ -380,7 +384,9 @@ static int32_t vmStartVnodes(SVnodeMgmt *pMgmt) {
   for (int32_t v = 0; v < numOfVnodes; ++v) {
     int32_t t = v % threadNum;
     SVnodeThread *pThread = &threads[t];
-    pThread->ppVnodes[pThread->vnodeNum++] = ppVnodes[v];
+    if (pThread->ppVnodes != NULL) {
+      pThread->ppVnodes[pThread->vnodeNum++] = ppVnodes[v];
+    }
   }

   pMgmt->state.openVnodes = 0;
@@ -411,8 +417,8 @@ static int32_t vmStartVnodes(SVnodeMgmt *pMgmt) {
   taosMemoryFree(threads);

   for (int32_t i = 0; i < numOfVnodes; ++i) {
-    SVnodeObj *pVnode = ppVnodes[i];
-    vmReleaseVnode(pMgmt, pVnode);
+    if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
+    vmReleaseVnode(pMgmt, ppVnodes[i]);
   }

   if (ppVnodes != NULL) {
@@ -514,7 +514,7 @@ static void sdbCloseIter(SSdbIter *pIter) {
   }

   if (pIter->name != NULL) {
-    taosRemoveFile(pIter->name);
+    (void)taosRemoveFile(pIter->name);
     taosMemoryFree(pIter->name);
     pIter->name = NULL;
   }
@@ -606,6 +606,7 @@ int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
   if (pIter->file == NULL) {
     terrno = TAOS_SYSTEM_ERROR(errno);
     mError("failed to open %s since %s", pIter->name, terrstr());
+    sdbCloseIter(pIter);
     return -1;
   }

@@ -636,9 +637,9 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
     return -1;
   }

-  sdbCloseIter(pIter);
   if (sdbReadFile(pSdb) != 0) {
     mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
+    sdbCloseIter(pIter);
     return -1;
   }

@@ -656,6 +657,7 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
   }

   mInfo("sdbiter:%p, success applyed to sdb", pIter);
+  sdbCloseIter(pIter);
   return 0;
 }

@@ -662,7 +662,8 @@ typedef struct {
 } SSkmInfo;

 int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
-                       STimeWindow *pTimeWindow, SVersionRange *pVerRange, void *pBlockLoadInfo, bool destroyLoadInfo, const char *idStr);
+                       STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
+                       bool destroyLoadInfo, const char *idStr);
 void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
 bool tMergeTreeNext(SMergeTree *pMTree);
 TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
@@ -456,10 +456,10 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
     code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
     if (code) goto _err;

-    struct SSttBlockLoadInfo* pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0);
+    SSttBlockLoadInfo* pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0);
     tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid,
                    &(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
-                   &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo, true, NULL);
+                   &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo,true, NULL);
     bool hasVal = tMergeTreeNext(&state->mergeTree);
     if (!hasVal) {
       state->state = SFSLASTNEXTROW_FILESET;
@@ -474,7 +474,8 @@ static FORCE_INLINE int32_t tLDataIterCmprFn(const void *p1, const void *p2) {
 }

 int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
-                       STimeWindow *pTimeWindow, SVersionRange *pVerRange, void* pBlockLoadInfo, bool destroyLoadInfo, const char* idStr) {
+                       STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
+                       bool destroyLoadInfo, const char *idStr) {
   pMTree->backward = backward;
   pMTree->pIter = NULL;
   pMTree->pIterList = taosArrayInit(4, POINTER_BYTES);
@@ -34,6 +34,7 @@ extern "C" {
 #include "scalar.h"
 #include "taosdef.h"
 #include "tarray.h"
+#include "tfill.h"
 #include "thash.h"
 #include "tlockfree.h"
 #include "tmsg.h"
@@ -799,6 +800,22 @@ typedef struct SStreamPartitionOperatorInfo {
   SSDataBlock* pDelRes;
 } SStreamPartitionOperatorInfo;

+typedef struct SStreamFillOperatorInfo {
+  SStreamFillSupporter* pFillSup;
+  SSDataBlock* pRes;
+  SSDataBlock* pSrcBlock;
+  int32_t srcRowIndex;
+  SSDataBlock* pPrevSrcBlock;
+  SSDataBlock* pSrcDelBlock;
+  int32_t srcDelRowIndex;
+  SSDataBlock* pDelRes;
+  SNode* pCondition;
+  SArray* pColMatchColInfo;
+  int32_t primaryTsCol;
+  int32_t primarySrcSlotId;
+  SStreamFillInfo* pFillInfo;
+} SStreamFillOperatorInfo;
+
 typedef struct STimeSliceOperatorInfo {
   SSDataBlock* pRes;
   STimeWindow win;
@@ -1007,6 +1024,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream,

 SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
                                                 SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode,
+                                            SExecTaskInfo* pTaskInfo);

 int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
                               int32_t numOfOutput, SArray* pPseudoList);
@@ -1095,6 +1114,7 @@ int32_t setOutputBuf(STimeWindow* win, SResultRow** pResult, int64_t tableGroupI
                      SExecTaskInfo* pTaskInfo);
 int32_t releaseOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult);
 int32_t saveOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
+void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);

 #ifdef __cplusplus
 }
@@ -23,12 +23,13 @@ extern "C" {
 #include "os.h"
 #include "taosdef.h"
 #include "tcommon.h"
+#include "tsimplehash.h"

 struct SSDataBlock;

 typedef struct SFillColInfo {
-  SExprInfo *pExpr;
+  SExprInfo* pExpr;
   bool notFillCol; // denote if this column needs fill operation
   SVariant fillVal;
 } SFillColInfo;

@@ -51,46 +52,96 @@ typedef struct {
 } SRowVal;

 typedef struct SFillInfo {
   TSKEY start; // start timestamp
   TSKEY end; // endKey for fill
   TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure.
   int32_t tsSlotId; // primary time stamp slot id
   int32_t srcTsSlotId; // timestamp column id in the source data block.
   int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC]
   int32_t type; // fill type
   int32_t numOfRows; // number of rows in the input data block
   int32_t index; // active row index
   int32_t numOfTotal; // number of filled rows in one round
   int32_t numOfCurrent; // number of filled rows in current results
   int32_t numOfCols; // number of columns, including the tags columns
   SInterval interval;
   SRowVal prev;
   SRowVal next;
-  SSDataBlock *pSrcBlock;
+  SSDataBlock* pSrcBlock;
   int32_t alloc; // data buffer size in rows

   SFillColInfo* pFillCol; // column info for fill operations
   SFillTagColInfo* pTags; // tags value for filling gap
   const char* id;
 } SFillInfo;

+typedef struct SResultCellData {
+  bool isNull;
+  int8_t type;
+  int32_t bytes;
+  char pData[];
+} SResultCellData;
+
+typedef struct SResultRowData {
+  TSKEY key;
+  SResultCellData* pRowVal;
+} SResultRowData;
+
+typedef struct SStreamFillLinearInfo {
+  TSKEY nextEnd;
+  SArray* pDeltaVal; // double. value for Fill(linear).
+  SArray* pNextDeltaVal; // double. value for Fill(linear).
+  int64_t winIndex;
+  bool hasNext;
+} SStreamFillLinearInfo;
+
+typedef struct SStreamFillInfo {
+  TSKEY start; // startKey for fill
+  TSKEY end; // endKey for fill
+  TSKEY current; // current Key for fill
+  TSKEY preRowKey;
+  TSKEY nextRowKey;
+  SResultRowData* pResRow;
+  SStreamFillLinearInfo* pLinearInfo;
+  bool needFill;
+  int32_t type; // fill type
+  int32_t pos;
+  SArray* delRanges;
+  int32_t delIndex;
+} SStreamFillInfo;
+
+typedef struct SStreamFillSupporter {
+  int32_t type; // fill type
+  SInterval interval;
+  SResultRowData prev;
+  SResultRowData cur;
+  SResultRowData next;
+  SResultRowData nextNext;
+  SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
+  int32_t numOfAllCols; // number of all exprs, including the tags columns
+  int32_t numOfFillCols;
+  int32_t numOfNotFillCols;
+  int32_t rowSize;
+  SSHashObj* pResMap;
+  bool hasDelete;
+} SStreamFillSupporter;
+
 int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);

 void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
 void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp);
 void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
-struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, int32_t numOfNotFillCols, const struct SNodeListNode* val);
+struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr,
+                                       int32_t numOfNotFillCols, const struct SNodeListNode* val);
 bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);

 SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity,
                               SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId,
                               int32_t order, const char* id);

-void* taosDestroyFillInfo(struct SFillInfo *pFillInfo);
+void* taosDestroyFillInfo(struct SFillInfo* pFillInfo);
 int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity);
-int64_t getFillInfoStart(struct SFillInfo *pFillInfo);
+int64_t getFillInfoStart(struct SFillInfo* pFillInfo);


 #ifdef __cplusplus
 }
@@ -3754,6 +3754,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
     pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
     pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, pTaskInfo);
+  } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL == type) {
+    pOptr = createStreamFillOperatorInfo(ops[0], (SStreamFillPhysiNode*)pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
     pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC == type) {
@@ -1027,11 +1027,7 @@ static uint64_t getGroupIdByCol(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts,
   return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
 }

-static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
-  if (pInfo->partitionSup.needCalc) {
-    return getGroupIdByCol(pInfo, uid, ts, maxVersion);
-  }
-
+static uint64_t getGroupIdByUid(SStreamScanInfo* pInfo, uint64_t uid) {
   SHashObj* map = pInfo->pTableScanOp->pTaskInfo->tableqinfoList.map;
   uint64_t* groupId = taosHashGet(map, &uid, sizeof(int64_t));
   if (groupId) {
@@ -1040,6 +1036,14 @@ static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts,
   return 0;
 }

+static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+  if (pInfo->partitionSup.needCalc) {
+    return getGroupIdByCol(pInfo, uid, ts, maxVersion);
+  }
+
+  return getGroupIdByUid(pInfo, uid);
+}
+
 static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t* pRowIndex) {
   if ((*pRowIndex) == pBlock->info.rows) {
     return false;
@@ -1086,26 +1090,32 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
   return true;
 }

-static STimeWindow getSlidingWindow(TSKEY* startTsCol, TSKEY* endTsCol, SInterval* pInterval,
+static STimeWindow getSlidingWindow(TSKEY* startTsCol, TSKEY* endTsCol, uint64_t* gpIdCol, SInterval* pInterval,
                                     SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex, bool hasGroup) {
   SResultRowInfo dumyInfo;
   dumyInfo.cur.pageId = -1;
   STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
   STimeWindow endWin = win;
   STimeWindow preWin = win;
+  uint64_t groupId = gpIdCol[*pRowIndex];
   while (1) {
     if (hasGroup) {
       (*pRowIndex) += 1;
     } else {
-      (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, startTsCol, *pRowIndex, endWin.ekey, binarySearchForKey,
-                                               NULL, TSDB_ORDER_ASC);
+      while ((groupId == gpIdCol[(*pRowIndex)] && startTsCol[*pRowIndex] < endWin.ekey)) {
+        (*pRowIndex) += 1;
+        if ((*pRowIndex) == pDataBlockInfo->rows) {
+          break;
+        }
+      }
     }

     do {
       preWin = endWin;
       getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
     } while (endTsCol[(*pRowIndex) - 1] >= endWin.skey);
     endWin = preWin;
-    if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows) {
+    if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows || groupId != gpIdCol[*pRowIndex]) {
       win.ekey = endWin.ekey;
       return win;
     }
@@ -1240,11 +1250,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
   int64_t version = pSrcBlock->info.version - 1;
   for (int32_t i = 0; i < rows;) {
     uint64_t srcUid = srcUidData[i];
-    uint64_t groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
-    uint64_t srcGpId = srcGp[i];
-    TSKEY calStartTs = srcStartTsCol[i];
+    uint64_t groupId = srcGp[i];
+    if (groupId == 0) {
+      groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
+    }
+    TSKEY calStartTs = srcStartTsCol[i];
     colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
-    STimeWindow win = getSlidingWindow(srcStartTsCol, srcEndTsCol, &pInfo->interval, &pSrcBlock->info, &i,
+    STimeWindow win = getSlidingWindow(srcStartTsCol, srcEndTsCol, srcGp, &pInfo->interval, &pSrcBlock->info, &i,
                                        pInfo->partitionSup.needCalc);
     TSKEY calEndTs = srcStartTsCol[i - 1];
     colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
@@ -1253,15 +1265,6 @@
     colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
     colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
     pDestBlock->info.rows++;
-    if (pInfo->partitionSup.needCalc && srcGpId != 0 && groupId != srcGpId) {
-      colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
-      colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
-      colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
-      colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
-      colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
-      colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&srcGpId), false);
-      pDestBlock->info.rows++;
-    }
   }
   return TSDB_CODE_SUCCESS;
 }
@@ -1336,7 +1339,7 @@ void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t*
 static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, bool out) {
   if (out) {
     blockDataCleanup(pInfo->pUpdateDataRes);
-    blockDataEnsureCapacity(pInfo->pUpdateDataRes, pBlock->info.rows);
+    blockDataEnsureCapacity(pInfo->pUpdateDataRes, pBlock->info.rows * 2);
   }
   SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
   ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
@@ -1357,10 +1360,12 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
         isDeletedStreamWindow(&win, pBlock->info.groupId, pInfo->pTableScanOp, &pInfo->twAggSup);
     if ((update || closedWin) && out) {
       qDebug("stream update check not pass, update %d, closedWin %d", update, closedWin);
-      uint64_t gpId = closedWin && pInfo->partitionSup.needCalc
-                          ? calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pBlock, rowId)
-                          : 0;
+      uint64_t gpId = 0;
       appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid, &gpId);
+      if (closedWin && pInfo->partitionSup.needCalc) {
+        gpId = calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pBlock, rowId);
+        appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid, &gpId);
+      }
     }
   }
   if (out && pInfo->pUpdateDataRes->info.rows > 0) {
@@ -1537,6 +1542,30 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S
   return 0;
 }

+// for partition by tag
+static void setBlockGroupIdByUid(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
+  SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+  TSKEY* startTsCol = (TSKEY*)pStartTsCol->pData;
+  SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+  uint64_t* gpCol = (uint64_t*)pGpCol->pData;
+  SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+  uint64_t* uidCol = (uint64_t*)pUidCol->pData;
+  int32_t rows = pBlock->info.rows;
+  if (!pInfo->partitionSup.needCalc) {
+    for (int32_t i = 0; i < rows; i++) {
+      uint64_t groupId = getGroupIdByUid(pInfo, uidCol[i]);
+      colDataAppend(pGpCol, i, (const char*)&groupId, false);
+    }
+  } else {
+    // SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, uidCol[i], startTsCol, ts, maxVersion);
+    // if (!pPreRes || pPreRes->info.rows == 0) {
+    // return 0;
+    // }
+    // ASSERT(pPreRes->info.rows == 1);
+    // return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
+  }
+}
+
 static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
   // NOTE: this operator does never check if current status is done or not
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -1633,7 +1662,8 @@ FETCH_NEXT_BLOCK:
       } else {
         pDelBlock = pBlock;
       }
-      printDataBlock(pBlock, "stream scan delete recv filtered");
+      setBlockGroupIdByUid(pInfo, pDelBlock);
+      printDataBlock(pDelBlock, "stream scan delete recv filtered");
       if (!isIntervalWindow(pInfo) && !isSessionWindow(pInfo) && !isStateWindow(pInfo)) {
         generateDeleteResultBlock(pInfo, pDelBlock, pInfo->pDeleteDataRes);
         pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT;
File diff suppressed because it is too large
@@ -271,6 +271,10 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o
   tw->ekey -= 1;
 }

+void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order) {
+  getNextTimeWindow(pInterval, pInterval->precision, order, tw);
+}
+
 void doTimeWindowInterpolation(SArray* pPrevValues, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs,
                                int32_t curRowIndex, TSKEY windowKey, int32_t type, SExprSupp* pSup) {
   SqlFunctionCtx* pCtx = pSup->pCtx;
@@ -2095,12 +2099,17 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
   bool hasInterp = true;
   for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
     SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
-    int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
-    int32_t dstSlot = pExprInfo->base.resSchema.slotId;

-    // SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+    int32_t dstSlot = pExprInfo->base.resSchema.slotId;
     SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);

+    if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+      colDataAppend(pDst, rows, (char*)&pSliceInfo->current, false);
+      continue;
+    }
+
+    int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+    // SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
     switch (pSliceInfo->fillType) {
       case TSDB_FILL_NULL: {
         colDataAppendNULL(pDst, rows);
@@ -2346,19 +2355,24 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
     if (ts == pSliceInfo->current) {
       for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
         SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
-        int32_t dstSlot = pExprInfo->base.resSchema.slotId;
-        int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;

-        SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+        int32_t dstSlot = pExprInfo->base.resSchema.slotId;
         SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);

-        if (colDataIsNull_s(pSrc, i)) {
-          colDataAppendNULL(pDst, pResBlock->info.rows);
-          continue;
-        }
+        if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+          colDataAppend(pDst, pResBlock->info.rows, (char *)&pSliceInfo->current, false);
+        } else {
+          int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+          SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);

-        char* v = colDataGetData(pSrc, i);
-        colDataAppend(pDst, pResBlock->info.rows, v, false);
+          if (colDataIsNull_s(pSrc, i)) {
+            colDataAppendNULL(pDst, pResBlock->info.rows);
+            continue;
+          }
+
+          char* v = colDataGetData(pSrc, i);
+          colDataAppend(pDst, pResBlock->info.rows, v, false);
+        }
       }

       pResBlock->info.rows += 1;
@@ -2478,14 +2492,24 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
     if (ts == pSliceInfo->current && pSliceInfo->current <= pSliceInfo->win.ekey) {
       for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
         SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
-        int32_t dstSlot = pExprInfo->base.resSchema.slotId;
-        int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;

-        SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+        int32_t dstSlot = pExprInfo->base.resSchema.slotId;
         SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);

-        char* v = colDataGetData(pSrc, i);
-        colDataAppend(pDst, pResBlock->info.rows, v, false);
+        if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+          colDataAppend(pDst, pResBlock->info.rows, (char *)&pSliceInfo->current, false);
+        } else {
+          int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+          SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+
+          if (colDataIsNull_s(pSrc, i)) {
+            colDataAppendNULL(pDst, pResBlock->info.rows);
+            continue;
+          }
+
+          char* v = colDataGetData(pSrc, i);
+          colDataAppend(pDst, pResBlock->info.rows, v, false);
+        }
       }

       pResBlock->info.rows += 1;
@@ -3146,20 +3170,21 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
    }

    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
-    if (pInfo->binfo.pRes->info.rows == 0) {
-      pOperator->status = OP_EXEC_DONE;
-      if (!IS_FINAL_OP(pInfo)) {
-        clearFunctionContext(&pOperator->exprSupp);
-        // semi interval operator clear disk buffer
-        clearStreamIntervalOperator(pInfo);
-        qDebug("===stream===clear semi operator");
-      } else {
-        freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
-      }
-      return NULL;
+    if (pInfo->binfo.pRes->info.rows != 0) {
+      printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+      return pInfo->binfo.pRes;
    }
-    printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
-    return pInfo->binfo.pRes;
+
+    doSetOperatorCompleted(pOperator);
+    if (!IS_FINAL_OP(pInfo)) {
+      clearFunctionContext(&pOperator->exprSupp);
+      // semi interval operator clear disk buffer
+      clearStreamIntervalOperator(pInfo);
+      qDebug("===stream===clear semi operator");
+    } else {
+      freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
+    }
+    return NULL;
  } else {
    if (!IS_FINAL_OP(pInfo)) {
      doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
@@ -3316,7 +3341,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
      return pInfo->pPullDataRes;
    }

-    // we should send result first.
+    doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+    if (pInfo->pDelRes->info.rows != 0) {
+      // process the rest of the data
+      printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+      return pInfo->pDelRes;
+    }
+
    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
    if (pInfo->binfo.pRes->info.rows != 0) {
      printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
@@ -3330,13 +3361,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
      // process the rest of the data
      return pInfo->pUpdateRes;
    }

-    doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
-    if (pInfo->pDelRes->info.rows != 0) {
-      // process the rest of the data
-      printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
-      return pInfo->pDelRes;
-    }
    return NULL;
  }

@@ -5744,19 +5768,18 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_RES_TO_RETURN) {
    doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
    if (pInfo->pDelRes->info.rows > 0) {
-      printDataBlock(pInfo->pDelRes, "single interval");
+      printDataBlock(pInfo->pDelRes, "single interval delete");
      return pInfo->pDelRes;
    }

    doBuildResult(pOperator, pInfo->binfo.pRes, &pInfo->groupResInfo);
-    // doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
-    if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
-      pOperator->status = OP_EXEC_DONE;
-      qDebug("===stream===single interval is done");
-      freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
+    if (pInfo->binfo.pRes->info.rows > 0) {
+      printDataBlock(pInfo->binfo.pRes, "single interval");
+      return pInfo->binfo.pRes;
    }
-    printDataBlock(pInfo->binfo.pRes, "single interval");
-    return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
+
+    doSetOperatorCompleted(pOperator);
+    return NULL;
  }

  SOperatorInfo* downstream = pOperator->pDownstream[0];
@@ -5823,24 +5846,24 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
  }
  taosArraySort(pUpdated, resultrowComparAsc);

-  // new disc buf
-  // finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated,
-  // pSup->rowEntryInfoOffset);
  initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
  blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
  removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
  taosHashCleanup(pUpdatedMap);

  doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
  if (pInfo->pDelRes->info.rows > 0) {
-    printDataBlock(pInfo->pDelRes, "single interval");
+    printDataBlock(pInfo->pDelRes, "single interval delete");
    return pInfo->pDelRes;
  }

-  // doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
-  // new disc buf
  doBuildResult(pOperator, pInfo->binfo.pRes, &pInfo->groupResInfo);
-  printDataBlock(pInfo->binfo.pRes, "single interval");
-  return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
+  if (pInfo->binfo.pRes->info.rows > 0) {
+    printDataBlock(pInfo->binfo.pRes, "single interval");
+    return pInfo->binfo.pRes;
+  }
+
+  return NULL;
 }

 void destroyStreamIntervalOperatorInfo(void* param) {
@@ -302,6 +302,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTR
       char* p = taosMemoryCalloc(1, strlen(c->colVal) + 1);
       memcpy(p, c->colVal, strlen(c->colVal));
       cond = cmpFn(p + skip, term->colVal, dType);
+      taosMemoryFree(p);
     }
   }
   if (cond == MATCH) {
@@ -69,6 +69,8 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
   int32_t blkOffset = offset % kBlockSize;
   int32_t blkLeft = kBlockSize - blkOffset;

+  if (offset >= ctx->file.size) return 0;
+
   do {
     char key[128] = {0};
     idxGenLRUKey(key, ctx->file.buf, blkId);
@@ -80,24 +82,34 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
       memcpy(buf + total, blk->buf + blkOffset, nread);
       taosLRUCacheRelease(ctx->lru, h, false);
     } else {
-      int32_t cacheMemSize = sizeof(SDataBlock) + kBlockSize;
-
-      SDataBlock* blk = taosMemoryCalloc(1, cacheMemSize);
-      blk->blockId = blkId;
-      blk->nread = taosPReadFile(ctx->file.pFile, blk->buf, kBlockSize, blkId * kBlockSize);
-      assert(blk->nread <= kBlockSize);
-
-      if (blk->nread < kBlockSize && blk->nread < len) {
-        break;
-      }
-
-      nread = TMIN(blkLeft, len);
-      memcpy(buf + total, blk->buf + blkOffset, nread);
-
-      LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, cacheMemSize, deleteDataBlockFromLRU, NULL,
-                                       TAOS_LRU_PRIORITY_LOW);
-      if (s != TAOS_LRU_STATUS_OK) {
-        return -1;
+      int32_t left = ctx->file.size - offset;
+      if (left < kBlockSize) {
+        nread = TMIN(left, len);
+        int32_t bytes = taosPReadFile(ctx->file.pFile, buf + total, nread, offset);
+        assert(bytes == nread);
+
+        total += bytes;
+        return total;
+      } else {
+        int32_t cacheMemSize = sizeof(SDataBlock) + kBlockSize;
+
+        SDataBlock* blk = taosMemoryCalloc(1, cacheMemSize);
+        blk->blockId = blkId;
+        blk->nread = taosPReadFile(ctx->file.pFile, blk->buf, kBlockSize, blkId * kBlockSize);
+        assert(blk->nread <= kBlockSize);
+
+        if (blk->nread < kBlockSize && blk->nread < len) {
+          break;
+        }
+
+        nread = TMIN(blkLeft, len);
+        memcpy(buf + total, blk->buf + blkOffset, nread);
+
+        LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, cacheMemSize, deleteDataBlockFromLRU, NULL,
+                                         TAOS_LRU_PRIORITY_LOW);
+        if (s != TAOS_LRU_STATUS_OK) {
+          return -1;
+        }
       }
     }
     total += nread;
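The new branch above short-circuits reads that start in the file's final, partial block: instead of caching a block shorter than kBlockSize, the bytes are read straight from the file and returned. A minimal sketch of that path under the same names (the helper name idxReadTail is hypothetical; TMIN and taosPReadFile are the existing utilities used in the hunk):

    // Sketch only: mirrors the tail-block fast path introduced in the hunk above.
    static int32_t idxReadTail(IFileCtx* ctx, uint8_t* buf, int32_t total, int32_t len, int32_t offset) {
      int32_t left  = ctx->file.size - offset;   // bytes remaining in the file
      int32_t nread = TMIN(left, len);           // never read more than the caller asked for
      int32_t bytes = taosPReadFile(ctx->file.pFile, buf + total, nread, offset);
      return total + bytes;                      // bytes copied into buf so far
    }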
@@ -146,9 +158,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int
   } else {
     ctx->file.pFile = taosOpenFile(path, TD_FILE_READ);

-    int64_t size = 0;
     taosFStatFile(ctx->file.pFile, &ctx->file.size, NULL);
-    ctx->file.size = (int)size;
 #ifdef USE_MMAP
     ctx->file.ptr = (char*)tfMmapReadOnly(ctx->file.pFile, ctx->file.size);
 #endif
@@ -172,9 +172,9 @@ TEST_F(JsonEnv, testWriteMillonData) {
   {
     std::string colName("voltagefdadfa");
     std::string colVal("abxxxxxxxxxxxx");
-    for (int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10000; i++) {
       colVal[i % colVal.size()] = '0' + i % 128;
-      for (size_t i = 0; i < 100; i++) {
+      for (size_t i = 0; i < 10; i++) {
        SIndexTerm* term = indexTermCreateT(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
                                            colVal.c_str(), colVal.size());
@@ -254,6 +254,7 @@ const char* nodesNodeName(ENodeType type) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
       return "PhysiStreamSemiInterval";
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       return "PhysiFill";
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
       return "PhysiSessionWindow";

@@ -4635,6 +4636,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
       return physiIntervalNodeToJson(pObj, pJson);
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       return physiFillNodeToJson(pObj, pJson);
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:

@@ -4788,6 +4790,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
       return jsonToPhysiIntervalNode(pJson, pObj);
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       return jsonToPhysiFillNode(pJson, pObj);
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
@@ -3633,6 +3633,7 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
       code = physiIntervalNodeToMsg(pObj, pEncoder);
       break;
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       code = physiFillNodeToMsg(pObj, pEncoder);
       break;
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:

@@ -3770,6 +3771,7 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
       code = msgToPhysiIntervalNode(pDecoder, pObj);
       break;
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       code = msgToPhysiFillNode(pDecoder, pObj);
       break;
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
@@ -511,6 +511,7 @@ SNode* nodesMakeNode(ENodeType type) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
       return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
       return makeNode(type, sizeof(SFillPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
       return makeNode(type, sizeof(SSessionWinodwPhysiNode));

@@ -1156,7 +1157,8 @@ void nodesDestroyNode(SNode* pNode) {
     case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
       destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
       break;
-    case QUERY_NODE_PHYSICAL_PLAN_FILL: {
+    case QUERY_NODE_PHYSICAL_PLAN_FILL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL: {
      SFillPhysiNode* pPhyNode = (SFillPhysiNode*)pNode;
      destroyPhysiNode((SPhysiNode*)pPhyNode);
      nodesDestroyList(pPhyNode->pFillExprs);
@@ -1409,7 +1409,9 @@ static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi

 static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SFillLogicNode* pFillNode,
                                    SPhysiNode** pPhyNode) {
-  SFillPhysiNode* pFill = (SFillPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pFillNode, QUERY_NODE_PHYSICAL_PLAN_FILL);
+  SFillPhysiNode* pFill = (SFillPhysiNode*)makePhysiNode(
+      pCxt, (SLogicNode*)pFillNode,
+      pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL : QUERY_NODE_PHYSICAL_PLAN_FILL);
   if (NULL == pFill) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }
@@ -47,8 +47,7 @@ void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) {
   return;

 _return:

-  SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
+  SCH_JOB_DLOG("job errCode updated to %s", tstrerror(errCode));
 }

 bool schJobDone(SSchJob *pJob) {

@@ -491,7 +490,7 @@ int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {

   int32_t code = atomic_load_32(&pJob->errCode);
   if (code) {
-    SCH_JOB_DLOG("job failed with error: %s", tstrerror(code));
+    SCH_JOB_DLOG("job failed with error %s", tstrerror(code));
   }

   schPostJobRes(pJob, 0);
@@ -40,6 +40,11 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath) {
     goto _err;
   }

+  // todo refactor
+  if (tdbTbOpen("func.state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pFillStateDb) < 0) {
+    goto _err;
+  }
+
   if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb) < 0) {
     goto _err;
   }

@@ -55,6 +60,7 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath) {
 _err:
   tdbTbClose(pState->pStateDb);
   tdbTbClose(pState->pFuncStateDb);
+  tdbTbClose(pState->pFillStateDb);
   tdbClose(pState->db);
   taosMemoryFree(pState);
   return NULL;

@@ -64,6 +70,7 @@ void streamStateClose(SStreamState* pState) {
   tdbCommit(pState->db, &pState->txn);
   tdbTbClose(pState->pStateDb);
   tdbTbClose(pState->pFuncStateDb);
+  tdbTbClose(pState->pFillStateDb);
   tdbClose(pState->db);

   taosMemoryFree(pState);
@@ -126,14 +133,30 @@ int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
 int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
   return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
 }

+// todo refactor
+int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
+  return tdbTbUpsert(pState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
+}
+
 int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
   return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen);
 }

+// todo refactor
+int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+  return tdbTbGet(pState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
+}
+
 int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
   return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn);
 }

+// todo refactor
+int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key) {
+  return tdbTbDelete(pState->pFillStateDb, key, sizeof(SWinKey), &pState->txn);
+}
+
 int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
   // todo refactor
   int32_t size = *pVLen;
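The hunk above gives the fill state its own tdb table (pFillStateDb) behind put/get/delete helpers keyed by SWinKey, mirroring the existing streamStatePut/Get/Del. A minimal usage sketch, assuming SWinKey carries a group id plus a window timestamp as in the surrounding code and that the helpers return 0 on success and a negative value on failure; the function name below is illustrative only:

    // Sketch only: round-trips one value through the new fill-state helpers declared above.
    static int32_t fillStateRoundTrip(SStreamState* pState, uint64_t groupId, int64_t ts,
                                      const void* pBuf, int32_t len) {
      SWinKey key = {.groupId = groupId, .ts = ts};          // assumed field names
      if (streamStateFillPut(pState, &key, pBuf, len) < 0) return -1;

      void*   pVal = NULL;
      int32_t vLen = 0;
      if (streamStateFillGet(pState, &key, &pVal, &vLen) < 0) return -1;
      // ... consume pVal/vLen; buffer ownership follows the underlying tdbTbGet convention ...

      return streamStateFillDel(pState, &key);
    }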
@@ -165,6 +188,31 @@ SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
   return pCur;
 }

+SStreamStateCur* streamStateFillGetCur(SStreamState* pState, const SWinKey* key) {
+  SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+  if (pCur == NULL) return NULL;
+  tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL);
+
+  int32_t c;
+  tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);
+  if (c != 0) {
+    taosMemoryFree(pCur);
+    return NULL;
+  }
+  return pCur;
+}
+
+SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key) {
+  SStreamStateCur* pCur = streamStateFillGetCur(pState, key);
+  if (pCur) {
+    int32_t code = streamStateGetGroupKVByCur(pCur, key, NULL, 0);
+    if (code == 0) {
+      return pCur;
+    }
+  }
+  return NULL;
+}
+
 int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
   const SWinKey* pKTmp = NULL;
   int32_t        kLen;

@@ -175,6 +223,17 @@ int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void**
   return 0;
 }

+int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
+  uint64_t groupId = pKey->groupId;
+  int32_t  code = streamStateGetKVByCur(pCur, pKey, pVal, pVLen);
+  if (code == 0) {
+    if (pKey->groupId == groupId) {
+      return 0;
+    }
+  }
+  return -1;
+}
+
 int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) {
   //
   return tdbTbcMoveToFirst(pCur->pCur);
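streamStateGetAndCheckCur above only returns a cursor whose current entry still belongs to the caller's group. A small hedged sketch of a lookup built on it; the wrapper name is hypothetical and cursor release is deliberately left to the existing cursor-free routine, which is outside this hunk:

    // Sketch only: one positioned read through the group-checked cursor.
    static int32_t fillStateReadGroup(SStreamState* pState, SWinKey* pKey, const void** ppVal, int32_t* pLen) {
      SStreamStateCur* pCur = streamStateGetAndCheckCur(pState, pKey);
      if (pCur == NULL) return -1;   // no entry for this group at or after pKey
      int32_t code = streamStateGetGroupKVByCur(pCur, pKey, ppVal, pLen);
      // release pCur with the caller's existing cursor-free helper (not shown in this hunk)
      return code;
    }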
@@ -185,12 +244,12 @@ int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) {
   return tdbTbcMoveToLast(pCur->pCur);
 }

-SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) {
+SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key) {
   SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
   if (pCur == NULL) {
     return NULL;
   }
-  if (tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL) < 0) {
+  if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
     taosMemoryFree(pCur);
     return NULL;
   }

@@ -211,12 +270,12 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
   return pCur;
 }

-SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
+SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
   SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
   if (pCur == NULL) {
     return NULL;
   }
-  if (tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL) < 0) {
+  if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
     taosMemoryFree(pCur);
     return NULL;
   }
@@ -151,15 +151,15 @@ int64_t taosQueueMemorySize(STaosQueue *queue) {

 void *taosAllocateQitem(int32_t size, EQItype itype) {
   STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
-  pNode->size = size;
-  pNode->itype = itype;
-  pNode->timestamp = taosGetTimestampUs();
-
   if (pNode == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return NULL;
   }

+  pNode->size = size;
+  pNode->itype = itype;
+  pNode->timestamp = taosGetTimestampUs();
+
   if (itype == RPC_QITEM) {
     int64_t alloced = atomic_add_fetch_64(&tsRpcQueueMemoryUsed, size);
     if (alloced > tsRpcQueueMemoryAllowed) {
@@ -20,8 +20,8 @@ static int32_t tUUIDSerialNo = 0;

 int32_t tGenIdPI32(void) {
   if (tUUIDHashId == 0) {
-    char uid[64];
-    int32_t code = taosGetSystemUUID(uid, tListLen(uid));
+    char uid[65] = {0};
+    int32_t code = taosGetSystemUUID(uid, sizeof(uid));
     if (code != TSDB_CODE_SUCCESS) {
       terrno = TAOS_SYSTEM_ERROR(errno);
     } else {
@@ -46,7 +46,7 @@ int32_t tQWorkerInit(SQWorkerPool *pool) {
 void tQWorkerCleanup(SQWorkerPool *pool) {
   for (int32_t i = 0; i < pool->max; ++i) {
     SQWorker *worker = pool->workers + i;
-    if (worker == NULL) continue;
+    // if (worker == NULL) continue;
     if (taosCheckPthreadValid(worker->thread)) {
       taosQsetThreadResume(pool->qset);
     }

@@ -54,7 +54,7 @@ void tQWorkerCleanup(SQWorkerPool *pool) {

   for (int32_t i = 0; i < pool->max; ++i) {
     SQWorker *worker = pool->workers + i;
-    if (worker == NULL) continue;
+    // if (worker == NULL) continue;
     if (taosCheckPthreadValid(worker->thread)) {
       taosThreadJoin(worker->thread, NULL);
       taosThreadClear(&worker->thread);

@@ -138,8 +138,8 @@ STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp) {
 }

 void tQWorkerFreeQueue(SQWorkerPool *pool, STaosQueue *queue) {
-  taosCloseQueue(queue);
   uDebug("worker:%s, queue:%p is freed", pool->name, queue);
+  taosCloseQueue(queue);
 }

 int32_t tWWorkerInit(SWWorkerPool *pool) {

@@ -283,8 +283,8 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) {
 }

 void tWWorkerFreeQueue(SWWorkerPool *pool, STaosQueue *queue) {
-  taosCloseQueue(queue);
   uDebug("worker:%s, queue:%p is freed", pool->name, queue);
+  taosCloseQueue(queue);
 }

 int32_t tSingleWorkerInit(SSingleWorker *pWorker, const SSingleWorkerCfg *pCfg) {
@@ -413,13 +413,8 @@ if $data12 != 3 then
 goto loop14
 endi

-return 1
-
-sql drop stream if exists streams3;
-sql drop database if exists test3;
-sql drop database if exists test;
 sql create database test3 vgroups 4;
-sql create database test vgroups 1;
 sql use test3;
 sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
 sql create table t1 using st tags(1,1,1);

@@ -435,7 +430,7 @@ sql delete from t1;

 loop15:
 sleep 200
-sql select * from test.streamt2 order by c1, c2, c3;
+sql select * from test.streamt3 order by c1, c2, c3;

 $loop_count = $loop_count + 1
 if $loop_count == 10 then

@@ -453,7 +448,7 @@ sql delete from t1 where ts > 100;

 loop16:
 sleep 200
-sql select * from test.streamt2 order by c1, c2, c3;
+sql select * from test.streamt3 order by c1, c2, c3;

 $loop_count = $loop_count + 1
 if $loop_count == 10 then

@@ -471,7 +466,7 @@ sql delete from st;

 loop17:
 sleep 200
-sql select * from test.streamt2 order by c1, c2, c3;
+sql select * from test.streamt3 order by c1, c2, c3;

 $loop_count = $loop_count + 1
 if $loop_count == 10 then
@ -0,0 +1,375 @@
|
||||||
|
$loop_all = 0
|
||||||
|
looptest:
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 200
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop database if exists test1;
|
||||||
|
sql create database test1 vgroups 1;
|
||||||
|
sql use test1;
|
||||||
|
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
|
||||||
|
sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL);
|
||||||
|
sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300);
|
||||||
|
sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next);
|
||||||
|
sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev);
|
||||||
|
sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
|
||||||
|
sql insert into t1 values(1648791213000,1,1,1,1.0,'aaa');
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql delete from t1;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop1:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows1=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows2=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows3=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows4=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows5=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791210000,4,4,4,4.0,'ddd');
|
||||||
|
sql insert into t1 values(1648791215000,2,2,2,2.0,'bbb');
|
||||||
|
sql insert into t1 values(1648791217000,3,3,3,3.0,'ccc');
|
||||||
|
sql insert into t1 values(1648791219000,5,5,5,5.0,'eee');
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop2:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 10 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
#temp
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
return 1
|
||||||
|
|
||||||
|
sql delete from t1 where ts >= 1648791214000;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop3:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows1=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows2=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows3=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows4=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows5=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 4 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791213000,5,5,5,5.0,'eee');
|
||||||
|
sql insert into t1 values(1648791215000,5,5,5,5.0,'eee');
|
||||||
|
sql insert into t1 values(1648791219000,6,6,6,6.0,'fff');
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop4:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 4 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql delete from t1 where ts <= 1648791216000;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop5:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows1=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows2=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows3=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows4=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by ts;
|
||||||
|
|
||||||
|
if $rows != 1 then
|
||||||
|
print =====rows5=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 6 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
sql drop stream if exists streams9;
|
||||||
|
sql drop stream if exists streams10;
|
||||||
|
sql drop database if exists test6;
|
||||||
|
sql create database test6 vgroups 1;
|
||||||
|
sql use test6;
|
||||||
|
sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int);
|
||||||
|
sql create table t1 using st tags(1,1,1);
|
||||||
|
sql create table t2 using st tags(1,1,1);
|
||||||
|
sql create stream streams6 trigger at_once into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL);
|
||||||
|
sql create stream streams7 trigger at_once into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300);
|
||||||
|
sql create stream streams8 trigger at_once into streamt8 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next);
|
||||||
|
sql create stream streams9 trigger at_once into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev);
|
||||||
|
sql create stream streams10 trigger at_once into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791210000,1,1,1,1.0,'aaa');
|
||||||
|
sql insert into t1 values(1648791217000,1,1,1,1.0,'aaa');
|
||||||
|
|
||||||
|
sql insert into t2 values(1648791215000,1,1,1,1.0,'aaa');
|
||||||
|
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop7:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt6 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop7
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql delete from t1;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop8:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt6 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows6=$rows
|
||||||
|
goto loop8
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt7 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows7=$rows
|
||||||
|
goto loop8
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt8 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows8=$rows
|
||||||
|
goto loop8
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt9 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows9=$rows
|
||||||
|
goto loop8
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt10 order by ts;
|
||||||
|
|
||||||
|
if $rows != 0 then
|
||||||
|
print =====rows10=$rows
|
||||||
|
goto loop8
|
||||||
|
endi
|
||||||
|
|
|
sql drop stream if exists streams0;
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
|
||||||
|
sql use test1;
|
||||||
|
sql select * from t1;
|
||||||
|
print $data00
|
||||||
|
|
||||||
|
$loop_all = $loop_all + 1
|
||||||
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
#goto looptest
|
|
@ -0,0 +1,379 @@
|
||||||
|
$loop_all = 0
|
||||||
|
looptest:
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 200
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop database if exists test1;
|
||||||
|
sql create database test1 vgroups 1;
|
||||||
|
sql use test1;
|
||||||
|
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
|
||||||
|
sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL);
|
||||||
|
sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300);
|
||||||
|
sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next);
|
||||||
|
sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev);
|
||||||
|
sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791210000,0,0,0,0.0,'aaa');
|
||||||
|
sql insert into t1 values(1648791213000,1,1,1,1.0,'bbb');
|
||||||
|
sql insert into t1 values(1648791215000,5,5,5,5.0,'ccc');
|
||||||
|
sql insert into t1 values(1648791217000,6,6,6,6.0,'ddd');
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql delete from t1 where ts = 1648791213000;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop2:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt1=rows1=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
if $data31 != NULL then
|
||||||
|
print ====streamt1=data31=$data31
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt2=rows2=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
if $data31 != 100 then
|
||||||
|
print ====streamt2=data31=$data31
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt3=rows3=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
if $data31 != 5 then
|
||||||
|
print ====streamt3=data31=$data31
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt4=rows4=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
if $data31 != 0 then
|
||||||
|
print ====streamt4=data31=$data31
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt5=rows5=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
if $data31 != 3 then
|
||||||
|
print ====streamt5=data31=$data31
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791212000,5,5,5,5.0,'eee');
|
||||||
|
sql insert into t1 values(1648791213000,6,6,6,6.0,'fff');
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop3:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $data21 != 5 then
|
||||||
|
print ====133=rows=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
if $data31 != 6 then
|
||||||
|
print ====137=rows=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql delete from t1 where ts >= 1648791211000 and ts <= 1648791214000;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop4:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt1=rows1=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
if $data31 != NULL then
|
||||||
|
print ====streamt1=data31=$data31
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt2=rows2=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
if $data31 != 100 then
|
||||||
|
print ====streamt2=data31=$data31
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt3=rows3=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
if $data31 != 5 then
|
||||||
|
print ====streamt3=data31=$data31
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt4=rows4=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
if $data31 != 0 then
|
||||||
|
print ====streamt4=data31=$data31
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by ts;
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print ====streamt5=rows5=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
if $data31 != 3 then
|
||||||
|
print ====streamt5=data31=$data31
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
sql drop stream if exists streams9;
|
||||||
|
sql drop stream if exists streams10;
|
||||||
|
sql drop database if exists test6;
|
||||||
|
sql create database test6 vgroups 1;
|
||||||
|
sql use test6;
|
||||||
|
sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int);
|
||||||
|
sql create table t1 using st tags(1,1,1);
|
||||||
|
sql create table t2 using st tags(1,1,1);
|
||||||
|
sql create stream streams6 trigger at_once into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL);
|
||||||
|
sql create stream streams7 trigger at_once into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300);
|
||||||
|
sql create stream streams8 trigger at_once into streamt8 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next);
|
||||||
|
sql create stream streams9 trigger at_once into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev);
|
||||||
|
sql create stream streams10 trigger at_once into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791210000,1,1,1,1.0,'aaa');
|
||||||
|
sql insert into t1 values(1648791215000,6,8,8,8.0,'bbb');
|
||||||
|
sql insert into t1 values(1648791220000,11,10,10,10.0,'ccc');
|
||||||
|
sql insert into t1 values(1648791221000,6,6,6,6.0,'fff');
|
||||||
|
|
||||||
|
sql insert into t2 values(1648791212000,4,4,4,4.0,'ddd');
|
||||||
|
sql insert into t2 values(1648791214000,5,5,5,5.0,'eee');
|
||||||
|
sql insert into t2 values(1648791216000,2,2,2,2.0,'bbb');
|
||||||
|
sql insert into t2 values(1648791222000,6,6,6,6.0,'fff');
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop5:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt6 order by ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt6=rows1=$rows
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
if $data21 != 4 then
|
||||||
|
print ====streamt6=data21=$data21
|
||||||
|
goto loop5
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql delete from t2;
|
||||||
|
print delete from t2;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop6:
|
||||||
|
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt6 order by ts;
|
||||||
|
|
||||||
|
if $rows != 12 then
|
||||||
|
print ====streamt6=rows2=$rows
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
if $data31 != NULL then
|
||||||
|
print ====streamt6=data31=$data31
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql select * from streamt7 order by ts;
|
||||||
|
|
||||||
|
if $rows != 12 then
|
||||||
|
print ====streamt7=rows2=$rows
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
if $data31 != 100 then
|
||||||
|
print ====streamt7=data31=$data31
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt8 order by ts;
|
||||||
|
|
||||||
|
if $rows != 12 then
|
||||||
|
print ====streamt8=rows3=$rows
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
if $data31 != 6 then
|
||||||
|
print ====streamt8=data31=$data31
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt9 order by ts;
|
||||||
|
|
||||||
|
if $rows != 12 then
|
||||||
|
print ====streamt9=rows4=$rows
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
if $data31 != 1 then
|
||||||
|
print ====streamt9=data31=$data31
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt10 order by ts;
|
||||||
|
|
||||||
|
if $rows != 12 then
|
||||||
|
print ====streamt10=rows5=$rows
|
||||||
|
goto loop6
|
||||||
|
endi
|
||||||
|
if $data21 != 3 then
|
||||||
|
print ====streamt10=data21=$data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data31 != 4 then
|
||||||
|
print ====streamt10=data31=$data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data71 != 8 then
|
||||||
|
print ====streamt10=data71=$data71
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data91 != 10 then
|
||||||
|
print ====streamt10=data91=$data91
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql drop stream if exists streams0;
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
sql drop stream if exists streams9;
|
||||||
|
sql drop stream if exists streams10;
|
||||||
|
|
||||||
|
sql use test1;
|
||||||
|
sql select * from t1;
|
||||||
|
print $data00
|
||||||
|
|
||||||
|
$loop_all = $loop_all + 1
|
||||||
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
#goto looptest
|
|
@ -0,0 +1,695 @@
|
||||||
|
$loop_all = 0
|
||||||
|
looptest:
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 200
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop database if exists test1;
|
||||||
|
sql create database test1 vgroups 1;
|
||||||
|
sql use test1;
|
||||||
|
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
|
||||||
|
sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
|
||||||
|
sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791216000,5,5,5,5.0,'bbb');
|
||||||
|
sql insert into t1 values(1648791210000,1,1,1,1.0,'ccc') (1648791219000,2,2,2,2.0,'ddd') (1648791222000,3,3,3,3.0,'eee');
|
||||||
|
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop1:
|
||||||
|
sleep 200
|
||||||
|
sql use test1;
|
||||||
|
sql select * from streamt1 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 2.000000000 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 1.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != ccc then
|
||||||
|
print =====data03=$data03
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data04 != 1 then
|
||||||
|
print =====data04=$data04
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data11 != 4.000000000 then
|
||||||
|
print =====data11=$data11
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data12 != 2.000000000 then
|
||||||
|
print =====data12=$data12
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data13 != NULL then
|
||||||
|
print =====data13=$data13
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 6.000000000 then
|
||||||
|
print =====data21=$data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data22 != 3.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data23 != NULL then
|
||||||
|
print =====data23=$data23
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 8.000000000 then
|
||||||
|
print =====data31=$data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data32 != 4.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data33 != aaa then
|
||||||
|
print =====data33=$data33
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data41 != 8.666666667 then
|
||||||
|
print =====data41=$data41
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data42 != 4.333333333 then
|
||||||
|
print =====data42=$data42
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data43 != NULL then
|
||||||
|
print =====data43=$data43
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data51 != 9.333333333 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data52 != 4.666666667 then
|
||||||
|
print =====data52=$data52
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data53 != NULL then
|
||||||
|
print =====data53=$data53
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data61 != 10.000000000 then
|
||||||
|
print =====data61=$data61
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data62 != 5.000000000 then
|
||||||
|
print =====data62=$data62
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data71 != 8.000000000 then
|
||||||
|
print =====data71=$data71
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data72 != 4.000000000 then
|
||||||
|
print =====data72=$data72
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data81 != 6.000000000 then
|
||||||
|
print =====data81=$data81
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data82 != 3.000000000 then
|
||||||
|
print =====data82=$data82
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data91 != 4.000000000 then
|
||||||
|
print =====data91=$data91
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data92 != 2.000000000 then
|
||||||
|
print =====data92=$data92
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[10][1] != 4.666666667 then
|
||||||
|
print =====data[10][1]=$data[10][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[10][2] != 2.333333333 then
|
||||||
|
print =====data[10][2]=$data[10][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[11][1] != 5.333333333 then
|
||||||
|
print =====data[11][1]=$data[11][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[11][2] != 2.666666667 then
|
||||||
|
print =====data[11][2]=$data[11][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[12][1] != 6.000000000 then
|
||||||
|
print =====data[12][1]=$data[12][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[12][2] != 3.000000000 then
|
||||||
|
print =====data[12][2]=$data[12][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
sql drop stream if exists streams2;
sql drop database if exists test2;
sql create database test2 vgroups 1;
sql use test2;
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear);
sql insert into t1 values(1648791210000,1,1,1,1.0,'ccc') (1648791219000,2,2,2,2.0,'ddd') (1648791222000,3,3,3,3.0,'eee');
sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791216000,5,5,5,5.0,'bbb');
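For reference, the stream above materializes the same result as the batch query sketched below (a sketch assuming the test data just inserted, not part of the script). With fill(linear) an empty 1-second window is interpolated between its neighbouring windows, so the window at 1648791214000 sits one third of the way between the 1648791213000 window (max(a)+sum(c) = 8) and the 1648791216000 window (max(a)+sum(c) = 10), which is where the 8.666666667 expected by the checks below comes from; the varchar column first(s) cannot be interpolated and stays NULL in filled rows.

select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*)
from t1
where ts >= 1648791210000 and ts < 1648791261000
interval(1s) fill(linear);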
|
||||||
|
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop2:
|
||||||
|
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 2.000000000 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 1.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != ccc then
|
||||||
|
print =====data03=$data03
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data04 != 1 then
|
||||||
|
print =====data04=$data04
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data11 != 4.000000000 then
|
||||||
|
print =====data11=$data11
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data12 != 2.000000000 then
|
||||||
|
print =====data12=$data12
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data13 != NULL then
|
||||||
|
print =====data13=$data13
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 6.000000000 then
|
||||||
|
print =====data21=$data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data22 != 3.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data23 != NULL then
|
||||||
|
print =====data23=$data23
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 8.000000000 then
|
||||||
|
print =====data31=$data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data32 != 4.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data33 != aaa then
|
||||||
|
print =====data33=$data33
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data41 != 8.666666667 then
|
||||||
|
print =====data41=$data41
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data42 != 4.333333333 then
|
||||||
|
print =====data42=$data42
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data43 != NULL then
|
||||||
|
print =====data43=$data43
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data51 != 9.333333333 then
|
||||||
|
print =====data51=$data51
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data52 != 4.666666667 then
|
||||||
|
print =====data52=$data52
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data53 != NULL then
|
||||||
|
print =====data53=$data53
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data61 != 10.000000000 then
|
||||||
|
print =====data61=$data61
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data62 != 5.000000000 then
|
||||||
|
print =====data62=$data62
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data71 != 8.000000000 then
|
||||||
|
print =====data71=$data71
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data72 != 4.000000000 then
|
||||||
|
print =====data72=$data72
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data81 != 6.000000000 then
|
||||||
|
print =====data81=$data81
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data82 != 3.000000000 then
|
||||||
|
print =====data82=$data82
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data91 != 4.000000000 then
|
||||||
|
print =====data91=$data91
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data92 != 2.000000000 then
|
||||||
|
print =====data92=$data92
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[10][1] != 4.666666667 then
|
||||||
|
print =====data[10][1]=$data[10][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[10][2] != 2.333333333 then
|
||||||
|
print =====data[10][2]=$data[10][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[11][1] != 5.333333333 then
|
||||||
|
print =====data[11][1]=$data[11][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[11][2] != 2.666666667 then
|
||||||
|
print =====data[11][2]=$data[11][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[12][1] != 6.000000000 then
|
||||||
|
print =====data[12][1]=$data[12][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[12][2] != 3.000000000 then
|
||||||
|
print =====data[12][2]=$data[12][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
sql drop stream if exists streams3;
sql drop database if exists test3;
sql create database test3 vgroups 1;
sql use test3;
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), b+c, s, b+1, 1 from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(linear);
sql insert into t1 values(1648791215000,1,1,1,1.0,'aaa');
sql insert into t1 values(1648791217000,2,2,2,2.0,'bbb');
sql insert into t1 values(1648791211000,3,3,3,3.0,'ccc');
sql insert into t1 values(1648791213000,4,4,4,4.0,'ddd');
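For reference, streams3 computes the batch query sketched below (assuming the data just inserted; not part of the script). The four rows land in the 1-second windows at 211s, 213s, 215s and 217s, so seven windows (211s..217s) are produced; in the filled windows only the numeric columns are interpolated, while the varchar column s stays NULL, which is what the $data13/$data33/$data53 checks below expect.

select _wstart as ts, max(a), b+c, s, b+1, 1
from t1
where ts >= 1648791150000 and ts < 1648791261000
interval(1s) fill(linear);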
|
||||||
|
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop3:
|
||||||
|
sleep 300
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $rows != 7 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data01 != 3 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 6.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != ccc then
|
||||||
|
print =====data03=$data03
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != 3 then
|
||||||
|
print =====data11=$data11
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data12 != 7.000000000 then
|
||||||
|
print =====data12=$data12
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data13 != NULL then
|
||||||
|
print =====data13=$data13
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 4 then
|
||||||
|
print =====data21=$data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data22 != 8.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data23 != ddd then
|
||||||
|
print =====data23=$data23
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 2 then
|
||||||
|
print =====data31=$data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data32 != 5.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data33 != NULL then
|
||||||
|
print =====data33=$data33
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data41 != 1 then
|
||||||
|
print =====data41=$data41
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data42 != 2.000000000 then
|
||||||
|
print =====data42=$data42
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data43 != aaa then
|
||||||
|
print =====data43=$data43
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data51 != 1 then
|
||||||
|
print =====data51=$data51
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data52 != 3.000000000 then
|
||||||
|
print =====data52=$data52
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data53 != NULL then
|
||||||
|
print =====data53=$data53
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data61 != 2 then
|
||||||
|
print =====data61=$data61
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data62 != 4.000000000 then
|
||||||
|
print =====data62=$data62
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data63 != bbb then
|
||||||
|
print =====data63=$data63
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql insert into t1 values(1648791212000,5,5,5,5.0,'eee');
|
||||||
|
sql insert into t1 values(1648791207000,6,6,6,6.0,'fff') (1648791209000,7,7,7,7.0,'ggg') (1648791219000,8,8,8,8.0,'hhh') (1648791221000,9,9,9,9.0,'iii');
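With these additional rows the table covers 1648791207000 through 1648791221000, i.e. fifteen 1-second windows, so loop4 below polls streamt3 until it reaches 15 rows; the earliest window (207s) now comes from the 'fff' row, matching the $data01/$data02/$data03 checks. A trivial illustrative check, not part of the script:

select count(*) from streamt3;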
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop4:
|
||||||
|
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
sql select * from test3.streamt3 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $rows != 15 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data01 != 6 then
|
||||||
|
print =====data01=$data01
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data02 != 12.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data03 != fff then
|
||||||
|
print =====data03=$data03
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != 6 then
|
||||||
|
print =====data11=$data11
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data12 != 13.000000000 then
|
||||||
|
print =====data12=$data12
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data13 != NULL then
|
||||||
|
print =====data13=$data13
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 7 then
|
||||||
|
print =====data21=$data21
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data22 != 14.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data23 != ggg then
|
||||||
|
print =====data23=$data23
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 5 then
|
||||||
|
print =====data31=$data31
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data32 != 10.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data33 != NULL then
|
||||||
|
print =====data33=$data33
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data51 != 5 then
|
||||||
|
print =====data51=$data51
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data52 != 10.000000000 then
|
||||||
|
print =====data52=$data52
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data53 != eee then
|
||||||
|
print =====data53=$data53
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[11][1] != 5 then
|
||||||
|
print =====data[11][1]=$data[11][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[11][2] != 10.000000000 then
|
||||||
|
print =====data[11][2]=$data[11][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[11][3] != NULL then
|
||||||
|
print =====data[11][3]=$data[11][3]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[12][1] != 8 then
|
||||||
|
print =====data[12][1]=$data[12][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[12][2] != 16.000000000 then
|
||||||
|
print =====data[12][2]=$data[12][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[12][3] != hhh then
|
||||||
|
print =====data[12][3]=$data[12][3]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[13][1] != 8 then
|
||||||
|
print =====data[13][1]=$data[13][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[13][2] != 17.000000000 then
|
||||||
|
print =====data[13][2]=$data[13][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[13][3] != NULL then
|
||||||
|
print =====data[13][3]=$data[13][3]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[14][1] != 9 then
|
||||||
|
print =====data[14][1]=$data[14][1]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[14][2] != 18.000000000 then
|
||||||
|
print =====data[14][2]=$data[14][2]
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data[14][3] != iii then
|
||||||
|
print =====data[14][3]=$data[14][3]
|
||||||
|
return -1
|
||||||
|
endi
sql drop stream if exists streams0;
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
|
||||||
|
sql use test1;
|
||||||
|
sql select * from t1;
|
||||||
|
print $data00
|
||||||
|
|
||||||
|
$loop_all = $loop_all + 1
|
||||||
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
#goto looptest
|
|
@ -0,0 +1,171 @@
|
||||||
|
$loop_all = 0
|
||||||
|
looptest:
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 200
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop database if exists test1;
|
||||||
|
sql create database test1 vgroups 1;
|
||||||
|
sql use test1;
|
||||||
|
sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int);
|
||||||
|
sql create table t1 using st tags(1,1,1);
|
||||||
|
sql create table t2 using st tags(2,2,2);
|
||||||
|
sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(NULL);
sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(value,100,200,300);
sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(next);
sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(prev);
sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(linear);

sql insert into t1 values(1648791210000,0,0,0,0.0,'aaa');
sql insert into t1 values(1648791213000,1,1,1,1.0,'bbb');
sql insert into t1 values(1648791215000,5,5,5,5.0,'ccc');
sql insert into t1 values(1648791216000,6,6,6,6.0,'ddd');
sql insert into t2 values(1648791210000,7,0,0,0.0,'aaa');
sql insert into t2 values(1648791213000,8,1,1,1.0,'bbb');
sql insert into t2 values(1648791215000,9,5,5,5.0,'ccc');
sql insert into t2 values(1648791216000,10,6,6,6.0,'ddd');
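For reference, all five streams above compute the same windowed aggregate and differ only in how empty windows are filled (NULL, the literals 100/200/300, next, prev, linear). Both child tables have data from 1648791210000 to 1648791216000, so each partition (ta=1 and ta=2) yields seven 1-second windows and every streamt table below is polled until it holds 14 rows. A sketch of the batch form of streams1, assuming the data above (not part of the script):

select _wstart as ts, max(a) c1, sum(b), count(*)
from st
where ts >= 1648791210000 and ts < 1648791261000
partition by ta interval(1s) fill(NULL);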
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop2:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql select * from streamt1 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 14 then
|
||||||
|
print ====streamt1=rows1=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 14 then
|
||||||
|
print ====streamt2=rows2=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 14 then
|
||||||
|
print ====streamt3=rows3=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 14 then
|
||||||
|
print ====streamt4=rows4=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 14 then
|
||||||
|
print ====streamt5=rows5=$rows
|
||||||
|
goto loop2
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql delete from t1 where ts = 1648791216000;
print ======delete from t1 where ts = 1648791216000;
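Deleting the 1648791216000 row removes the last window of the ta=1 partition, so once the streams have reprocessed the delete each target table should shrink from 14 rows to 13 (six windows for ta=1, still seven for ta=2); that is what the loop3 checks below wait for. An illustrative per-partition count, assuming the tables above (not part of the script):

select group_id, count(*) from streamt1 group by group_id;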
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop3:
|
||||||
|
sleep 200
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt1 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt1=rows1=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt2 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt2=rows2=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt3 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt3=rows3=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt4 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt4=rows4=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql select * from streamt5 order by group_id, ts;
|
||||||
|
|
||||||
|
if $rows != 13 then
|
||||||
|
print ====streamt5=rows5=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
sql drop stream if exists streams0;
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
sql drop stream if exists streams9;
|
||||||
|
sql drop stream if exists streams10;
|
||||||
|
|
||||||
|
sql use test1;
|
||||||
|
sql select * from t1;
|
||||||
|
print $data00
|
||||||
|
|
||||||
|
$loop_all = $loop_all + 1
|
||||||
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
#goto looptest
|
File diff suppressed because it is too large
|
@ -0,0 +1,488 @@
|
||||||
|
$loop_all = 0
|
||||||
|
looptest:
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
system sh/deploy.sh -n dnode1 -i 1
|
||||||
|
system sh/exec.sh -n dnode1 -s start
|
||||||
|
sleep 200
|
||||||
|
sql connect
|
||||||
|
|
||||||
|
sql drop database if exists test;
sql create database test vgroups 1;
sql use test;

sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value, 100);
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
sleep 100
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791223000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791283000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791253000,1,2,3,1.0,'aaa');
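The rows above fall into the 10-second windows starting at 210s, 220s, 230s, 250s and 280s, so the stream output spans the eight windows from 1648791210000 to 1648791280000; the three empty windows (240s, 260s, 270s) are reported with the literal 100 from fill(value, 100), which is what the $data31/$data51/$data61 checks below assert. A sketch of the batch form of the stream query, assuming the same data (not part of the script):

select _wstart ts, count(*) c1
from t1
where ts > 1648791210000 and ts < 1648791413000
interval(10s) fill(value, 100);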
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
sleep 200
|
||||||
|
sql select * from streamt order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 8 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 1 then
|
||||||
|
print =====data01=$data01
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != 1 then
|
||||||
|
print =====data11=$data11
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data21 != 1 then
|
||||||
|
print =====data21=$data21
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data31 != 100 then
|
||||||
|
print =====data31=$data31
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data41 != 1 then
|
||||||
|
print =====data41=$data41
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data51 != 100 then
|
||||||
|
print =====data51=$data51
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data61 != 100 then
|
||||||
|
print =====data61=$data61
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data71 != 1 then
|
||||||
|
print =====data71=$data71
|
||||||
|
goto loop0
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql drop stream if exists streams2;
sql drop database if exists test2;
sql create database test2 vgroups 1;
sql use test2;
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value, 100,200);
sql insert into t1 values(1648791211000,1,1,1,1.0,'aaa') (1648791217000,2,2,2,2.0,'aaa') (1648791220000,3,3,3,3.0,'aaa');
sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791215000,5,5,5,5.0,'aaa');
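With fill(value, 100,200) the two literals map positionally to the two non-timestamp result columns, so an empty 1-second window is reported as count(*) = 100 and max(b)+sum(a) = 200.000000000, while the windows that actually contain data (211s, 213s, 215s, 217s, 220s) keep their computed values; that gives the ten rows (211s..220s) checked in loop1 below. A sketch of the batch form, assuming the same data (not part of the script):

select _wstart as ts, count(*) c1, max(b)+sum(a)
from t1
where ts >= 1648791210000 and ts < 1648791261000
interval(1s) fill(value, 100,200);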
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop1:
|
||||||
|
sleep 200
|
||||||
|
sql select * from streamt2 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 10 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 1 then
|
||||||
|
print =====data01=$data01
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data02 != 2.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data11 != 100 then
|
||||||
|
print =====data11=$data11
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data12 != 200.000000000 then
|
||||||
|
print =====data12=$data12
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 1 then
|
||||||
|
print =====data21=$data21
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data22 != 8.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 100 then
|
||||||
|
print =====data31=$data31
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data32 != 200.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data41 != 1 then
|
||||||
|
print =====data41=$data41
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data42 != 10.000000000 then
|
||||||
|
print =====data42=$data42
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data51 != 100 then
|
||||||
|
print =====data51=$data51
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data52 != 200.000000000 then
|
||||||
|
print =====data52=$data52
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data61 != 1 then
|
||||||
|
print =====data61=$data61
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data62 != 4.000000000 then
|
||||||
|
print =====data62=$data62
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data71 != 100 then
|
||||||
|
print =====data71=$data71
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data72 != 200.000000000 then
|
||||||
|
print =====data72=$data72
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data81 != 100 then
|
||||||
|
print =====data81=$data81
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data82 != 200.000000000 then
|
||||||
|
print =====data82=$data82
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data91 != 1 then
|
||||||
|
print =====data91=$data91
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data92 != 6.000000000 then
|
||||||
|
print =====data92=$data92
|
||||||
|
goto loop1
|
||||||
|
endi
|
||||||
|
|
||||||
|
sql drop stream if exists streams3;
sql drop database if exists test3;
sql create database test3 vgroups 1;
sql use test3;
sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(b), a+b, c from t1 where ts >= 1648791200000 and ts < 1648791261000 interval(10s) sliding(3s) fill(value, 100,200,300);

sql insert into t1 values(1648791220000,1,1,1,1.0,'aaa');
sleep 100
sql insert into t1 values(1648791260000,1,1,1,1.0,'aaa');
sleep 100
sql insert into t1 values(1648791200000,1,1,1,1.0,'aaa');
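interval(10s) sliding(3s) emits one window every 3 seconds, each 10 seconds wide, so the three rows above (200s, 220s, 260s) each fall into several overlapping windows and the filled output is expected to reach 23 rows. Windows that contain no data take the positional literals from fill(value, 100,200,300), i.e. max(b) = 100, a+b = 200.000000000, c = 300, which is what the $data31/$data32/$data33 checks below assert. A sketch of the batch form, assuming the same data (not part of the script):

select _wstart as ts, max(b), a+b, c
from t1
where ts >= 1648791200000 and ts < 1648791261000
interval(10s) sliding(3s) fill(value, 100,200,300);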
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop3:
|
||||||
|
sleep 200
|
||||||
|
sql select * from streamt3 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 23 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data01 != 1 then
|
||||||
|
print =====data01=$data01
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data02 != 2.000000000 then
|
||||||
|
print =====data02=$data02
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data03 != 1 then
|
||||||
|
print =====data03=$data03
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data21 != 1 then
|
||||||
|
print =====data21=$data21
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data22 != 2.000000000 then
|
||||||
|
print =====data22=$data22
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data23 != 1 then
|
||||||
|
print =====data23=$data23
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data31 != 100 then
|
||||||
|
print =====data31=$data31
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data32 != 200.000000000 then
|
||||||
|
print =====data32=$data32
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data33 != 300 then
|
||||||
|
print =====data33=$data33
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data61 != 100 then
|
||||||
|
print =====data61=$data61
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data62 != 200.000000000 then
|
||||||
|
print =====data62=$data62
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data63 != 300 then
|
||||||
|
print =====data63=$data63
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data71 != 1 then
|
||||||
|
print =====data71=$data71
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data72 != 2.000000000 then
|
||||||
|
print =====data72=$data72
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data73 != 1 then
|
||||||
|
print =====data73=$data73
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data91 != 1 then
|
||||||
|
print =====data91=$data91
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data92 != 2.000000000 then
|
||||||
|
print =====data92=$data92
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data93 != 1 then
|
||||||
|
print =====data93=$data93
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[10][1] != 100 then
|
||||||
|
print =====data[10][1]=$data[10][1]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[10][2] != 200.000000000 then
|
||||||
|
print =====data[10][2]=$data[10][2]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[10][3] != 300 then
|
||||||
|
print =====data[10][3]=$data[10][3]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[19][1] != 100 then
|
||||||
|
print =====data[19][1]=$data[19][1]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[19][2] != 200.000000000 then
|
||||||
|
print =====data[19][2]=$data[19][2]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[19][3] != 300 then
|
||||||
|
print =====data[19][3]=$data[19][3]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[20][1] != 1 then
|
||||||
|
print =====data[20][1]=$data[20][1]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[20][2] != 2.000000000 then
|
||||||
|
print =====data[20][2]=$data[20][2]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[20][3] != 1 then
|
||||||
|
print =====data[20][3]=$data[20][3]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
if $data[22][1] != 1 then
|
||||||
|
print =====data[22][1]=$data[22][1]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[22][2] != 2.000000000 then
|
||||||
|
print =====data[22][2]=$data[22][2]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data[22][3] != 1 then
|
||||||
|
print =====data[22][3]=$data[22][3]
|
||||||
|
goto loop3
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
sql drop stream if exists streams4;
sql drop database if exists test4;
sql create database test4 vgroups 1;
sql use test4;

sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));
sql create stream streams4 trigger at_once into streamt4 as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(NULL);
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
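The two rows above land in the 10-second windows at 1648791210000 and 1648791230000, so the output covers three windows and the empty middle window (220s) has its count(*) filled with NULL, matching the rows = 3 and $data11 = NULL checks below. A sketch of the batch form, assuming the same data (not part of the script):

select _wstart ts, count(*) c1
from t1
where ts > 1648791210000 and ts < 1648791413000
interval(10s) fill(NULL);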
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop4:
|
||||||
|
sleep 200
|
||||||
|
sql select * from streamt4 order by ts;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 3 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop4
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $data11 != NULL then
|
||||||
|
print =====data11=$data11
|
||||||
|
goto loop4
|
||||||
|
endi
|
sql drop stream if exists streams0;
|
||||||
|
sql drop stream if exists streams1;
|
||||||
|
sql drop stream if exists streams2;
|
||||||
|
sql drop stream if exists streams3;
|
||||||
|
sql drop stream if exists streams4;
|
||||||
|
sql drop stream if exists streams5;
|
||||||
|
sql drop stream if exists streams6;
|
||||||
|
sql drop stream if exists streams7;
|
||||||
|
sql drop stream if exists streams8;
|
||||||
|
|
||||||
|
sql use test;
|
||||||
|
sql select * from t1;
|
||||||
|
print $data00
|
||||||
|
|
||||||
|
$loop_all = $loop_all + 1
|
||||||
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
system sh/stop_dnodes.sh
|
||||||
|
|
||||||
|
#goto looptest
|
|
@ -5,13 +5,14 @@ sleep 50
|
||||||
sql connect
|
sql connect
|
||||||
|
|
||||||
sql create database test vgroups 4;
|
sql create database test vgroups 4;
|
||||||
|
sql create database test0 vgroups 1;
|
||||||
sql use test;
|
sql use test;
|
||||||
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
|
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
|
||||||
sql create table ts1 using st tags(1,1,1);
|
sql create table ts1 using st tags(1,1,1);
|
||||||
sql create table ts2 using st tags(2,2,2);
|
sql create table ts2 using st tags(2,2,2);
|
||||||
sql create table ts3 using st tags(3,2,2);
|
sql create table ts3 using st tags(3,2,2);
|
||||||
sql create table ts4 using st tags(4,2,2);
|
sql create table ts4 using st tags(4,2,2);
|
||||||
sql create stream stream_t1 trigger at_once into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by ta,tb,tc interval(10s);
|
sql create stream stream_t1 trigger at_once into test0.streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by ta,tb,tc interval(10s);
|
||||||
|
|
||||||
sql insert into ts1 values(1648791213001,1,12,3,1.0);
|
sql insert into ts1 values(1648791213001,1,12,3,1.0);
|
||||||
sql insert into ts2 values(1648791213001,1,12,3,1.0);
|
sql insert into ts2 values(1648791213001,1,12,3,1.0);
|
||||||
|
@ -22,7 +23,7 @@ $loop_count = 0
|
||||||
|
|
||||||
loop0:
|
loop0:
|
||||||
sleep 300
|
sleep 300
|
||||||
sql select * from streamtST1;
|
sql select * from test0.streamtST1;
|
||||||
|
|
||||||
$loop_count = $loop_count + 1
|
$loop_count = $loop_count + 1
|
||||||
if $loop_count == 10 then
|
if $loop_count == 10 then
|
||||||
|
@ -34,6 +35,29 @@ print =====rows=$rows
|
||||||
goto loop0
|
goto loop0
|
||||||
endi
|
endi
|
||||||
|
|
||||||
|
sql insert into ts1 values(1648791223001,1,12,3,1.0);
|
||||||
|
sql insert into ts2 values(1648791223001,1,12,3,1.0);
|
||||||
|
|
||||||
|
sql insert into ts3 values(1648791223001,1,12,3,1.0);
|
||||||
|
sql insert into ts4 values(1648791223001,1,12,3,1.0);
|
||||||
|
sleep 300
|
||||||
|
sql delete from st where ts = 1648791223001;
|
||||||
|
|
||||||
|
loop00:
|
||||||
|
sleep 300
|
||||||
|
sql select * from test0.streamtST1;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 10 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 4 then
|
||||||
|
print =====rows=$rows
|
||||||
|
goto loop00
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
print =====loop0
|
print =====loop0
|
||||||
|
|
||||||
sql create database test1 vgroups 1;
|
sql create database test1 vgroups 1;
|
||||||
|
|
|
@ -562,6 +562,53 @@ if $data21 != 1 then
|
||||||
goto loop14
|
goto loop14
|
||||||
endi
|
endi
|
||||||
|
|
||||||
|
sql drop stream if exists streams5;
sql drop database if exists test5;
sql create database test5 vgroups 4;
sql use test5;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
sql create table t3 using st tags(2,2,2);
sql create table t4 using st tags(2,2,2);
sql create stream streams5 trigger at_once into test.streamt5 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);

sql insert into t1 values(1648791213000,1,2,3,1.0);
sql insert into t2 values(1648791213000,2,2,3,1.0);
sql insert into t3 values(1648791213000,3,2,3,1.0);
sql insert into t4 values(1648791213000,4,2,3,1.0);

sql insert into t1 values(1648791223000,1,2,3,1.0);
sql insert into t2 values(1648791223000,2,2,3,1.0);
sql insert into t3 values(1648791223000,3,2,3,1.0);
sql insert into t4 values(1648791223000,4,2,3,1.0);
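streams5 partitions st by the data column a rather than by a tag; each child table writes a distinct value of a (1 to 4), so the stream maintains four groups, and its target test.streamt5 lives in a different database (test) from the source (test5). After the delete of the 1648791223000 rows below, the script expects the target to settle at four rows, one per group. An illustrative per-group check, assuming the target above (not part of the script; c3 is max(a), so it identifies the group):

select c3, count(*) from test.streamt5 group by c3;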
|
||||||
|
|
||||||
|
sleep 300
|
||||||
|
|
||||||
|
sql delete from st where ts = 1648791223000;
|
||||||
|
|
||||||
|
sql select * from test.streamt5;
|
||||||
|
|
||||||
|
$loop_count = 0
|
||||||
|
|
||||||
|
loop15:
|
||||||
|
sleep 50
|
||||||
|
sql select * from test.streamt5 order by c1, c2, c3;
|
||||||
|
|
||||||
|
$loop_count = $loop_count + 1
|
||||||
|
if $loop_count == 20 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
|
||||||
|
if $rows != 4 then
|
||||||
|
print =====rows=$rows
|
||||||
|
print =====rows=$rows
|
||||||
|
print =====rows=$rows
|
||||||
|
# goto loop15
|
||||||
|
endi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
$loop_all = $loop_all + 1
|
$loop_all = $loop_all + 1
|
||||||
print ============loop_all=$loop_all
|
print ============loop_all=$loop_all
|
||||||
|
|
||||||
|
|
|
@ -356,8 +356,261 @@ class TDTestCase:
|
||||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(linear)")
|
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(linear)")
|
||||||
tdSql.checkRows(0)
|
tdSql.checkRows(0)
|
||||||
|
|
||||||
|
tdLog.printNoPrefix("==========step8:test _irowts with interp")
|
||||||
|
|
||||||
tdLog.printNoPrefix("==========step8:test intra block interpolation")
|
# fill null
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(null)")
|
||||||
|
tdSql.checkRows(9)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:09.500')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.500')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
|
||||||
|
tdSql.checkRows(13)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
|
||||||
|
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(null)")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
# fill value
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(value, 1)")
|
||||||
|
tdSql.checkRows(9)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:09.500')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.500')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
|
||||||
|
tdSql.checkRows(13)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
|
||||||
|
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(value, 1)")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
# fill prev
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(prev)")
|
||||||
|
tdSql.checkRows(9)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:09.500')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.500')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(prev)")
|
||||||
|
tdSql.checkRows(12)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(9, 0, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(10, 0, '2020-02-01 00:00:15.000')
|
||||||
|
tdSql.checkData(11, 0, '2020-02-01 00:00:16.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(prev)")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
# fill next
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(next)")
|
||||||
|
tdSql.checkRows(9)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:09.500')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.500')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
|
||||||
|
tdSql.checkRows(12)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
# fill linear
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(500a) fill(linear)")
|
||||||
|
tdSql.checkRows(9)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:08.500')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:09.500')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.500')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:11.500')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
|
||||||
|
tdSql.checkRows(11)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(6, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 0, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(8, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(9, 0, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(10, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(linear)")
|
||||||
|
tdSql.checkRows(6)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 0, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(2, 0, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(3, 0, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(4, 0, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(5, 0, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
# multiple _irowts
|
||||||
|
tdSql.query(f"select interp(c0),_irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
|
||||||
|
tdSql.checkRows(11)
|
||||||
|
tdSql.checkCols(2)
|
||||||
|
|
||||||
|
tdSql.checkData(0, 1, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, 1, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(2, 1, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(3, 1, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(4, 1, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(5, 1, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(6, 1, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, 1, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(8, 1, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(9, 1, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(10, 1, '2020-02-01 00:00:15.000')
|
||||||
|
|
||||||
|
tdSql.query(f"select _irowts, interp(c0), interp(c0), _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
|
||||||
|
tdSql.checkRows(11)
|
||||||
|
tdSql.checkCols(4)
|
||||||
|
|
||||||
|
cols = (0, 3)
|
||||||
|
for i in cols:
|
||||||
|
tdSql.checkData(0, i, '2020-02-01 00:00:05.000')
|
||||||
|
tdSql.checkData(1, i, '2020-02-01 00:00:06.000')
|
||||||
|
tdSql.checkData(2, i, '2020-02-01 00:00:07.000')
|
||||||
|
tdSql.checkData(3, i, '2020-02-01 00:00:08.000')
|
||||||
|
tdSql.checkData(4, i, '2020-02-01 00:00:09.000')
|
||||||
|
tdSql.checkData(5, i, '2020-02-01 00:00:10.000')
|
||||||
|
tdSql.checkData(6, i, '2020-02-01 00:00:11.000')
|
||||||
|
tdSql.checkData(7, i, '2020-02-01 00:00:12.000')
|
||||||
|
tdSql.checkData(8, i, '2020-02-01 00:00:13.000')
|
||||||
|
tdSql.checkData(9, i, '2020-02-01 00:00:14.000')
|
||||||
|
tdSql.checkData(10, i, '2020-02-01 00:00:15.000')
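The step above exercises the _irowts pseudocolumn, which returns the timestamp of each row produced by interp() and may appear more than once and in any position in the select list. The row counts asserted in this step differ only at the edges of the range: over range('2020-02-01 00:00:04', '2020-02-01 00:00:16') with every(1s), fill(null) and fill(value) cover all 13 slots, fill(prev) cannot fill the slot before the first data row (12 rows starting at 00:00:05), fill(next) cannot fill the slot after the last data row (12 rows ending at 00:00:15), and fill(linear) needs data on both sides, leaving the 11 slots from 00:00:05 to 00:00:15. A sketch of the three edge-sensitive variants, with tb standing in for the {dbname}.{tbname} used by the test (not part of the test):

select _irowts, interp(c0) from tb range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(prev);   -- 12 rows, 00:00:05 .. 00:00:16
select _irowts, interp(c0) from tb range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next);   -- 12 rows, 00:00:04 .. 00:00:15
select _irowts, interp(c0) from tb range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear); -- 11 rows, 00:00:05 .. 00:00:15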
|
||||||
|
|
||||||
|
|
||||||
|
tdLog.printNoPrefix("==========step9:test intra block interpolation")
|
||||||
tdSql.execute(f"drop database {dbname}");
|
tdSql.execute(f"drop database {dbname}");
|
||||||
|
|
||||||
tdSql.prepare()
|
tdSql.prepare()
|
||||||
|
@ -551,7 +804,7 @@ class TDTestCase:
|
||||||
tdSql.checkData(0, 0, 15)
|
tdSql.checkData(0, 0, 15)
|
||||||
tdSql.checkData(1, 0, 15)
|
tdSql.checkData(1, 0, 15)
|
||||||
|
|
||||||
tdLog.printNoPrefix("==========step9:test multi-interp cases")
|
tdLog.printNoPrefix("==========step10:test multi-interp cases")
|
||||||
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)")
|
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)")
|
||||||
tdSql.checkRows(5)
|
tdSql.checkRows(5)
|
||||||
tdSql.checkCols(4)
|
tdSql.checkCols(4)
|
||||||
|
@ -601,7 +854,7 @@ class TDTestCase:
|
||||||
for i in range (tdSql.queryCols):
|
for i in range (tdSql.queryCols):
|
||||||
tdSql.checkData(0, i, 13)
|
tdSql.checkData(0, i, 13)
|
||||||
|
|
||||||
tdLog.printNoPrefix("==========step10:test error cases")
|
tdLog.printNoPrefix("==========step11:test error cases")
|
||||||
|
|
||||||
tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
|
tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
|
||||||
tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
|
tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
|
||||||
|
|
|
@ -295,7 +295,7 @@ class TDTestCase:
|
||||||
ifManualCommit = 1
|
ifManualCommit = 1
|
||||||
keyList = 'group.id:cgrp1,\
|
keyList = 'group.id:cgrp1,\
|
||||||
enable.auto.commit:true,\
|
enable.auto.commit:true,\
|
||||||
auto.commit.interval.ms:1000,\
|
auto.commit.interval.ms:200,\
|
||||||
auto.offset.reset:earliest'
|
auto.offset.reset:earliest'
|
||||||
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||||
|
|
||||||
|
|
|
@ -358,7 +358,7 @@ static int32_t shellCheckArgs() {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pArgs->password != NULL && (strlen(pArgs->password) <= 0)) {
|
if (/*pArgs->password != NULL && */ (strlen(pArgs->password) <= 0)) {
|
||||||
printf("Invalid password\r\n");
|
printf("Invalid password\r\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -224,10 +224,24 @@ char *simGetVariable(SScript *script, char *varName, int32_t varLen) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t simExecuteExpression(SScript *script, char *exp) {
|
int32_t simExecuteExpression(SScript *script, char *exp) {
|
||||||
char *op1, *op2, *var1, *var2, *var3, *rest;
|
char *op1 = NULL;
|
||||||
int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1;
|
char *op2 = NULL;
|
||||||
char t0[1024], t1[1024], t2[1024], t3[2048];
|
char *var1 = NULL;
|
||||||
int32_t result;
|
char *var2 = NULL;
|
||||||
|
char *var3 = NULL;
|
||||||
|
char *rest = NULL;
|
||||||
|
int32_t op1Len = 0;
|
||||||
|
int32_t op2Len = 0;
|
||||||
|
int32_t var1Len = 0;
|
||||||
|
int32_t var2Len = 0;
|
||||||
|
int32_t var3Len = 0;
|
||||||
|
int32_t val0 = 0;
|
||||||
|
int32_t val1 = 0;
|
||||||
|
char t0[1024] = {0};
|
||||||
|
char t1[1024] = {0};
|
||||||
|
char t2[1024] = {0};
|
||||||
|
char t3[2048] = {0};
|
||||||
|
int32_t result = 0;
|
||||||
|
|
||||||
rest = paGetToken(exp, &var1, &var1Len);
|
rest = paGetToken(exp, &var1, &var1Len);
|
||||||
rest = paGetToken(rest, &op1, &op1Len);
|
rest = paGetToken(rest, &op1, &op1Len);
|
||||||
|
@ -241,9 +255,9 @@ int32_t simExecuteExpression(SScript *script, char *exp) {
|
||||||
t0[var1Len] = 0;
|
t0[var1Len] = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (var2[0] == '$')
|
if (var2[0] == '$') {
|
||||||
strcpy(t1, simGetVariable(script, var2 + 1, var2Len - 1));
|
tstrncpy(t1, simGetVariable(script, var2 + 1, var2Len - 1), 1024);
|
||||||
else {
|
} else {
|
||||||
memcpy(t1, var2, var2Len);
|
memcpy(t1, var2, var2Len);
|
||||||
t1[var2Len] = 0;
|
t1[var2Len] = 0;
|
||||||
}
|
}
|
||||||
|
@@ -258,14 +272,21 @@ int32_t simExecuteExpression(SScript *script, char *exp) {
     t2[var3Len] = 0;
   }

+  int64_t t1l = atoll(t1);
+  int64_t t2l = atoll(t2);
+
   if (op2[0] == '+') {
-    sprintf(t3, "%lld", atoll(t1) + atoll(t2));
+    sprintf(t3, "%" PRId64, t1l + t2l);
   } else if (op2[0] == '-') {
-    sprintf(t3, "%lld", atoll(t1) - atoll(t2));
+    sprintf(t3, "%" PRId64, t1l - t2l);
   } else if (op2[0] == '*') {
-    sprintf(t3, "%lld", atoll(t1) * atoll(t2));
+    sprintf(t3, "%" PRId64, t1l * t2l);
   } else if (op2[0] == '/') {
-    sprintf(t3, "%lld", atoll(t1) / atoll(t2));
+    if (t2l == 0) {
+      sprintf(t3, "%" PRId64, INT64_MAX);
+    } else {
+      sprintf(t3, "%" PRId64, t1l / t2l);
+    }
   } else if (op2[0] == '.') {
     sprintf(t3, "%s%s", t1, t2);
   }
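Two things change here: the 64-bit values are formatted with PRId64 from <inttypes.h> instead of the non-portable "%lld", and a zero divisor is caught before it can trap. A standalone sketch of the same idea; the expression strings are made up for illustration:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Divide two decimal strings, guarding the zero-divisor case the
     * same way the script engine now does: fall back to INT64_MAX. */
    static void divide_expr(const char *a, const char *b, char *out, size_t outLen) {
      int64_t x = atoll(a);
      int64_t y = atoll(b);
      if (y == 0) {
        snprintf(out, outLen, "%" PRId64, INT64_MAX);
      } else {
        snprintf(out, outLen, "%" PRId64, x / y);
      }
    }

    int main(void) {
      char result[32];
      divide_expr("100", "7", result, sizeof(result));
      printf("100 / 7 -> %s\n", result);
      divide_expr("100", "0", result, sizeof(result));
      printf("100 / 0 -> %s\n", result);   /* prints INT64_MAX instead of crashing */
      return 0;
    }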
@@ -636,7 +657,7 @@ bool simCreateTaosdConnect(SScript *script, char *rest) {
 }

 bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
-  char timeStr[30] = {0};
+  char timeStr[80] = {0};
   time_t tt;
   struct tm tp;
   SCmdLine *line = &script->lines[script->linePos];
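The timestamp buffer grows from 30 to 80 bytes, which gives strftime comfortable headroom for longer formats. A small sketch of why the size matters; the format string here is only an example, not necessarily the one the simulator uses:

    #include <stdio.h>
    #include <time.h>

    int main(void) {
      char      timeStr[80] = {0};
      time_t    tt = time(NULL);
      struct tm tp;

      localtime_r(&tt, &tp);   /* fill tp from the current wall-clock time */

      /* strftime returns 0 and leaves the buffer unusable if the result,
       * including the terminating NUL, does not fit - a roomy buffer avoids that. */
      if (strftime(timeStr, sizeof(timeStr), "%Y-%m-%d %H:%M:%S %z (%Z)", &tp) == 0) {
        fprintf(stderr, "timestamp did not fit\n");
        return 1;
      }
      printf("%s\n", timeStr);
      return 0;
    }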
@@ -943,7 +964,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
     return true;
   }

-  TAOS_RES *pSql = pSql = taos_query(script->taos, rest);
+  TAOS_RES *pSql = taos_query(script->taos, rest);
   int32_t ret = taos_errno(pSql);
   taos_free_result(pSql);

@@ -961,7 +982,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
 }

 bool simExecuteLineInsertCmd(SScript *script, char *rest) {
-  char buf[TSDB_MAX_BINARY_LEN];
+  char buf[TSDB_MAX_BINARY_LEN] = {0};

   simVisuallizeOption(script, rest, buf);
   rest = buf;
@@ -973,10 +994,7 @@ bool simExecuteLineInsertCmd(SScript *script, char *rest) {
   char *lines[] = {rest};
 #if 0
   int32_t ret = taos_insert_lines(script->taos, lines, 1);
-#else
-  int32_t ret = 0;
-#endif
   if (ret == TSDB_CODE_SUCCESS) {
     simDebug("script:%s, taos:%p, %s executed. success.", script->fileName, script->taos, rest);
     script->linePos++;
     return true;
@@ -985,6 +1003,11 @@ bool simExecuteLineInsertCmd(SScript *script, char *rest) {
              tstrerror(ret));
     return false;
   }
+#else
+  simDebug("script:%s, taos:%p, %s executed. success.", script->fileName, script->taos, rest);
+  script->linePos++;
+  return true;
+#endif
 }

 bool simExecuteLineInsertErrorCmd(SScript *script, char *rest) {
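The schemaless-insert path is now bracketed by #if 0 ... #else ... #endif, so the unavailable branch never reaches the compiler while the success path stays in place. A minimal sketch of that pattern; the feature flag and function names are invented for illustration:

    #include <stdio.h>

    /* Flip this to 1 once the optional API below is actually available. */
    #define USE_OPTIONAL_API 0

    #if USE_OPTIONAL_API
    static int do_real_work(const char *line) {
      /* would call the optional API here; never compiled while the flag is 0 */
      return optional_api_insert(line);
    }
    #else
    static int do_real_work(const char *line) {
      printf("skipping optional path, pretending '%s' succeeded\n", line);
      return 0;   /* report success */
    }
    #endif

    int main(void) {
      return do_real_work("demo,line=1 value=42") == 0 ? 0 : 1;
    }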
@@ -33,7 +33,7 @@ int32_t main(int32_t argc, char *argv[]) {
     if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
       tstrncpy(configDir, argv[++i], 128);
     } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
-      strcpy(scriptFile, argv[++i]);
+      tstrncpy(scriptFile, argv[++i], MAX_FILE_NAME_LEN);
     } else if (strcmp(argv[i], "-m") == 0) {
       useMultiProcess = true;
     } else if (strcmp(argv[i], "-v") == 0) {
@@ -175,14 +175,17 @@ SScript *simBuildScriptObj(char *fileName) {
 SScript *simParseScript(char *fileName) {
   TdFilePtr pFile;
   int32_t tokenLen, lineNum = 0;
-  char buffer[10*1024], name[128], *token, *rest;
-  SCommand *pCmd;
-  SScript *script;
+  char buffer[10 * 1024] = {0};
+  char name[PATH_MAX] = {9};
+  char *token = NULL;
+  char *rest = NULL;
+  SCommand *pCmd = NULL;
+  SScript *script = NULL;

   if ((fileName[0] == '.') || (fileName[0] == '/')) {
-    strcpy(name, fileName);
+    tstrncpy(name, fileName, PATH_MAX);
   } else {
-    sprintf(name, "%s" TD_DIRSEP "%s", simScriptDir, fileName);
+    snprintf(name, PATH_MAX, "%s" TD_DIRSEP "%s", simScriptDir, fileName);
     taosRealPath(name, NULL, sizeof(name));
   }

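Here sprintf into a fixed name buffer is replaced with snprintf bounded by PATH_MAX, so an overlong script directory cannot overrun the stack. A standalone sketch of bounded path building; the directory and file names are placeholders:

    #include <limits.h>
    #include <stdio.h>

    #ifndef PATH_MAX
    #define PATH_MAX 4096
    #endif

    int main(void) {
      const char *scriptDir = "/tmp/sim/script";   /* placeholder directory */
      const char *fileName  = "basic.sim";         /* placeholder file name */
      char        name[PATH_MAX] = {0};

      /* snprintf never writes past the buffer and reports the length it wanted,
       * so truncation can be detected instead of silently corrupting memory. */
      int n = snprintf(name, sizeof(name), "%s/%s", scriptDir, fileName);
      if (n < 0 || (size_t)n >= sizeof(name)) {
        fprintf(stderr, "path too long\n");
        return 1;
      }
      printf("%s\n", name);
      return 0;
    }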