Merge branch '3.0' into feature/3.0_interval_hash_optimize
This commit is contained in:
commit
691118f8cd
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 2dba49c
|
||||
GIT_TAG 3588b3d
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -52,7 +52,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i
|
|||
taosBenchmark
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||
|
||||
|
@ -74,10 +74,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data:
|
|||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the number of rows whose `location` tag is `San Francisco`:
|
||||
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||
|
|
|
@ -221,7 +221,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i
|
|||
taosBenchmark
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||
|
||||
|
@ -243,10 +243,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data:
|
|||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the number of rows whose `location` tag is `San Francisco`:
|
||||
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||
|
|
|
@ -917,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**Return value type**:Same as the data type of the column being operated upon
|
||||
|
||||
**Applicable data types**: Numeric, Timestamp
|
||||
**Applicable data types**: Numeric
|
||||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
|
@ -932,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
|||
|
||||
**Return value type**:Same as the data type of the column being operated upon
|
||||
|
||||
**Applicable data types**: Numeric, Timestamp
|
||||
**Applicable data types**: Numeric
|
||||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
|
|
|
@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
|||
let inserted = taos.exec_many([
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
||||
TAGS (`groupid` INT, `location` BINARY(24))",
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
|
|
|
@ -38,12 +38,12 @@ public class SubscribeDemo {
|
|||
statement.executeUpdate("create database " + DB_NAME);
|
||||
statement.executeUpdate("use " + DB_NAME);
|
||||
statement.executeUpdate(
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))");
|
||||
statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')");
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))");
|
||||
statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')");
|
||||
statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)");
|
||||
statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)");
|
||||
statement.executeUpdate(
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)");
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)");
|
||||
statement.executeUpdate(
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)");
|
||||
// create topic
|
||||
|
|
|
@ -16,7 +16,7 @@ class MockDataSource implements Iterator {
|
|||
private int currentTbId = -1;
|
||||
|
||||
// mock values
|
||||
String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
|
||||
String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"};
|
||||
float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
|
||||
int[] voltage = {119, 116, 111, 113, 118};
|
||||
float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
|
||||
|
|
|
@ -3,11 +3,11 @@ import time
|
|||
|
||||
class MockDataSource:
|
||||
samples = [
|
||||
"8.8,119,0.32,LosAngeles,0",
|
||||
"10.7,116,0.34,SanDiego,1",
|
||||
"9.9,111,0.33,Hollywood,2",
|
||||
"8.9,113,0.329,Compton,3",
|
||||
"9.4,118,0.141,San Francisco,4"
|
||||
"8.8,119,0.32,California.LosAngeles,0",
|
||||
"10.7,116,0.34,California.SanDiego,1",
|
||||
"9.9,111,0.33,California.SanJose,2",
|
||||
"8.9,113,0.329,California.Campbell,3",
|
||||
"9.4,118,0.141,California.SanFrancisco,4"
|
||||
]
|
||||
|
||||
def __init__(self, tb_name_prefix, table_count):
|
||||
|
|
|
@ -12,7 +12,7 @@ async fn main() -> anyhow::Result<()> {
|
|||
// bind table name and tags
|
||||
stmt.set_tbname_tags(
|
||||
"d1001",
|
||||
&[Value::VarChar("San Fransico".into()), Value::Int(2)],
|
||||
&[Value::VarChar("California.SanFransico".into()), Value::Int(2)],
|
||||
)?;
|
||||
// bind values.
|
||||
let values = vec![
|
||||
|
|
|
@ -19,13 +19,13 @@ struct Record {
|
|||
async fn prepare(taos: Taos) -> anyhow::Result<()> {
|
||||
let inserted = taos.exec_many([
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
|
@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> {
|
|||
format!("CREATE DATABASE `{db}`"),
|
||||
format!("USE `{db}`"),
|
||||
// create super table
|
||||
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"),
|
||||
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
|
||||
// create topic for subscription
|
||||
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
|
||||
])
|
||||
|
|
|
@ -14,14 +14,14 @@ async fn main() -> anyhow::Result<()> {
|
|||
]).await?;
|
||||
|
||||
let inserted = taos.exec("INSERT INTO
|
||||
power.d1001 USING power.meters TAGS('San Francisco', 2)
|
||||
power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
|
||||
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
|
||||
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
||||
power.d1002 USING power.meters TAGS('San Francisco', 3)
|
||||
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
|
||||
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
|
||||
power.d1003 USING power.meters TAGS('Los Angeles', 2)
|
||||
power.d1003 USING power.meters TAGS('California.LosAngeles', 2)
|
||||
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
|
||||
power.d1004 USING power.meters TAGS('Los Angeles', 3)
|
||||
power.d1004 USING power.meters TAGS('California.LosAngeles', 3)
|
||||
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
|
||||
|
||||
assert_eq!(inserted, 8);
|
||||
|
|
|
@ -52,7 +52,7 @@ taos>
|
|||
$ taosBenchmark
|
||||
```
|
||||
|
||||
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。
|
||||
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
|
||||
|
||||
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
|
||||
|
||||
|
@ -74,10 +74,10 @@ SELECT COUNT(*) FROM test.meters;
|
|||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
查询 location = "San Francisco" 的记录总条数:
|
||||
查询 location = "California.SanFrancisco" 的记录总条数:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
|
||||
|
|
|
@ -223,7 +223,7 @@ Query OK, 2 row(s) in set (0.003128s)
|
|||
$ taosBenchmark
|
||||
```
|
||||
|
||||
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。
|
||||
该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
|
||||
|
||||
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
|
||||
|
||||
|
@ -245,10 +245,10 @@ SELECT COUNT(*) FROM test.meters;
|
|||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
查询 location = "San Francisco" 的记录总条数:
|
||||
查询 location = "California.SanFrancisco" 的记录总条数:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "Calaifornia.SanFrancisco";
|
||||
```
|
||||
|
||||
查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
|
||||
|
|
|
@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
|||
let inserted = taos.exec_many([
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
||||
TAGS (`groupid` INT, `location` BINARY(24))",
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
|
|
|
@ -918,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**返回数据类型**:同应用的字段。
|
||||
|
||||
**适用数据类型**:数值类型,时间戳类型。
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
@ -933,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
|||
|
||||
**返回数据类型**:同应用的字段。
|
||||
|
||||
**适用数据类型**:数值类型,时间戳类型。
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ description: TDengine 保留关键字的详细列表
|
|||
|
||||
## 保留关键字
|
||||
|
||||
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符合``将关键字括起来使用,例如`ADD`。
|
||||
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符号 `` ` `` 将关键字括起来使用,例如 \`ADD\`。
|
||||
关键字列表如下:
|
||||
|
||||
### A
|
||||
|
|
|
@ -26,7 +26,7 @@ TDengine 分布式架构的逻辑结构图如下:
|
|||
|
||||
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
|
||||
|
||||
**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。
|
||||
**计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。当一个查询执行时,依赖执行计划,调度器会安排一个或多个 qnode 来一起执行。qnode 能从 vnode 获取数据,也可以将自己的计算结果发给其他 qnode 做进一步的处理。通过引入独立的计算节点,TDengine 实现了存储和计算分离。
|
||||
|
||||
**流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。
|
||||
|
||||
|
|
|
@ -45,8 +45,8 @@ enum {
|
|||
// clang-format on
|
||||
|
||||
typedef struct {
|
||||
TSKEY ts;
|
||||
uint64_t groupId;
|
||||
TSKEY ts;
|
||||
} SWinKey;
|
||||
|
||||
static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||
|
@ -68,6 +68,37 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
uint64_t groupId;
|
||||
TSKEY ts;
|
||||
int32_t exprIdx;
|
||||
} STupleKey;
|
||||
|
||||
static inline int STupleKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||
STupleKey* pTuple1 = (STupleKey*)pKey1;
|
||||
STupleKey* pTuple2 = (STupleKey*)pKey2;
|
||||
|
||||
if (pTuple1->groupId > pTuple2->groupId) {
|
||||
return 1;
|
||||
} else if (pTuple1->groupId < pTuple2->groupId) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pTuple1->ts > pTuple2->ts) {
|
||||
return 1;
|
||||
} else if (pTuple1->ts < pTuple2->ts) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pTuple1->exprIdx > pTuple2->exprIdx) {
|
||||
return 1;
|
||||
} else if (pTuple1->exprIdx < pTuple2->exprIdx) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
TMQ_MSG_TYPE__DUMMY = 0,
|
||||
TMQ_MSG_TYPE__POLL_RSP,
|
||||
|
|
|
@ -83,7 +83,10 @@ enum {
|
|||
|
||||
typedef struct SPoint1 {
|
||||
int64_t key;
|
||||
union{double val; char* ptr;};
|
||||
union {
|
||||
double val;
|
||||
char *ptr;
|
||||
};
|
||||
} SPoint1;
|
||||
|
||||
struct SqlFunctionCtx;
|
||||
|
@ -123,6 +126,7 @@ typedef struct SInputColumnInfoData {
|
|||
typedef struct SSerializeDataHandle {
|
||||
struct SDiskbasedBuf *pBuf;
|
||||
int32_t currentPage;
|
||||
void *pState;
|
||||
} SSerializeDataHandle;
|
||||
|
||||
// sql function runtime context
|
||||
|
@ -192,7 +196,8 @@ typedef struct SPoint {
|
|||
void *val;
|
||||
} SPoint;
|
||||
|
||||
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
|
||||
int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
|
||||
int32_t inputType);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// udf api
|
||||
|
|
|
@ -52,10 +52,14 @@ int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstrea
|
|||
|
||||
void qClearSubplanExecutionNode(SSubplan* pSubplan);
|
||||
|
||||
// Convert to subplan to string for the scheduler to send to the executor
|
||||
// Convert to subplan to display string for the scheduler to send to the executor
|
||||
int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
|
||||
int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan);
|
||||
|
||||
// Convert to subplan to msg for the scheduler to send to the executor
|
||||
int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
|
||||
int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan);
|
||||
|
||||
char* qQueryPlanToString(const SQueryPlan* pPlan);
|
||||
SQueryPlan* qStringToQueryPlan(const char* pStr);
|
||||
|
||||
|
|
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "tdatablock.h"
|
||||
#include "tdbInt.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef _STREAM_STATE_H_
|
||||
#define _STREAM_STATE_H_
|
||||
|
||||
typedef struct SStreamTask SStreamTask;
|
||||
|
||||
// incremental state storage
|
||||
typedef struct {
|
||||
SStreamTask* pOwner;
|
||||
TDB* db;
|
||||
TTB* pStateDb;
|
||||
TTB* pFuncStateDb;
|
||||
TXN txn;
|
||||
} SStreamState;
|
||||
|
||||
SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
|
||||
void streamStateClose(SStreamState* pState);
|
||||
int32_t streamStateBegin(SStreamState* pState);
|
||||
int32_t streamStateCommit(SStreamState* pState);
|
||||
int32_t streamStateAbort(SStreamState* pState);
|
||||
|
||||
typedef struct {
|
||||
TBC* pCur;
|
||||
} SStreamStateCur;
|
||||
|
||||
#if 1
|
||||
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen);
|
||||
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
|
||||
|
||||
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
|
||||
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
|
||||
void streamFreeVal(void* val);
|
||||
|
||||
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
|
||||
void streamStateFreeCur(SStreamStateCur* pCur);
|
||||
|
||||
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
|
||||
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
|
||||
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
|
||||
|
||||
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
|
||||
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* ifndef _STREAM_STATE_H_ */
|
|
@ -16,6 +16,7 @@
|
|||
#include "executor.h"
|
||||
#include "os.h"
|
||||
#include "query.h"
|
||||
#include "streamState.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tdbInt.h"
|
||||
#include "tmsg.h"
|
||||
|
@ -263,14 +264,6 @@ typedef struct {
|
|||
SArray* checkpointVer;
|
||||
} SStreamRecoveringState;
|
||||
|
||||
// incremental state storage
|
||||
typedef struct {
|
||||
SStreamTask* pOwner;
|
||||
TDB* db;
|
||||
TTB* pStateDb;
|
||||
TXN txn;
|
||||
} SStreamState;
|
||||
|
||||
typedef struct SStreamTask {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
|
@ -540,39 +533,6 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
|
|||
int32_t streamMetaRollBack(SStreamMeta* pMeta);
|
||||
int32_t streamLoadTasks(SStreamMeta* pMeta);
|
||||
|
||||
SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
|
||||
void streamStateClose(SStreamState* pState);
|
||||
int32_t streamStateBegin(SStreamState* pState);
|
||||
int32_t streamStateCommit(SStreamState* pState);
|
||||
int32_t streamStateAbort(SStreamState* pState);
|
||||
|
||||
typedef struct {
|
||||
TBC* pCur;
|
||||
} SStreamStateCur;
|
||||
|
||||
#if 1
|
||||
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
|
||||
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
|
||||
void streamFreeVal(void* val);
|
||||
|
||||
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
|
||||
void streamStateFreeCur(SStreamStateCur* pCur);
|
||||
|
||||
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
|
||||
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
|
||||
int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
|
||||
|
||||
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
|
||||
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -285,6 +285,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
|
||||
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
|
||||
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
|
||||
#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF)
|
||||
|
||||
// mnode-stream
|
||||
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
|
||||
|
@ -577,6 +578,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
|
||||
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
|
||||
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
|
||||
#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805)
|
||||
|
||||
//udf
|
||||
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
|
||||
|
|
|
@ -31,7 +31,6 @@ typedef struct SSchedMsg {
|
|||
void *thandle;
|
||||
} SSchedMsg;
|
||||
|
||||
|
||||
typedef struct {
|
||||
char label[TSDB_LABEL_LEN];
|
||||
tsem_t emptySem;
|
||||
|
@ -48,7 +47,6 @@ typedef struct {
|
|||
void *pTimer;
|
||||
} SSchedQueue;
|
||||
|
||||
|
||||
/**
|
||||
* Create a thread-safe ring-buffer based task queue and return the instance. A thread
|
||||
* pool will be created to consume the messages in the queue.
|
||||
|
@ -83,7 +81,7 @@ void taosCleanUpScheduler(void *queueScheduler);
|
|||
* @param queueScheduler the queue scheduler instance
|
||||
* @param pMsg the message for the task
|
||||
*/
|
||||
void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
|
||||
int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -840,14 +840,20 @@ function updateProduct() {
|
|||
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
||||
echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
|
||||
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
||||
if [ ${openresty_work} = 'true' ]; then
|
||||
|
@ -926,14 +932,20 @@ function installProduct() {
|
|||
# Ask if to start the service
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
|
||||
echo -e "${GREEN_DARK}To configure ${adapterName} (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
|
||||
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
|
|
|
@ -609,14 +609,20 @@ function update_TDengine() {
|
|||
echo
|
||||
|
||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
||||
echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
||||
|
@ -649,14 +655,20 @@ function install_TDengine() {
|
|||
echo -e "\033[44;32;1m${productName} is installed successfully!${NC}"
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
|
||||
echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
|
||||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
|
||||
|
|
|
@ -414,6 +414,9 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
|
|||
int32_t code = hbBuildQueryDesc(hbBasic, pTscObj);
|
||||
if (code) {
|
||||
releaseTscObj(connKey->tscRid);
|
||||
if (hbBasic->queryDesc) {
|
||||
taosArrayDestroyEx(hbBasic->queryDesc, tFreeClientHbQueryDesc);
|
||||
}
|
||||
taosMemoryFree(hbBasic);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -854,6 +854,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
|||
pRequest->metric.resultReady = taosGetTimestampUs();
|
||||
|
||||
if (pResult) {
|
||||
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
|
||||
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
|
||||
}
|
||||
|
||||
|
@ -1384,6 +1385,7 @@ int32_t doProcessMsgFromServer(void* param) {
|
|||
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
destroySendMsgInfo(pSendInfo);
|
||||
|
||||
taosMemoryFree(arg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1399,7 +1401,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
|||
arg->msg = *pMsg;
|
||||
arg->pEpset = tEpSet;
|
||||
|
||||
taosAsyncExec(doProcessMsgFromServer, arg, NULL);
|
||||
if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
|
||||
tscError("failed to sched msg to tsc, tsc ready to quit");
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosMemoryFree(arg->pEpset);
|
||||
taosMemoryFree(arg);
|
||||
}
|
||||
}
|
||||
|
||||
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
|
||||
|
|
|
@ -870,11 +870,13 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
|
|||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pRequest->code = code;
|
||||
taosMemoryFreeClear(pResultInfo->pData);
|
||||
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
||||
taosMemoryFreeClear(pResultInfo->pData);
|
||||
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -3347,7 +3347,13 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
|
||||
void tFreeSTableMetaRsp(void *pRsp) {
|
||||
if (NULL == pRsp) {
|
||||
return;
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas);
|
||||
}
|
||||
|
||||
void tFreeSTableIndexRsp(void *info) {
|
||||
if (NULL == info) {
|
||||
|
@ -4723,9 +4729,8 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
|||
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
|
||||
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
|
||||
if (tEncodeU32(&encoder, pReq->phyLen) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->msg) < 0) return -1;
|
||||
if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1;
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
|
@ -4755,13 +4760,12 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
|||
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
|
||||
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
||||
if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1;
|
||||
if (tDecodeU32(&decoder, &pReq->phyLen) < 0) return -1;
|
||||
pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1);
|
||||
if (NULL == pReq->sql) return -1;
|
||||
pReq->msg = taosMemoryCalloc(1, pReq->phyLen + 1);
|
||||
if (NULL == pReq->msg) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->msg) < 0) return -1;
|
||||
uint64_t msgLen = 0;
|
||||
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1;
|
||||
pReq->phyLen = msgLen;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
|
@ -5441,6 +5445,8 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
|
|||
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
|
||||
SSubmitBlkRsp *sRsp = pRsp->pBlocks + i;
|
||||
taosMemoryFree(sRsp->tblFName);
|
||||
tFreeSTableMetaRsp(sRsp->pMeta);
|
||||
taosMemoryFree(sRsp->pMeta);
|
||||
}
|
||||
|
||||
taosMemoryFree(pRsp->pBlocks);
|
||||
|
|
|
@ -900,6 +900,7 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName)
|
|||
// iter all vnode to delete handle
|
||||
if (taosHashGetSize(pSub->consumerHash) != 0) {
|
||||
sdbRelease(pSdb, pSub);
|
||||
terrno = TSDB_CODE_MND_IN_REBALANCE;
|
||||
return -1;
|
||||
}
|
||||
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
|
||||
|
|
|
@ -713,7 +713,6 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
|
|||
mndReleaseTopic(pMnode, pTopic);
|
||||
|
||||
if (code != 0) {
|
||||
terrno = code;
|
||||
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -99,7 +99,16 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
|
|||
}
|
||||
|
||||
void ctgFreeQNode(SCtgQNode *node) {
|
||||
//TODO
|
||||
if (NULL == node) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (node->op) {
|
||||
taosMemoryFree(node->op->data);
|
||||
taosMemoryFree(node->op);
|
||||
}
|
||||
|
||||
taosMemoryFree(node);
|
||||
}
|
||||
|
||||
void ctgFreeSTableIndex(void *info) {
|
||||
|
|
|
@ -88,6 +88,7 @@ struct SqlFunctionCtx;
|
|||
size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
|
||||
void initResultRowInfo(SResultRowInfo* pResultRowInfo);
|
||||
void closeResultRow(SResultRow* pResultRow);
|
||||
void resetResultRow(SResultRow* pResultRow, size_t entrySize);
|
||||
|
||||
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
|
||||
|
||||
|
|
|
@ -585,11 +585,12 @@ typedef struct SIntervalAggOperatorInfo {
|
|||
typedef struct SMergeAlignedIntervalAggOperatorInfo {
|
||||
SIntervalAggOperatorInfo* intervalAggOperatorInfo;
|
||||
|
||||
bool hasGroupId;
|
||||
// bool hasGroupId;
|
||||
uint64_t groupId; // current groupId
|
||||
int64_t curTs; // current ts
|
||||
SSDataBlock* prefetchedBlock;
|
||||
SNode* pCondition;
|
||||
SResultRow* pResultRow;
|
||||
} SMergeAlignedIntervalAggOperatorInfo;
|
||||
|
||||
typedef struct SStreamIntervalOperatorInfo {
|
||||
|
@ -649,7 +650,6 @@ typedef struct SAggOperatorInfo {
|
|||
} SAggOperatorInfo;
|
||||
|
||||
typedef struct SProjectOperatorInfo {
|
||||
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
SNode* pFilterNode; // filter info, which is push down by optimizer
|
||||
|
@ -691,7 +691,6 @@ typedef struct SFillOperatorInfo {
|
|||
} SFillOperatorInfo;
|
||||
|
||||
typedef struct SGroupbyOperatorInfo {
|
||||
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
|
||||
|
@ -738,7 +737,6 @@ typedef struct SWindowRowsSup {
|
|||
} SWindowRowsSup;
|
||||
|
||||
typedef struct SSessionAggOperatorInfo {
|
||||
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
|
||||
|
@ -827,7 +825,6 @@ typedef struct SStateWindowOperatorInfo {
|
|||
SStateKeys stateKey;
|
||||
int32_t tsSlotId; // primary timestamp column slot id
|
||||
STimeWindowAggSupp twAggSup;
|
||||
// bool reptScan;
|
||||
const SNode* pCondition;
|
||||
} SStateWindowOperatorInfo;
|
||||
|
||||
|
@ -848,24 +845,6 @@ typedef struct SStreamStateAggOperatorInfo {
|
|||
bool ignoreExpiredData;
|
||||
} SStreamStateAggOperatorInfo;
|
||||
|
||||
typedef struct SSortedMergeOperatorInfo {
|
||||
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
|
||||
SArray* pSortInfo;
|
||||
int32_t numOfSources;
|
||||
SSortHandle* pSortHandle;
|
||||
int32_t bufPageSize;
|
||||
uint32_t sortBufSize; // max buffer size for in-memory sort
|
||||
int32_t resultRowFactor;
|
||||
bool hasGroupVal;
|
||||
SDiskbasedBuf* pTupleStore; // keep the final results
|
||||
int32_t numOfResPerPage;
|
||||
char** groupVal;
|
||||
SArray* groupInfo;
|
||||
} SSortedMergeOperatorInfo;
|
||||
|
||||
typedef struct SSortOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
uint32_t sortBufSize; // max buffer size for in-memory sort
|
||||
|
@ -873,7 +852,6 @@ typedef struct SSortOperatorInfo {
|
|||
SSortHandle* pSortHandle;
|
||||
SArray* pColMatchInfo; // for index map from table scan output
|
||||
int32_t bufPageSize;
|
||||
|
||||
int64_t startTs; // sort start time
|
||||
uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
|
||||
SLimitInfo limitInfo;
|
||||
|
@ -909,7 +887,6 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
|
|||
__optr_decode_fn_t decode, __optr_explain_fn_t explain);
|
||||
|
||||
int32_t operatorDummyOpenFn(SOperatorInfo* pOperator);
|
||||
void operatorDummyCloseFn(void* param, int32_t numOfCols);
|
||||
int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num);
|
||||
|
||||
void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock);
|
||||
|
@ -944,7 +921,6 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
|
|||
SSDataBlock* pBlock, const char* idStr);
|
||||
|
||||
void cleanupAggSup(SAggSupporter* pAggSup);
|
||||
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
|
||||
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
|
||||
void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
|
||||
|
||||
|
@ -1091,10 +1067,8 @@ void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEn
|
|||
void printDataBlock(SSDataBlock* pBlock, const char* flag);
|
||||
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
|
||||
|
||||
int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
|
||||
SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs,
|
||||
const int32_t* rowCellOffset, SSDataBlock* pBlock,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
|
||||
SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
|
||||
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
|
||||
|
|
|
@ -33,6 +33,17 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) {
|
|||
|
||||
void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }
|
||||
|
||||
void resetResultRow(SResultRow* pResultRow, size_t entrySize) {
|
||||
pResultRow->numOfRows = 0;
|
||||
pResultRow->closed = false;
|
||||
pResultRow->endInterp = false;
|
||||
pResultRow->startInterp = false;
|
||||
|
||||
if (entrySize > 0) {
|
||||
memset(pResultRow->pEntryInfo, 0, entrySize);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO refactor: use macro
|
||||
SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) {
|
||||
assert(index >= 0 && offset != NULL);
|
||||
|
@ -799,9 +810,15 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
|
|||
taosMemoryFreeClear(pColInfoData);
|
||||
}
|
||||
|
||||
for (int i = 0; i < taosArrayGetSize(res); i++) {
|
||||
size_t numOfTables = taosArrayGetSize(res);
|
||||
for (int i = 0; i < numOfTables; i++) {
|
||||
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
|
||||
taosArrayPush(pListInfo->pTableList, &info);
|
||||
void* p = taosArrayPush(pListInfo->pTableList, &info);
|
||||
if (p == NULL) {
|
||||
taosArrayDestroy(res);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
qDebug("tagfilter get uid:%ld", info.uid);
|
||||
}
|
||||
|
||||
|
|
|
@ -132,8 +132,6 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
|
|||
return fpSet;
|
||||
}
|
||||
|
||||
void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
|
||||
|
||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo);
|
||||
|
||||
|
@ -1269,33 +1267,12 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu
|
|||
}
|
||||
}
|
||||
|
||||
// todo extract method with copytoSSDataBlock
|
||||
int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
|
||||
SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs,
|
||||
const int32_t* rowCellOffset, SSDataBlock* pBlock,
|
||||
SExecTaskInfo* pTaskInfo) {
|
||||
SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId);
|
||||
SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset);
|
||||
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
|
||||
if (pRow->numOfRows == 0) {
|
||||
releaseBufPage(pBuf, page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
|
||||
int32_t code = blockDataEnsureCapacity(pBlock, pBlock->info.capacity * 1.25);
|
||||
if (TAOS_FAILED(code)) {
|
||||
releaseBufPage(pBuf, page);
|
||||
qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
}
|
||||
|
||||
static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx,
|
||||
SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) {
|
||||
for (int32_t j = 0; j < numOfExprs; ++j) {
|
||||
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
|
||||
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
|
||||
if (pCtx[j].fpSet.finalize) {
|
||||
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
|
||||
if (TAOS_FAILED(code)) {
|
||||
|
@ -1303,7 +1280,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
|
|||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
|
||||
// do nothing, todo refactor
|
||||
// do nothing
|
||||
} else {
|
||||
// expand the result into multiple rows. E.g., _wstart, top(k, 20)
|
||||
// the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
|
||||
|
@ -1314,10 +1291,40 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// todo refactor. SResultRow has direct pointer in miainfo
|
||||
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
|
||||
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
|
||||
SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId);
|
||||
SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset);
|
||||
|
||||
SqlFunctionCtx* pCtx = pSup->pCtx;
|
||||
SExprInfo* pExprInfo = pSup->pExprInfo;
|
||||
const int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
|
||||
|
||||
doUpdateNumOfRows(pCtx, pRow, pSup->numOfExprs, rowEntryOffset);
|
||||
if (pRow->numOfRows == 0) {
|
||||
releaseBufPage(pBuf, page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t size = pBlock->info.capacity;
|
||||
while (pBlock->info.rows + pRow->numOfRows > size) {
|
||||
size = size * 1.25;
|
||||
}
|
||||
|
||||
int32_t code = blockDataEnsureCapacity(pBlock, size);
|
||||
if (TAOS_FAILED(code)) {
|
||||
releaseBufPage(pBuf, page);
|
||||
qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
doCopyResultToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo);
|
||||
|
||||
releaseBufPage(pBuf, page);
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1362,32 +1369,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
|
|||
}
|
||||
|
||||
pGroupResInfo->index += 1;
|
||||
|
||||
for (int32_t j = 0; j < numOfExprs; ++j) {
|
||||
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
|
||||
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
|
||||
if (pCtx[j].fpSet.finalize) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("\npage_finalize %d", numOfExprs);
|
||||
#endif
|
||||
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
|
||||
if (TAOS_FAILED(code)) {
|
||||
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
|
||||
// do nothing, todo refactor
|
||||
} else {
|
||||
// expand the result into multiple rows. E.g., _wstart, top(k, 20)
|
||||
// the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
|
||||
char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
|
||||
for (int32_t k = 0; k < pRow->numOfRows; ++k) {
|
||||
colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
|
||||
}
|
||||
}
|
||||
}
|
||||
doCopyResultToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo);
|
||||
|
||||
releaseBufPage(pBuf, page);
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
|
@ -1727,22 +1709,6 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
|
|||
|
||||
static void doDestroyTableList(STableListInfo* pTableqinfoList);
|
||||
|
||||
static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) {
|
||||
#if 0
|
||||
if (order == TSDB_ORDER_ASC) {
|
||||
assert(
|
||||
(pTableQueryInfo->win.skey <= pTableQueryInfo->win.ekey) &&
|
||||
(pTableQueryInfo->lastKey >= pTaskInfo->window.skey) &&
|
||||
(pTableQueryInfo->win.skey >= pTaskInfo->window.skey && pTableQueryInfo->win.ekey <= pTaskInfo->window.ekey));
|
||||
} else {
|
||||
assert(
|
||||
(pTableQueryInfo->win.skey >= pTableQueryInfo->win.ekey) &&
|
||||
(pTableQueryInfo->lastKey <= pTaskInfo->window.skey) &&
|
||||
(pTableQueryInfo->win.skey <= pTaskInfo->window.skey && pTableQueryInfo->win.ekey >= pTaskInfo->window.ekey));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
typedef struct SFetchRspHandleWrapper {
|
||||
uint32_t exchangeId;
|
||||
int32_t sourceIndex;
|
||||
|
@ -2307,21 +2273,6 @@ _error:
|
|||
static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
|
||||
const char* pKey);
|
||||
|
||||
static void destroySortedMergeOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SSortedMergeOperatorInfo* pInfo = (SSortedMergeOperatorInfo*)param;
|
||||
taosArrayDestroy(pInfo->pSortInfo);
|
||||
taosArrayDestroy(pInfo->groupInfo);
|
||||
|
||||
if (pInfo->pSortHandle != NULL) {
|
||||
tsortDestroySortHandle(pInfo->pSortHandle);
|
||||
}
|
||||
|
||||
blockDataDestroy(pInfo->binfo.pRes);
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int32_t rowIndex) {
|
||||
size_t size = taosArrayGetSize(groupInfo);
|
||||
if (size == 0) {
|
||||
|
@@ -2357,41 +2308,6 @@ static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int3
  return 0;
}

static void doMergeResultImpl(SSortedMergeOperatorInfo* pInfo, SqlFunctionCtx* pCtx, int32_t numOfExpr,
                              int32_t rowIndex) {
  for (int32_t j = 0; j < numOfExpr; ++j) {  // TODO set row index
    // pCtx[j].startRow = rowIndex;
  }

  for (int32_t j = 0; j < numOfExpr; ++j) {
    int32_t functionId = pCtx[j].functionId;
    // pCtx[j].fpSet->addInput(&pCtx[j]);

    // if (functionId < 0) {
    //   SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
    //   doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
    // } else {
    //   assert(!TSDB_FUNC_IS_SCALAR(functionId));
    //   aAggs[functionId].mergeFunc(&pCtx[j]);
    // }
  }
}

static void doFinalizeResultImpl(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
  for (int32_t j = 0; j < numOfExpr; ++j) {
    int32_t functionId = pCtx[j].functionId;
    // if (functionId == FUNC_TAG_DUMMY || functionId == FUNC_TS_DUMMY) {
    //   continue;
    // }

    // if (functionId < 0) {
    //   SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
    //   doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
    // } else {
    //   pCtx[j].fpSet.finalize(&pCtx[j]);
  }
}

static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex) {
  int32_t size = (int32_t)taosArrayGetSize(pColumnList);
@ -2406,210 +2322,6 @@ static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock
|
|||
return true;
|
||||
}
|
||||
|
||||
static void doMergeImpl(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
|
||||
SSortedMergeOperatorInfo* pInfo = pOperator->info;
|
||||
|
||||
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
|
||||
|
||||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
if (!pInfo->hasGroupVal) {
|
||||
ASSERT(i == 0);
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
|
||||
pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i);
|
||||
} else {
|
||||
if (needToMerge(pBlock, pInfo->groupInfo, pInfo->groupVal, i)) {
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
|
||||
} else {
|
||||
doFinalizeResultImpl(pCtx, numOfExpr);
|
||||
int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL);
|
||||
// setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows);
|
||||
|
||||
// TODO check for available buffer;
|
||||
|
||||
// next group info data
|
||||
pInfo->binfo.pRes->info.rows += numOfRows;
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
if (pCtx[j].functionId < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
pCtx[j].fpSet.process(&pCtx[j]);
|
||||
}
|
||||
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
|
||||
pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* doMerge(SOperatorInfo* pOperator) {
|
||||
SSortedMergeOperatorInfo* pInfo = pOperator->info;
|
||||
SSortHandle* pHandle = pInfo->pSortHandle;
|
||||
|
||||
SSDataBlock* pDataBlock = createOneDataBlock(pInfo->binfo.pRes, false);
|
||||
blockDataEnsureCapacity(pDataBlock, pOperator->resultInfo.capacity);
|
||||
|
||||
while (1) {
|
||||
blockDataCleanup(pDataBlock);
|
||||
while (1) {
|
||||
STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
|
||||
if (pTupleHandle == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
// build datablock for merge for one group
|
||||
appendOneRowToDataBlock(pDataBlock, pTupleHandle);
|
||||
if (pDataBlock->info.rows >= pOperator->resultInfo.capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pDataBlock->info.rows == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
setInputDataBlock(pOperator, pOperator->exprSupp.pCtx, pDataBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
|
||||
// updateOutputBuf(&pInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor,
|
||||
// pOperator->pRuntimeEnv, true);
|
||||
doMergeImpl(pOperator, pOperator->exprSupp.numOfExprs, pDataBlock);
|
||||
// flush to tuple store, and after all data have been handled, return to upstream node or sink node
|
||||
}
|
||||
|
||||
doFinalizeResultImpl(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs);
|
||||
int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL);
|
||||
// setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows);
|
||||
|
||||
// TODO check for available buffer;
|
||||
|
||||
// next group info data
|
||||
pInfo->binfo.pRes->info.rows += numOfRows;
|
||||
return (pInfo->binfo.pRes->info.rows > 0) ? pInfo->binfo.pRes : NULL;
|
||||
}
|
||||
|
||||
SSDataBlock* getSortedMergeBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
|
||||
SArray* pColMatchInfo, SSortedMergeOperatorInfo* pInfo) {
|
||||
blockDataCleanup(pDataBlock);
|
||||
|
||||
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
|
||||
if (p == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
blockDataEnsureCapacity(p, capacity);
|
||||
|
||||
while (1) {
|
||||
STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
|
||||
if (pTupleHandle == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
appendOneRowToDataBlock(p, pTupleHandle);
|
||||
if (p->info.rows >= capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (p->info.rows > 0) {
|
||||
int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i);
|
||||
ASSERT(pmInfo->matchType == COL_MATCH_FROM_SLOT_ID);
|
||||
|
||||
SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId);
|
||||
SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId);
|
||||
colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
|
||||
}
|
||||
|
||||
pDataBlock->info.rows = p->info.rows;
|
||||
pDataBlock->info.capacity = p->info.rows;
|
||||
}
|
||||
|
||||
blockDataDestroy(p);
|
||||
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
|
||||
}
|
||||
|
||||
static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SSortedMergeOperatorInfo* pInfo = pOperator->info;
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
return getSortedMergeBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, NULL, pInfo);
|
||||
}
|
||||
|
||||
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
|
||||
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
|
||||
pInfo->binfo.pRes, "GET_TASKID(pTaskInfo)");
|
||||
|
||||
tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
|
||||
|
||||
for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
|
||||
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
|
||||
ps->param = pOperator->pDownstream[i];
|
||||
tsortAddSource(pInfo->pSortHandle, ps);
|
||||
}
|
||||
|
||||
int32_t code = tsortOpen(pInfo->pSortHandle);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
}
|
||||
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
return doMerge(pOperator);
|
||||
}
|
||||
|
||||
static int32_t initGroupCol(SExprInfo* pExprInfo, int32_t numOfCols, SArray* pGroupInfo,
|
||||
SSortedMergeOperatorInfo* pInfo) {
|
||||
if (pGroupInfo == NULL || taosArrayGetSize(pGroupInfo) == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t len = 0;
|
||||
SArray* plist = taosArrayInit(3, sizeof(SColumn));
|
||||
pInfo->groupInfo = taosArrayInit(3, sizeof(int32_t));
|
||||
|
||||
if (plist == NULL || pInfo->groupInfo == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
size_t numOfGroupCol = taosArrayGetSize(pInfo->groupInfo);
|
||||
for (int32_t i = 0; i < numOfGroupCol; ++i) {
|
||||
SColumn* pCol = taosArrayGet(pGroupInfo, i);
|
||||
for (int32_t j = 0; j < numOfCols; ++j) {
|
||||
SExprInfo* pe = &pExprInfo[j];
|
||||
if (pe->base.resSchema.slotId == pCol->colId) {
|
||||
taosArrayPush(plist, pCol);
|
||||
taosArrayPush(pInfo->groupInfo, &j);
|
||||
len += pCol->bytes;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(taosArrayGetSize(pGroupInfo) == taosArrayGetSize(plist));
|
||||
|
||||
pInfo->groupVal = taosMemoryCalloc(1, (POINTER_BYTES * numOfGroupCol + len));
|
||||
if (pInfo->groupVal == NULL) {
|
||||
taosArrayDestroy(plist);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
int32_t offset = 0;
|
||||
char* start = (char*)(pInfo->groupVal + (POINTER_BYTES * numOfGroupCol));
|
||||
for (int32_t i = 0; i < numOfGroupCol; ++i) {
|
||||
pInfo->groupVal[i] = start + offset;
|
||||
SColumn* pCol = taosArrayGet(plist, i);
|
||||
offset += pCol->bytes;
|
||||
}
|
||||
|
||||
taosArrayDestroy(plist);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) {
|
||||
// todo add more information about exchange operation
|
||||
int32_t type = pOperator->operatorType;
|
||||
|
@ -3342,13 +3054,6 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
|
|||
pInfo->pRes = blockDataDestroy(pInfo->pRes);
|
||||
}
|
||||
|
||||
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SOptrBasicInfo* pInfo = (SOptrBasicInfo*)param;
|
||||
cleanupBasicInfo(pInfo);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
static void freeItem(void* pItem) {
|
||||
void** p = pItem;
|
||||
if (*p != NULL) {
|
||||
|
@@ -3855,7 +3560,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
      STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
      int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo);
      if (code != TSDB_CODE_SUCCESS) {
        pTaskInfo->code = terrno;
        pTaskInfo->code = code;
        qError("failed to getTableList, code: %s", tstrerror(code));
        return NULL;
      }

@@ -46,19 +46,6 @@ static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, co
                                              uint64_t groupId);
static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult);

///*
// * There are two cases to handle:
// *
// * 1. Query range is not set yet (queryRangeSet = 0). we need to set the query range info, including
// *    pQueryAttr->lastKey, pQueryAttr->window.skey, and pQueryAttr->eKey.
// * 2. Query range is set and query is in progress. There may be another result with the same query ranges to be
// *    merged during merge stage. In this case, we need the pTableQueryInfo->lastResRows to decide if there
// *    is a previous result generated or not.
// */
// static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) {
//   // do nothing
//}

static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { return tsCols == NULL ? win->skey : tsCols[0]; }

static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, bool masterscan,
@@ -3011,9 +2998,9 @@ static void addRetriveWindow(SArray* wins, SStreamFinalIntervalOperatorInfo* pIn
      SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId};
      // add pull data request
      savePullWindow(&pull, pInfo->pPullWins);
      int32_t size = taosArrayGetSize(pInfo->pChildren);
      addPullWindow(pInfo->pPullDataMap, winKey, size);
      qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size);
      int32_t size1 = taosArrayGetSize(pInfo->pChildren);
      addPullWindow(pInfo->pPullDataMap, winKey, size1);
      qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size1);
    }
  }
}
@@ -4895,72 +4882,65 @@ _error:
  return NULL;
}

void destroyMergeAlignedIntervalOperatorInfo(void* param) {
void destroyMAIOperatorInfo(void* param) {
  SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param;
  destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo);
  taosMemoryFreeClear(param);
}

static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId,
                                                SSDataBlock* pResultBlock, TSKEY wstartTs) {
  SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
static SResultRow* doSetSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, SAggSupporter* pSup) {
  SResultRow* pResult = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
  pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
  return pResult;
}

  SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
  SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
  SExprSupp* pSup = &pOperatorInfo->exprSupp;

  SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId);
  SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(
      iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
  ASSERT(p1 != NULL);

  finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs,
                                       pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
  tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
  ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
static int32_t setSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, SResultRow** pResult,
                                       SExprSupp* pExprSup, SAggSupporter* pAggSup) {
  if (*pResult == NULL) {
    *pResult = doSetSingleOutputTupleBuf(pResultRowInfo, pAggSup);
    if (*pResult == NULL) {
      return terrno;
    }
  }

  // set time window for current result
  (*pResult)->win = (*win);
  setResultRowInitCtx((*pResult), pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset);
  return TSDB_CODE_SUCCESS;
}

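The new `setSingleOutputTupleBuf` path above keeps exactly one result row and resets it between aligned windows instead of looking each window up in a hash table. The standalone sketch below illustrates that reuse-and-reset idea; `ResultRow`, `reset_result`, and the values are hypothetical stand-ins, not the operator's real state.

```c
#include <stdio.h>

// Illustrative stand-in for a reusable single-row aggregation buffer.
typedef struct {
  long long winStart;
  double    sum;
  int       count;
} ResultRow;

// Clear only the aggregated part and keep the buffer allocated,
// mirroring the reset done between consecutive aligned windows.
static void reset_result(ResultRow *row) {
  row->sum = 0;
  row->count = 0;
}

int main(void) {
  ResultRow row = {0};
  long long windows[2] = {0, 10};
  for (int w = 0; w < 2; ++w) {
    row.winStart = windows[w];
    for (int i = 0; i < 5; ++i) {
      row.sum += i;
      row.count++;
    }
    printf("win=%lld avg=%.2f\n", row.winStart, row.sum / row.count);
    reset_result(&row);  // reuse the same buffer for the next window
  }
  return 0;
}
```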
static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo,
|
||||
SSDataBlock* pBlock, int32_t scanFlag, SSDataBlock* pResultBlock) {
|
||||
SSDataBlock* pBlock, SSDataBlock* pResultBlock) {
|
||||
SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
|
||||
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
|
||||
|
||||
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
|
||||
SExprSupp* pSup = &pOperatorInfo->exprSupp;
|
||||
SInterval* pInterval = &iaInfo->interval;
|
||||
|
||||
int32_t startPos = 0;
|
||||
int32_t numOfOutput = pSup->numOfExprs;
|
||||
int64_t* tsCols = extractTsCol(pBlock, iaInfo);
|
||||
uint64_t tableGroupId = pBlock->info.groupId;
|
||||
SResultRow* pResult = NULL;
|
||||
|
||||
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
|
||||
|
||||
// a previous result already exists
|
||||
if (miaInfo->curTs != INT64_MIN) {
|
||||
ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
|
||||
|
||||
if (ts != miaInfo->curTs) {
|
||||
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
|
||||
finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo);
|
||||
resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow));
|
||||
miaInfo->curTs = ts;
|
||||
}
|
||||
} else {
|
||||
miaInfo->curTs = ts;
|
||||
ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
|
||||
}
|
||||
|
||||
STimeWindow win = {0};
|
||||
win.skey = miaInfo->curTs;
|
||||
win.ekey =
|
||||
taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1;
|
||||
win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
|
||||
|
||||
// TODO: remove the hash table (groupid + winkey => result row position)
|
||||
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
|
||||
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
|
||||
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
int32_t ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup);
|
||||
if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) {
|
||||
T_LONG_JMP(pTaskInfo->env, ret);
|
||||
}
|
||||
|
||||
int32_t currPos = startPos;
|
||||
|
@ -4973,21 +4953,19 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
|
|||
|
||||
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
|
||||
doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
|
||||
pBlock->info.rows, numOfOutput);
|
||||
pBlock->info.rows, pSup->numOfExprs);
|
||||
|
||||
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
|
||||
finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo);
|
||||
resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow));
|
||||
miaInfo->curTs = tsCols[currPos];
|
||||
|
||||
currWin.skey = miaInfo->curTs;
|
||||
currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit,
|
||||
iaInfo->interval.precision) -
|
||||
1;
|
||||
currWin.ekey = taosTimeAdd(currWin.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
|
||||
|
||||
startPos = currPos;
|
||||
ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
|
||||
numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
|
||||
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup);
|
||||
if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) {
|
||||
T_LONG_JMP(pTaskInfo->env, ret);
|
||||
}
|
||||
|
||||
miaInfo->curTs = currWin.skey;
|
||||
|
@ -4995,68 +4973,79 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
|
|||
|
||||
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
|
||||
doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
|
||||
pBlock->info.rows, numOfOutput);
|
||||
pBlock->info.rows, pSup->numOfExprs);
|
||||
}
|
||||
|
||||
static void cleanupAfterGroupResultGen(SMergeAlignedIntervalAggOperatorInfo* pMiaInfo, SSDataBlock* pRes) {
|
||||
pRes->info.groupId = pMiaInfo->groupId;
|
||||
pMiaInfo->curTs = INT64_MIN;
|
||||
pMiaInfo->groupId = 0;
|
||||
}
|
||||
|
||||
static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperator->info;
|
||||
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
|
||||
SMergeAlignedIntervalAggOperatorInfo* pMiaInfo = pOperator->info;
|
||||
SIntervalAggOperatorInfo* pIaInfo = pMiaInfo->intervalAggOperatorInfo;
|
||||
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
SSDataBlock* pRes = iaInfo->binfo.pRes;
|
||||
|
||||
SSDataBlock* pRes = pIaInfo->binfo.pRes;
|
||||
SResultRowInfo* pResultRowInfo = &pIaInfo->binfo.resultRowInfo;
|
||||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||
int32_t scanFlag = MAIN_SCAN;
|
||||
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = NULL;
|
||||
if (miaInfo->prefetchedBlock == NULL) {
|
||||
if (pMiaInfo->prefetchedBlock == NULL) {
|
||||
pBlock = downstream->fpSet.getNextFn(downstream);
|
||||
} else {
|
||||
pBlock = miaInfo->prefetchedBlock;
|
||||
miaInfo->prefetchedBlock = NULL;
|
||||
pBlock = pMiaInfo->prefetchedBlock;
|
||||
pMiaInfo->prefetchedBlock = NULL;
|
||||
|
||||
miaInfo->groupId = pBlock->info.groupId;
|
||||
pMiaInfo->groupId = pBlock->info.groupId;
|
||||
}
|
||||
|
||||
// no data exists, all query processing is done
|
||||
if (pBlock == NULL) {
|
||||
// close last unfinalized time window
|
||||
if (miaInfo->curTs != INT64_MIN) {
|
||||
ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
|
||||
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
|
||||
miaInfo->curTs = INT64_MIN;
|
||||
// close last unclosed time window
|
||||
if (pMiaInfo->curTs != INT64_MIN) {
|
||||
finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo);
|
||||
resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow));
|
||||
cleanupAfterGroupResultGen(pMiaInfo, pRes);
|
||||
}
|
||||
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!miaInfo->hasGroupId) {
|
||||
miaInfo->hasGroupId = true;
|
||||
miaInfo->groupId = pBlock->info.groupId;
|
||||
} else if (miaInfo->groupId != pBlock->info.groupId) {
|
||||
if (pMiaInfo->groupId == 0) {
|
||||
if (pMiaInfo->groupId != pBlock->info.groupId) {
|
||||
pMiaInfo->groupId = pBlock->info.groupId;
|
||||
}
|
||||
} else {
|
||||
if (pMiaInfo->groupId != pBlock->info.groupId) {
|
||||
// if there is an unclosed time window, close it first.
|
||||
ASSERT(miaInfo->curTs != INT64_MIN);
|
||||
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
|
||||
miaInfo->prefetchedBlock = pBlock;
|
||||
miaInfo->curTs = INT64_MIN;
|
||||
ASSERT(pMiaInfo->curTs != INT64_MIN);
|
||||
finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo);
|
||||
resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow));
|
||||
|
||||
pMiaInfo->prefetchedBlock = pBlock;
|
||||
cleanupAfterGroupResultGen(pMiaInfo, pRes);
|
||||
break;
|
||||
} else {
|
||||
// continue
|
||||
}
|
||||
}
|
||||
|
||||
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
|
||||
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
|
||||
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
|
||||
getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag);
|
||||
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pIaInfo->inputOrder, scanFlag, true);
|
||||
doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes);
|
||||
|
||||
doFilter(miaInfo->pCondition, pRes, NULL);
|
||||
doFilter(pMiaInfo->pCondition, pRes, NULL);
|
||||
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pRes->info.groupId = miaInfo->groupId;
|
||||
miaInfo->hasGroupId = false;
|
||||
}
|
||||
|
||||
static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
|
||||
|
@ -5155,7 +5144,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
|
|||
pOperator->info = miaInfo;
|
||||
|
||||
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
|
||||
destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL);
|
||||
destroyMAIOperatorInfo, NULL, NULL, NULL);
|
||||
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -5165,7 +5154,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
|
|||
return pOperator;
|
||||
|
||||
_error:
|
||||
destroyMergeAlignedIntervalOperatorInfo(miaInfo);
|
||||
destroyMAIOperatorInfo(miaInfo);
|
||||
taosMemoryFreeClear(pOperator);
|
||||
pTaskInfo->code = code;
|
||||
return NULL;
|
||||
|
@ -5208,8 +5197,7 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table
|
|||
SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(
|
||||
iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
|
||||
ASSERT(p1 != NULL);
|
||||
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
|
||||
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
|
||||
// finalizeResultRows(iaInfo->aggSup.pResultBuf, p1, pResultBlock, pTaskInfo);
|
||||
tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -5218,9 +5206,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
|
|||
STimeWindow* newWin) {
|
||||
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
|
||||
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
|
||||
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
|
||||
bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
|
||||
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
|
||||
|
||||
SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin};
|
||||
tdListAppend(miaInfo->groupIntervals, &groupTimeWindow);
|
||||
|
@ -5233,9 +5219,10 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
|
|||
if (prevGrpWin->groupId != tableGroupId) {
|
||||
continue;
|
||||
}
|
||||
|
||||
STimeWindow* prevWin = &prevGrpWin->window;
|
||||
if ((ascScan && newWin->skey > prevWin->ekey) || ((!ascScan) && newWin->skey < prevWin->ekey)) {
|
||||
finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
|
||||
// finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
|
||||
tdListPopNode(miaInfo->groupIntervals, listNode);
|
||||
}
|
||||
}
|
||||
|
@ -5395,7 +5382,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
|
|||
|
||||
if (listNode != NULL) {
|
||||
SGroupTimeWindow* grpWin = (SGroupTimeWindow*)(listNode->data);
|
||||
finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes);
|
||||
// finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes);
|
||||
pRes->info.groupId = grpWin->groupId;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ target_include_directories(
|
|||
|
||||
target_link_libraries(
|
||||
function
|
||||
PRIVATE os util common nodes scalar qcom transport
|
||||
PRIVATE os util common nodes scalar qcom transport stream
|
||||
PUBLIC uv_a
|
||||
)
|
||||
|
||||
|
|
|
@ -311,22 +311,6 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
|
||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
|
||||
if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
} else if (IS_NULL_TYPE(paraType)) {
|
||||
paraType = TSDB_DATA_TYPE_BIGINT;
|
||||
}
|
||||
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType};
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) {
|
||||
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
|
||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
|
@ -2076,7 +2060,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
.name = "min",
|
||||
.type = FUNCTION_TYPE_MIN,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
|
||||
.translateFunc = translateMinMax,
|
||||
.translateFunc = translateInOutNum,
|
||||
.dataRequiredFunc = statisDataRequired,
|
||||
.getEnvFunc = getMinmaxFuncEnv,
|
||||
.initFunc = minmaxFunctionSetup,
|
||||
|
@ -2091,7 +2075,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
.name = "max",
|
||||
.type = FUNCTION_TYPE_MAX,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
|
||||
.translateFunc = translateMinMax,
|
||||
.translateFunc = translateInOutNum,
|
||||
.dataRequiredFunc = statisDataRequired,
|
||||
.getEnvFunc = getMinmaxFuncEnv,
|
||||
.initFunc = minmaxFunctionSetup,
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "function.h"
|
||||
#include "query.h"
|
||||
#include "querynodes.h"
|
||||
#include "streamState.h"
|
||||
#include "tcompare.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tdigest.h"
|
||||
|
@@ -56,8 +57,13 @@ typedef struct SAvgRes {
} SAvgRes;

typedef struct STuplePos {
  union {
    struct {
      int32_t pageId;
      int32_t offset;
    };
    STupleKey streamTupleKey;
  };
} STuplePos;

typedef struct SMinmaxResInfo {
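As a side note on the widened `STuplePos` above: the anonymous union lets the same position value mean either a (pageId, offset) pair in the disk-backed tuple buffer or a key into the stream state store. A compilable sketch with a mock key type follows; the real `STupleKey` comes from streamState.h, so `MockTupleKey` here is only a stand-in.

```c
#include <stdint.h>
#include <stdio.h>

// Illustrative stand-in for the stream-state key defined in streamState.h.
typedef struct {
  uint64_t groupId;
  int64_t  ts;
  int32_t  exprIdx;
} MockTupleKey;

// Same shape as the new STuplePos: one field set for the disk-backed buffer,
// the other when tuples are persisted in the stream state store.
typedef struct {
  union {
    struct {
      int32_t pageId;
      int32_t offset;
    };
    MockTupleKey streamTupleKey;
  };
} MockTuplePos;

int main(void) {
  MockTuplePos batchPos = {.pageId = 3, .offset = 128};
  MockTuplePos streamPos;
  streamPos.streamTupleKey = (MockTupleKey){.groupId = 1, .ts = 1700000000000LL, .exprIdx = 0};
  printf("batch: page=%d offset=%d\n", batchPos.pageId, batchPos.offset);
  printf("stream: group=%llu ts=%lld\n",
         (unsigned long long)streamPos.streamTupleKey.groupId,
         (long long)streamPos.streamTupleKey.ts);
  return 0;
}
```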
@ -1146,7 +1152,8 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
|
|||
return true;
|
||||
}
|
||||
|
||||
static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
|
||||
static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock,
|
||||
const STupleKey* pKey);
|
||||
static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
|
||||
static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
|
||||
|
||||
|
@ -1201,10 +1208,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
pBuf->v = *(int64_t*)tval;
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
} else {
|
||||
if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
|
||||
if (IS_SIGNED_NUMERIC_TYPE(type)) {
|
||||
int64_t prev = 0;
|
||||
GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
|
||||
|
||||
|
@ -1213,7 +1220,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
*(int64_t*)&pBuf->v = val;
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
}
|
||||
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
|
||||
|
@ -1225,7 +1232,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
*(uint64_t*)&pBuf->v = val;
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
}
|
||||
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
|
||||
|
@ -1237,7 +1244,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
*(double*)&pBuf->v = val;
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
}
|
||||
} else if (type == TSDB_DATA_TYPE_FLOAT) {
|
||||
|
@ -1251,7 +1258,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1263,7 +1270,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
int32_t start = pInput->startRowIndex;
|
||||
int32_t numOfRows = pInput->numOfRows;
|
||||
|
||||
if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
|
||||
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
|
||||
if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) {
|
||||
int8_t* pData = (int8_t*)pCol->pData;
|
||||
int8_t* val = (int8_t*)&pBuf->v;
|
||||
|
@ -1276,7 +1283,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1307,7 +1314,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1338,7 +1345,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1357,7 +1364,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
|
||||
numOfElems += 1;
|
||||
}
|
||||
} else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
} else if (type == TSDB_DATA_TYPE_BIGINT) {
|
||||
int64_t* pData = (int64_t*)pCol->pData;
|
||||
int64_t* val = (int64_t*)&pBuf->v;
|
||||
|
||||
|
@ -1369,7 +1376,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1402,7 +1409,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1433,7 +1440,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1464,7 +1471,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1495,7 +1502,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1527,7 +1534,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1558,7 +1565,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
if (!pBuf->assign) {
|
||||
*val = pData[i];
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
|
||||
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pBuf->assign = true;
|
||||
} else {
|
||||
|
@ -1581,7 +1588,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
|
|||
|
||||
_min_max_over:
|
||||
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
|
||||
pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
|
||||
pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
|
||||
pBuf->nullTupleSaved = true;
|
||||
}
|
||||
return numOfElems;
|
||||
|
@ -2758,7 +2765,7 @@ static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowInde
|
|||
}
|
||||
|
||||
if (!pInfo->hasResult) {
|
||||
pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
|
||||
pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
|
||||
} else {
|
||||
updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
|
||||
}
|
||||
|
@ -3426,7 +3433,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
|
|||
}
|
||||
|
||||
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
|
||||
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
|
||||
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
|
||||
pRes->nullTupleSaved = true;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -3454,7 +3461,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
|
|||
}
|
||||
|
||||
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
|
||||
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
|
||||
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
|
||||
pRes->nullTupleSaved = true;
|
||||
}
|
||||
|
||||
|
@ -3506,7 +3513,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
|
|||
|
||||
// save the data of this tuple
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
|
||||
pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
|
||||
}
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
|
||||
|
@@ -3578,7 +3585,8 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid
  return buf;
}

static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length,
                                 const STupleKey* pKey) {
  STuplePos p = {0};
  if (pHandle->pBuf != NULL) {
    SFilePage* pPage = NULL;

@@ -3604,12 +3612,16 @@ static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf
    releaseBufPage(pHandle->pBuf, pPage);
  } else {
    // other tuple save policy
    if (streamStateFuncPut(pHandle->pState, pKey, pBuf, length) < 0) {
      ASSERT(0);
    }
    p.streamTupleKey = *pKey;
  }

  return p;
}

STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, const STupleKey* pKey) {
  if (pCtx->subsidiaries.rowLen == 0) {
    int32_t rowLen = 0;
    for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
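To summarize the hunk above: `doSaveTupleData` now takes an optional key and chooses between two back ends, writing into a buffer page when `pHandle->pBuf` is set and into the stream state store (via `streamStateFuncPut`) otherwise. The standalone sketch below mirrors only that branch with in-memory stand-ins; the mock types and the `save_tuple` helper are illustrative, not the real serialize-handle API.

```c
#include <stdio.h>
#include <string.h>

// Illustrative stand-ins for the two storage back ends.
typedef struct { char page[256];  int used; } MockDiskBuf;
typedef struct { char store[256]; int used; } MockStreamState;

typedef struct { MockDiskBuf *pBuf; MockStreamState *pState; } MockHandle;
typedef struct { int pageId; int offset; long long streamKey; } MockPos;

// Save a serialized tuple either into a buffer page or into the stream state,
// mirroring the pBuf != NULL / else branches of the patched function.
static MockPos save_tuple(MockHandle *h, const void *buf, size_t len, long long key) {
  MockPos p = {0};
  if (h->pBuf != NULL) {
    memcpy(h->pBuf->page + h->pBuf->used, buf, len);
    p.pageId = 0;
    p.offset = h->pBuf->used;
    h->pBuf->used += (int)len;
  } else {
    memcpy(h->pState->store + h->pState->used, buf, len);
    h->pState->used += (int)len;
    p.streamKey = key;  // remember the key instead of a page position
  }
  return p;
}

int main(void) {
  MockDiskBuf disk = {0};
  MockHandle  h = {.pBuf = &disk};
  MockPos     pos = save_tuple(&h, "tuple", 5, 0);
  printf("saved at page=%d offset=%d\n", pos.pageId, pos.offset);
  return 0;
}
```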
@ -3622,7 +3634,7 @@ STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBloc
|
|||
}
|
||||
|
||||
char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
|
||||
return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
|
||||
return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pKey);
|
||||
}
|
||||
|
||||
static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
|
||||
|
@ -3632,6 +3644,7 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf
|
|||
setBufPageDirty(pPage, true);
|
||||
releaseBufPage(pHandle->pBuf, pPage);
|
||||
} else {
|
||||
streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -3650,7 +3663,10 @@ static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPo
|
|||
releaseBufPage(pHandle->pBuf, pPage);
|
||||
return p;
|
||||
} else {
|
||||
return NULL;
|
||||
void* value = NULL;
|
||||
int32_t vLen;
|
||||
streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen);
|
||||
return (char*)value;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4981,7 +4997,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
|
|||
if (pInfo->numSampled < pInfo->samples) {
|
||||
sampleAssignResult(pInfo, data, pInfo->numSampled);
|
||||
if (pCtx->subsidiaries.num > 0) {
|
||||
pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
|
||||
pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
|
||||
}
|
||||
pInfo->numSampled++;
|
||||
} else {
|
||||
|
@ -5012,7 +5028,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
|
|||
}
|
||||
|
||||
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
|
||||
pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
|
||||
pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
|
||||
pInfo->nullTupleSaved = true;
|
||||
}
|
||||
|
||||
|
@@ -5398,8 +5414,8 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {

  int32_t i = pInput->startRowIndex;
  if (pCtx->start.key != INT64_MIN) {
    ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
           (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));
    //ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
    //       (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));

    ASSERT(last->key == INT64_MIN);
    for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
@@ -5447,6 +5463,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
        numOfElems++;

        INIT_INTP_POINT(st, tsList[i], val[i]);
        if (pInfo->p.key == st.key) {
          return TSDB_CODE_FUNC_DUP_TIMESTAMP;
        }

        pInfo->dOutput += twa_get_area(pInfo->p, st);
        pInfo->p = st;
      }
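The pattern repeated in this and the following hunks adds an explicit guard: if two consecutive points share a timestamp, time-weighted averaging cannot interpolate between them, so the function now returns `TSDB_CODE_FUNC_DUP_TIMESTAMP` instead of proceeding. A self-contained sketch of trapezoidal accumulation with that guard is shown below; the types and the error constant are simplified placeholders, not the real TWA implementation.

```c
#include <stdio.h>

#define ERR_DUP_TIMESTAMP (-1)  // stand-in for TSDB_CODE_FUNC_DUP_TIMESTAMP

typedef struct { long long key; double val; } Point;

// Accumulate the time-weighted area between consecutive points, rejecting
// duplicate timestamps the way the reworked twaFunction does.
static int twa_accumulate(const Point *pts, int n, double *area) {
  *area = 0;
  for (int i = 1; i < n; ++i) {
    if (pts[i].key == pts[i - 1].key) {
      return ERR_DUP_TIMESTAMP;  // same ts twice: cannot interpolate
    }
    *area += (pts[i].val + pts[i - 1].val) / 2.0 * (double)(pts[i].key - pts[i - 1].key);
  }
  return 0;
}

int main(void) {
  Point  pts[3] = {{1000, 1.0}, {2000, 3.0}, {3000, 2.0}};
  double area = 0;
  if (twa_accumulate(pts, 3, &area) == 0) {
    printf("area=%.1f twa=%.3f\n", area, area / (3000 - 1000));
  }
  return 0;
}
```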
@ -5462,6 +5482,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5476,6 +5500,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5490,6 +5518,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5504,6 +5536,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5518,6 +5554,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5532,6 +5572,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5546,6 +5590,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5560,6 +5608,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
@ -5574,6 +5626,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
|
|||
numOfElems++;
|
||||
|
||||
INIT_INTP_POINT(st, tsList[i], val[i]);
|
||||
if (pInfo->p.key == st.key) {
|
||||
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
|
||||
}
|
||||
|
||||
pInfo->dOutput += twa_get_area(pInfo->p, st);
|
||||
pInfo->p = st;
|
||||
}
|
||||
|
|
|
@ -323,10 +323,6 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr,
|
|||
while ((rt = stmStNextWith(st, NULL)) != NULL) {
|
||||
FstSlice* s = &rt->data;
|
||||
char* ch = (char*)fstSliceData(s, NULL);
|
||||
// if (0 != strncmp(ch, tem->colName, tem->nColName)) {
|
||||
// swsResultDestroy(rt);
|
||||
// break;
|
||||
//}
|
||||
|
||||
TExeCond cond = cmpFn(ch, p, tem->colType);
|
||||
if (MATCH == cond) {
|
||||
|
|
|
@@ -24,7 +24,7 @@

typedef struct STlv {
  int16_t type;
  int16_t len;
  int32_t len;
  char    value[0];
} STlv;

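The point of the struct change above is that the TLV length field grows from 16 to 32 bits, so a single record can describe payloads larger than a 16-bit length allows. A minimal standalone encoder in the same spirit is sketched below; `MockTlv` and `tlv_append` are illustrative, not the nodesMsgFuncs API.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Simplified TLV record with a 32-bit length, like the widened STlv.
typedef struct {
  int16_t type;
  int32_t len;
  char    value[];
} MockTlv;

// Append one type/length/value record to a heap buffer; returns the new size.
static size_t tlv_append(char **buf, size_t size, int16_t type, const void *val, int32_t len) {
  size_t need = size + sizeof(MockTlv) + (size_t)len;
  char  *grown = realloc(*buf, need);
  if (grown == NULL) {
    return size;  // keep the old buffer on allocation failure
  }
  *buf = grown;
  MockTlv *tlv = (MockTlv *)(*buf + size);
  tlv->type = type;
  tlv->len = len;
  memcpy(tlv->value, val, (size_t)len);
  return need;
}

int main(void) {
  char  *msg = NULL;
  size_t size = tlv_append(&msg, 0, 1, "hello", 5);
  if (msg != NULL) {
    MockTlv *first = (MockTlv *)msg;
    printf("type=%d len=%d size=%zu\n", first->type, first->len, size);
    free(msg);
  }
  return 0;
}
```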
@@ -70,7 +70,7 @@ static void endTlvEncode(STlvEncoder* pEncoder, char** pMsg, int32_t* pLen) {
  // nodesWarn("encode tlv count = %d, tl size = %d", pEncoder->tlvCount, sizeof(STlv) * pEncoder->tlvCount);
}

static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int16_t len) {
static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int32_t len) {
  int32_t tlvLen = sizeof(STlv) + len;
  if (pEncoder->offset + tlvLen > pEncoder->allocSize) {
    void* pNewBuf = taosMemoryRealloc(pEncoder->pBuf, pEncoder->allocSize * 2);

@@ -130,6 +130,9 @@ static int32_t tlvEncodeBool(STlvEncoder* pEncoder, int16_t type, bool value) {
}

static int32_t tlvEncodeCStr(STlvEncoder* pEncoder, int16_t type, const char* pValue) {
  if (NULL == pValue) {
    return TSDB_CODE_SUCCESS;
  }
  return tlvEncodeImpl(pEncoder, type, pValue, strlen(pValue));
}

@@ -187,7 +190,7 @@ static int32_t tlvGetNextTlv(STlvDecoder* pDecoder, STlv** pTlv) {

static bool tlvDecodeEnd(STlvDecoder* pDecoder) { return pDecoder->offset == pDecoder->bufSize; }

static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int16_t len) {
static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) {
  if (pTlv->len != len) {
    return TSDB_CODE_FAILED;
  }

@@ -237,6 +240,11 @@ static int32_t tlvDecodeCStr(STlv* pTlv, char* pValue) {
  return TSDB_CODE_SUCCESS;
}

static int32_t tlvDecodeCStrP(STlv* pTlv, char** pValue) {
  *pValue = strndup(pTlv->value, pTlv->len);
  return NULL == *pValue ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS;
}

static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) {
  *pValue = taosMemoryMalloc(pTlv->len);
  if (NULL == *pValue) {

@@ -246,6 +254,11 @@ static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) {
  return TSDB_CODE_SUCCESS;
}

static int32_t tlvDecodeBinary(STlv* pTlv, void* pValue) {
  memcpy(pValue, pTlv->value, pTlv->len);
  return TSDB_CODE_SUCCESS;
}

static int32_t tlvDecodeObjFromTlv(STlv* pTlv, FToObject func, void* pObj) {
  STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
  return func(&decoder, pObj);
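Of the decoders above, `tlvDecodeCStrP` is the new one: it allocates a fresh NUL-terminated copy of a length-prefixed payload, which is what a heap-allocated string field such as the value node's `literal` needs on decode. A tiny standalone sketch of the same idea follows; the helper name and error code below are placeholders.

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Decode a non-NUL-terminated TLV payload into a freshly allocated C string,
// the way tlvDecodeCStrP does with strndup.
static int decode_cstr_copy(const char *value, int len, char **out) {
  *out = strndup(value, (size_t)len);
  return (*out == NULL) ? -1 : 0;  // -1 stands in for TSDB_CODE_OUT_OF_MEMORY
}

int main(void) {
  const char payload[] = {'d', 'b', '1'};  // no terminating NUL on the wire
  char      *literal = NULL;
  if (decode_cstr_copy(payload, 3, &literal) == 0) {
    printf("decoded '%s'\n", literal);
    free(literal);
  }
  return 0;
}
```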
@ -367,6 +380,10 @@ enum {
|
|||
COLUMN_CODE_TABLE_TYPE,
|
||||
COLUMN_CODE_COLUMN_ID,
|
||||
COLUMN_CODE_COLUMN_TYPE,
|
||||
COLUMN_CODE_DB_NAME,
|
||||
COLUMN_CODE_TABLE_NAME,
|
||||
COLUMN_CODE_TABLE_ALIAS,
|
||||
COLUMN_CODE_COL_NAME,
|
||||
COLUMN_CODE_DATABLOCK_ID,
|
||||
COLUMN_CODE_SLOT_ID
|
||||
};
|
||||
|
@ -387,6 +404,18 @@ static int32_t columnNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeEnum(pEncoder, COLUMN_CODE_COLUMN_TYPE, pNode->colType);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeCStr(pEncoder, COLUMN_CODE_DB_NAME, pNode->dbName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeCStr(pEncoder, COLUMN_CODE_TABLE_NAME, pNode->tableName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeCStr(pEncoder, COLUMN_CODE_TABLE_ALIAS, pNode->tableAlias);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeCStr(pEncoder, COLUMN_CODE_COL_NAME, pNode->colName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI16(pEncoder, COLUMN_CODE_DATABLOCK_ID, pNode->dataBlockId);
|
||||
}
|
||||
|
@ -419,6 +448,18 @@ static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
case COLUMN_CODE_COLUMN_TYPE:
|
||||
code = tlvDecodeEnum(pTlv, &pNode->colType, sizeof(pNode->colType));
|
||||
break;
|
||||
case COLUMN_CODE_DB_NAME:
|
||||
code = tlvDecodeCStr(pTlv, pNode->dbName);
|
||||
break;
|
||||
case COLUMN_CODE_TABLE_NAME:
|
||||
code = tlvDecodeCStr(pTlv, pNode->tableName);
|
||||
break;
|
||||
case COLUMN_CODE_TABLE_ALIAS:
|
||||
code = tlvDecodeCStr(pTlv, pNode->tableAlias);
|
||||
break;
|
||||
case COLUMN_CODE_COL_NAME:
|
||||
code = tlvDecodeCStr(pTlv, pNode->colName);
|
||||
break;
|
||||
case COLUMN_CODE_DATABLOCK_ID:
|
||||
code = tlvDecodeI16(pTlv, &pNode->dataBlockId);
|
||||
break;
|
||||
|
@ -433,7 +474,15 @@ static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
enum { VALUE_CODE_EXPR_BASE = 1, VALUE_CODE_IS_NULL, VALUE_CODE_DATUM };
|
||||
enum {
|
||||
VALUE_CODE_EXPR_BASE = 1,
|
||||
VALUE_CODE_LITERAL,
|
||||
VALUE_CODE_IS_DURATION,
|
||||
VALUE_CODE_TRANSLATE,
|
||||
VALUE_CODE_NOT_RESERVED,
|
||||
VALUE_CODE_IS_NULL,
|
||||
VALUE_CODE_DATUM
|
||||
};
|
||||
|
||||
static int32_t datumToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SValueNode* pNode = (const SValueNode*)pObj;
|
||||
|
@ -485,9 +534,21 @@ static int32_t valueNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
|||
|
||||
int32_t code = tlvEncodeObj(pEncoder, VALUE_CODE_EXPR_BASE, exprNodeToMsg, pNode);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull);
|
||||
code = tlvEncodeCStr(pEncoder, VALUE_CODE_LITERAL, pNode->literal);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_DURATION, pNode->isDuration);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeBool(pEncoder, VALUE_CODE_TRANSLATE, pNode->translate);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeBool(pEncoder, VALUE_CODE_NOT_RESERVED, pNode->notReserved);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code && !pNode->isNull) {
|
||||
code = datumToMsg(pNode, pEncoder);
|
||||
}
|
||||
|
||||
|
@@ -551,12 +612,18 @@ static int32_t msgToDatum(STlv* pTlv, void* pObj) {
      break;
    case TSDB_DATA_TYPE_NCHAR:
    case TSDB_DATA_TYPE_VARCHAR:
    case TSDB_DATA_TYPE_VARBINARY:
      code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p);
    case TSDB_DATA_TYPE_VARBINARY: {
      pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
      if (NULL == pNode->datum.p) {
        code = TSDB_CODE_OUT_OF_MEMORY;
        break;
      }
      code = tlvDecodeBinary(pTlv, pNode->datum.p);
      if (TSDB_CODE_SUCCESS == code) {
        varDataSetLen(pNode->datum.p, pNode->node.resType.bytes - VARSTR_HEADER_SIZE);
        varDataSetLen(pNode->datum.p, pTlv->len - VARSTR_HEADER_SIZE);
      }
      break;
    }
    case TSDB_DATA_TYPE_JSON:
      code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p);
      break;
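The rewritten VARCHAR/VARBINARY branch above allocates the full declared column width plus the var-string header, copies the raw TLV payload in place, and derives the stored length from the wire length rather than from the column width. The sketch below shows the general shape with a plain 2-byte length header; the constants and the helper are simplified stand-ins, not the real varDataSetLen machinery.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VAR_HEADER_SIZE 2  // stand-in for VARSTR_HEADER_SIZE

// Decode a variable-length datum: allocate the full column width plus header,
// copy the wire bytes, and record the actual payload length in the header,
// mirroring the msgToDatum change for VARCHAR/VARBINARY.
static char *decode_var_datum(const char *wire, uint16_t wireLen, int32_t colBytes) {
  char *p = calloc(1, (size_t)colBytes + VAR_HEADER_SIZE + 1);
  if (p == NULL) return NULL;
  memcpy(p, &wireLen, VAR_HEADER_SIZE);        // length header from the wire, not the column width
  memcpy(p + VAR_HEADER_SIZE, wire, wireLen);  // payload
  return p;
}

int main(void) {
  char *datum = decode_var_datum("abc", 3, 16);
  if (datum != NULL) {
    uint16_t len;
    memcpy(&len, datum, VAR_HEADER_SIZE);
    printf("len=%u value=%s\n", len, datum + VAR_HEADER_SIZE);
    free(datum);
  }
  return 0;
}
```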
@ -580,6 +647,18 @@ static int32_t msgToValueNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
case VALUE_CODE_EXPR_BASE:
|
||||
code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
|
||||
break;
|
||||
case VALUE_CODE_LITERAL:
|
||||
code = tlvDecodeCStrP(pTlv, &pNode->literal);
|
||||
break;
|
||||
case VALUE_CODE_IS_DURATION:
|
||||
code = tlvDecodeBool(pTlv, &pNode->isDuration);
|
||||
break;
|
||||
case VALUE_CODE_TRANSLATE:
|
||||
code = tlvDecodeBool(pTlv, &pNode->translate);
|
||||
break;
|
||||
case VALUE_CODE_NOT_RESERVED:
|
||||
code = tlvDecodeBool(pTlv, &pNode->notReserved);
|
||||
break;
|
||||
case VALUE_CODE_IS_NULL:
|
||||
code = tlvDecodeBool(pTlv, &pNode->isNull);
|
||||
break;
|
||||
|
@ -682,6 +761,7 @@ static int32_t msgToLogicConditionNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
|
||||
enum {
|
||||
FUNCTION_CODE_EXPR_BASE = 1,
|
||||
FUNCTION_CODE_FUNCTION_NAME,
|
||||
FUNCTION_CODE_FUNCTION_ID,
|
||||
FUNCTION_CODE_FUNCTION_TYPE,
|
||||
FUNCTION_CODE_PARAMETERS,
|
||||
|
@ -692,6 +772,9 @@ static int32_t functionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
|||
const SFunctionNode* pNode = (const SFunctionNode*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeObj(pEncoder, FUNCTION_CODE_EXPR_BASE, exprNodeToMsg, pNode);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeCStr(pEncoder, FUNCTION_CODE_FUNCTION_NAME, pNode->functionName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_ID, pNode->funcId);
|
||||
}
|
||||
|
@ -718,6 +801,9 @@ static int32_t msgToFunctionNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
case FUNCTION_CODE_EXPR_BASE:
|
||||
code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
|
||||
break;
|
||||
case FUNCTION_CODE_FUNCTION_NAME:
|
||||
code = tlvDecodeCStr(pTlv, pNode->functionName);
|
||||
break;
|
||||
case FUNCTION_CODE_FUNCTION_ID:
|
||||
code = tlvDecodeI32(pTlv, &pNode->funcId);
|
||||
break;
|
||||
|
@ -1082,6 +1168,170 @@ static int32_t msgToSlotDescNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
enum { EP_CODE_FQDN = 1, EP_CODE_port };
|
||||
|
||||
static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SEp* pNode = (const SEp*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pNode->fqdn);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeU16(pEncoder, EP_CODE_port, pNode->port);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) {
|
||||
SEp* pNode = (SEp*)pObj;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STlv* pTlv = NULL;
|
||||
tlvForEach(pDecoder, pTlv, code) {
|
||||
switch (pTlv->type) {
|
||||
case EP_CODE_FQDN:
|
||||
code = tlvDecodeCStr(pTlv, pNode->fqdn);
|
||||
break;
|
||||
case EP_CODE_port:
|
||||
code = tlvDecodeU16(pTlv, &pNode->port);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
enum { EP_SET_CODE_IN_USE = 1, EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS };
|
||||
|
||||
static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SEpSet* pNode = (const SEpSet*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pNode->inUse);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pNode->numOfEps);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) {
|
||||
SEpSet* pNode = (SEpSet*)pObj;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STlv* pTlv = NULL;
|
||||
tlvForEach(pDecoder, pTlv, code) {
|
||||
switch (pTlv->type) {
|
||||
case EP_SET_CODE_IN_USE:
|
||||
code = tlvDecodeI8(pTlv, &pNode->inUse);
|
||||
break;
|
||||
case EP_SET_CODE_NUM_OF_EPS:
|
||||
code = tlvDecodeI8(pTlv, &pNode->numOfEps);
|
||||
break;
|
||||
case EP_SET_CODE_EPS:
|
||||
code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET };
|
||||
|
||||
static int32_t queryNodeAddrToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) {
|
||||
SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STlv* pTlv = NULL;
|
||||
tlvForEach(pDecoder, pTlv, code) {
|
||||
switch (pTlv->type) {
|
||||
case QUERY_NODE_ADDR_CODE_NODE_ID:
|
||||
code = tlvDecodeI32(pTlv, &pNode->nodeId);
|
||||
break;
|
||||
case QUERY_NODE_ADDR_CODE_EP_SET:
|
||||
code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
enum {
|
||||
DOWNSTREAM_SOURCE_CODE_ADDR = 1,
|
||||
DOWNSTREAM_SOURCE_CODE_TASK_ID,
|
||||
DOWNSTREAM_SOURCE_CODE_SCHED_ID,
|
||||
DOWNSTREAM_SOURCE_CODE_EXEC_ID,
|
||||
DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE
|
||||
};
|
||||
|
||||
static int32_t downstreamSourceNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SDownstreamSourceNode* pNode = (const SDownstreamSourceNode*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeObj(pEncoder, DOWNSTREAM_SOURCE_CODE_ADDR, queryNodeAddrToMsg, &pNode->addr);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeU64(pEncoder, DOWNSTREAM_SOURCE_CODE_TASK_ID, pNode->taskId);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeU64(pEncoder, DOWNSTREAM_SOURCE_CODE_SCHED_ID, pNode->schedId);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI32(pEncoder, DOWNSTREAM_SOURCE_CODE_EXEC_ID, pNode->execId);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI32(pEncoder, DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE, pNode->fetchMsgType);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t msgToDownstreamSourceNode(STlvDecoder* pDecoder, void* pObj) {
|
||||
SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STlv* pTlv = NULL;
|
||||
tlvForEach(pDecoder, pTlv, code) {
|
||||
switch (pTlv->type) {
|
||||
case DOWNSTREAM_SOURCE_CODE_ADDR:
|
||||
code = tlvDecodeObjFromTlv(pTlv, msgToQueryNodeAddr, &pNode->addr);
|
||||
break;
|
||||
case DOWNSTREAM_SOURCE_CODE_TASK_ID:
|
||||
code = tlvDecodeU64(pTlv, &pNode->taskId);
|
||||
break;
|
||||
case DOWNSTREAM_SOURCE_CODE_SCHED_ID:
|
||||
code = tlvDecodeU64(pTlv, &pNode->schedId);
|
||||
break;
|
||||
case DOWNSTREAM_SOURCE_CODE_EXEC_ID:
|
||||
code = tlvDecodeI32(pTlv, &pNode->execId);
|
||||
break;
|
||||
case DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE:
|
||||
code = tlvDecodeI32(pTlv, &pNode->fetchMsgType);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
enum {
|
||||
PHY_NODE_CODE_OUTPUT_DESC = 1,
|
||||
PHY_NODE_CODE_CONDITIONS,
|
||||
|
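The serializers added above all follow the same tag-length-value (TLV) shape: each field is written as a numeric tag plus its payload, and the decoder walks the records and dispatches on the tag, silently skipping anything it does not recognize. As a rough, self-contained illustration of that idea only — the `ToyEp` struct, the tag numbers, and the 2-byte tag / 2-byte length record layout below are made up and are not TDengine's actual `STlvEncoder` wire format:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  char     fqdn[64];
  uint16_t port;
} ToyEp;

// Append one TLV record: 2-byte tag, 2-byte length, then the value bytes.
static size_t tlvPut(uint8_t* buf, size_t pos, uint16_t tag, const void* val, uint16_t len) {
  memcpy(buf + pos, &tag, sizeof(tag));
  memcpy(buf + pos + 2, &len, sizeof(len));
  memcpy(buf + pos + 4, val, len);
  return pos + 4 + len;
}

static size_t toyEpEncode(const ToyEp* ep, uint8_t* buf) {
  size_t pos = 0;
  pos = tlvPut(buf, pos, 1, ep->fqdn, (uint16_t)(strlen(ep->fqdn) + 1));
  pos = tlvPut(buf, pos, 2, &ep->port, sizeof(ep->port));
  return pos;  // total encoded size
}

// Walk the buffer record by record; unknown tags are skipped, which is what
// makes this layout tolerant of fields added by newer versions.
static void toyEpDecode(const uint8_t* buf, size_t size, ToyEp* ep) {
  size_t pos = 0;
  while (pos + 4 <= size) {
    uint16_t tag, len;
    memcpy(&tag, buf + pos, sizeof(tag));
    memcpy(&len, buf + pos + 2, sizeof(len));
    const uint8_t* val = buf + pos + 4;
    if (1 == tag && len <= sizeof(ep->fqdn)) {
      memcpy(ep->fqdn, val, len);
    } else if (2 == tag && len == sizeof(ep->port)) {
      memcpy(&ep->port, val, len);
    }  // else: unknown tag, skip
    pos += 4 + (size_t)len;
  }
}

int main(void) {
  ToyEp   in = {.fqdn = "node1.example.com", .port = 6030}, out = {0};
  uint8_t buf[256];
  size_t  n = toyEpEncode(&in, buf);
  toyEpDecode(buf, n, &out);
  printf("%s:%u (%zu bytes)\n", out.fqdn, (unsigned)out.port, n);
  return 0;
}
```

The same encode/decode pairing is then reused compositionally: an `SEpSet` is encoded as an array of `SEp` records, and a `SQueryNodeAddr` embeds an `SEpSet`, exactly as the functions above chain `epToMsg`, `epSetToMsg`, and `queryNodeAddrToMsg`.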
@@ -1401,80 +1651,6 @@ static int32_t msgToPhysiTableScanNode(STlvDecoder* pDecoder, void* pObj) {
  return code;
}

enum { EP_CODE_FQDN = 1, EP_CODE_port };

static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) {
  const SEp* pNode = (const SEp*)pObj;

  int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pNode->fqdn);
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeU16(pEncoder, EP_CODE_port, pNode->port);
  }

  return code;
}

static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) {
  SEp* pNode = (SEp*)pObj;

  int32_t code = TSDB_CODE_SUCCESS;
  STlv*   pTlv = NULL;
  tlvForEach(pDecoder, pTlv, code) {
    switch (pTlv->type) {
      case EP_CODE_FQDN:
        code = tlvDecodeCStr(pTlv, pNode->fqdn);
        break;
      case EP_CODE_port:
        code = tlvDecodeU16(pTlv, &pNode->port);
        break;
      default:
        break;
    }
  }

  return code;
}

enum { EP_SET_CODE_IN_USE = 1, EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS };

static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) {
  const SEpSet* pNode = (const SEpSet*)pObj;

  int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pNode->inUse);
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pNode->numOfEps);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps);
  }

  return code;
}

static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) {
  SEpSet* pNode = (SEpSet*)pObj;

  int32_t code = TSDB_CODE_SUCCESS;
  STlv*   pTlv = NULL;
  tlvForEach(pDecoder, pTlv, code) {
    switch (pTlv->type) {
      case EP_SET_CODE_IN_USE:
        code = tlvDecodeI8(pTlv, &pNode->inUse);
        break;
      case EP_SET_CODE_NUM_OF_EPS:
        code = tlvDecodeI8(pTlv, &pNode->numOfEps);
        break;
      case EP_SET_CODE_EPS:
        code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp));
        break;
      default:
        break;
    }
  }

  return code;
}

enum {
  PHY_SYSTABLE_SCAN_CODE_SCAN = 1,
  PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET,

@@ -2594,38 +2770,6 @@ static int32_t msgToSubplanId(STlvDecoder* pDecoder, void* pObj) {
  return code;
}

enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET };

static int32_t queryNodeAddrToMsg(const void* pObj, STlvEncoder* pEncoder) {
  const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj;

  int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId);
  if (TSDB_CODE_SUCCESS == code) {
    code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet);
  }

  return code;
}

static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) {
  SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj;

  int32_t code = TSDB_CODE_SUCCESS;
  STlv*   pTlv = NULL;
  tlvForEach(pDecoder, pTlv, code) {
    switch (pTlv->type) {
      case QUERY_NODE_ADDR_CODE_NODE_ID:
        code = tlvDecodeI32(pTlv, &pNode->nodeId);
        break;
      case QUERY_NODE_ADDR_CODE_EP_SET:
        code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
        break;
    }
  }

  return code;
}

enum {
  SUBPLAN_CODE_SUBPLAN_ID = 1,
  SUBPLAN_CODE_SUBPLAN_TYPE,

@@ -2802,6 +2946,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
    case QUERY_NODE_SLOT_DESC:
      code = slotDescNodeToMsg(pObj, pEncoder);
      break;
    case QUERY_NODE_DOWNSTREAM_SOURCE:
      return downstreamSourceNodeToMsg(pObj, pEncoder);
    case QUERY_NODE_LEFT_VALUE:
      break;
    case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:

@@ -2929,6 +3075,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
    case QUERY_NODE_SLOT_DESC:
      code = msgToSlotDescNode(pDecoder, pObj);
      break;
    case QUERY_NODE_DOWNSTREAM_SOURCE:
      return msgToDownstreamSourceNode(pDecoder, pObj);
    case QUERY_NODE_LEFT_VALUE:
      break;
    case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:

@@ -1303,7 +1303,7 @@ SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pD
                              EOperatorType tableCondType) {
  CHECK_PARSER_STATUS(pCxt);
  if (needDbShowStmt(type) && NULL == pDbName) {
    snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
    snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified");
    pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
    return NULL;
  }

@@ -1423,9 +1423,7 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
}

static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
  if (!pCxt->pComCxt->async) {
    taosMemoryFreeClear(pCxt->pTableMeta);
  }
  destroyBoundColumnInfo(&pCxt->tags);
  tdDestroySVCreateTbReq(&pCxt->createTblReq);
}

@@ -1283,6 +1283,36 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount)
  return code;
}

static bool isCountTbname(SFunctionNode* pFunc) {
  if (FUNCTION_TYPE_COUNT != pFunc->funcType || 1 != LIST_LENGTH(pFunc->pParameterList)) {
    return false;
  }
  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
  return (QUERY_NODE_FUNCTION == nodeType(pPara) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPara)->funcType);
}

// count(tbname) is rewritten as count(ts) for scanning optimization
static int32_t rewriteCountTbname(STranslateContext* pCxt, SFunctionNode* pCount) {
  SFunctionNode* pTbname = (SFunctionNode*)nodesListGetNode(pCount->pParameterList, 0);
  const char*    pTableAlias = NULL;
  if (LIST_LENGTH(pTbname->pParameterList) > 0) {
    pTableAlias = ((SValueNode*)nodesListGetNode(pTbname->pParameterList, 0))->literal;
  }
  STableNode* pTable = NULL;
  int32_t     code = findTable(pCxt, pTableAlias, &pTable);
  if (TSDB_CODE_SUCCESS == code) {
    SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
    if (NULL == pCol) {
      code = TSDB_CODE_OUT_OF_MEMORY;
    } else {
      setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
      NODES_DESTORY_LIST(pCount->pParameterList);
      code = nodesListMakeAppend(&pCount->pParameterList, (SNode*)pCol);
    }
  }
  return code;
}

static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
  bool hasInvalidFunc = false;
  nodesWalkExprs(pParameterList, haveVectorFunction, &hasInvalidFunc);

@@ -1318,6 +1348,9 @@ static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
  if (isCountStar(pFunc)) {
    return rewriteCountStar(pCxt, pFunc);
  }
  if (isCountTbname(pFunc)) {
    return rewriteCountTbname(pCxt, pFunc);
  }
  return TSDB_CODE_SUCCESS;
}

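The rewrite above works because `COUNT(TBNAME)` produces one value per scanned row, so the translator can swap the argument for a real column from the table's schema (the code comment above describes it as counting `ts` instead) and let the scan read a single column. As a rough sketch of just the detection step, using hypothetical toy node structs rather than TDengine's `SFunctionNode`/`SNodeList`:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { TOY_FUNC_COUNT, TOY_FUNC_TBNAME, TOY_FUNC_OTHER } ToyFuncType;

typedef struct ToyFunc {
  ToyFuncType      type;
  int              numOfParams;
  struct ToyFunc** params;  // parameters that are themselves function calls
} ToyFunc;

// Mirrors the shape of the check: COUNT with exactly one argument, and that
// argument is the TBNAME pseudo-column function.
static bool isCountTbnameToy(const ToyFunc* pFunc) {
  if (TOY_FUNC_COUNT != pFunc->type || 1 != pFunc->numOfParams) {
    return false;
  }
  return TOY_FUNC_TBNAME == pFunc->params[0]->type;
}

int main(void) {
  ToyFunc  tbname = {.type = TOY_FUNC_TBNAME, .numOfParams = 0, .params = NULL};
  ToyFunc* args[] = {&tbname};
  ToyFunc  count = {.type = TOY_FUNC_COUNT, .numOfParams = 1, .params = args};
  printf("rewrite? %s\n", isCountTbnameToy(&count) ? "yes" : "no");
  return 0;
}
```

The `SELECT COUNT(TBNAME) FROM t1` case added to `PlanOptimizeTest, scanPath` further down exercises this path.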
@@ -5927,12 +5960,6 @@ typedef struct SVgroupCreateTableBatch {
  char dbName[TSDB_DB_NAME_LEN];
} SVgroupCreateTableBatch;

static void destroyCreateTbReq(SVCreateTbReq* pReq) {
  taosMemoryFreeClear(pReq->name);
  taosMemoryFreeClear(pReq->comment);
  taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema);
}

static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo,
                                        SVgroupCreateTableBatch* pBatch) {
  char dbFName[TSDB_DB_FNAME_LEN] = {0};

@@ -5947,7 +5974,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
  if (pStmt->pOptions->commentNull == false) {
    req.comment = strdup(pStmt->pOptions->comment);
    if (NULL == req.comment) {
      destroyCreateTbReq(&req);
      tdDestroySVCreateTbReq(&req);
      return TSDB_CODE_OUT_OF_MEMORY;
    }
    req.commentLen = strlen(pStmt->pOptions->comment);

@@ -5958,7 +5985,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
  req.ntb.schemaRow.version = 1;
  req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema));
  if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) {
    destroyCreateTbReq(&req);
    tdDestroySVCreateTbReq(&req);
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  if (pStmt->ignoreExists) {

@@ -5974,7 +6001,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
  strcpy(pBatch->dbName, pStmt->dbName);
  pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
  if (NULL == pBatch->req.pArray) {
    destroyCreateTbReq(&req);
    tdDestroySVCreateTbReq(&req);
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  taosArrayPush(pBatch->req.pArray, &req);

@@ -6019,16 +6046,7 @@ static void destroyCreateTbReqBatch(void* data) {
  size_t size = taosArrayGetSize(pTbBatch->req.pArray);
  for (int32_t i = 0; i < size; ++i) {
    SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i);
    taosMemoryFreeClear(pTableReq->name);
    taosMemoryFreeClear(pTableReq->comment);

    if (pTableReq->type == TSDB_NORMAL_TABLE) {
      taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema);
    } else if (pTableReq->type == TSDB_CHILD_TABLE) {
      taosMemoryFreeClear(pTableReq->ctb.pTag);
      taosMemoryFreeClear(pTableReq->ctb.name);
      taosArrayDestroy(pTableReq->ctb.tagName);
    }
    tdDestroySVCreateTbReq(pTableReq);
  }

  taosArrayDestroy(pTbBatch->req.pArray);

@@ -6389,6 +6407,8 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
  if (TSDB_CODE_SUCCESS == code) {
    addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid,
                             pStmt->useTableName, &info, tagName, pSuperTableMeta->tableInfo.numOfTags);
  } else {
    taosMemoryFree(pTag);
  }

  taosArrayDestroy(tagName);

@@ -1124,7 +1124,7 @@ int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* p
  int32_t   reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo);
  SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex);
  if (TSDB_CODE_SUCCESS == pRes->code) {
    *pMeta = pRes->pRes;
    *pMeta = tableMetaDup(pRes->pRes);
    if (NULL == *pMeta) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }

@@ -123,6 +123,21 @@ int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen) {

int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan) { return nodesStringToNode(pStr, (SNode**)pSubplan); }

int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen) {
  if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType && NULL == pSubplan->pNode) {
    SDataInserterNode* insert = (SDataInserterNode*)pSubplan->pDataSink;
    *pLen = insert->size;
    *pStr = insert->pData;
    insert->pData = NULL;
    return TSDB_CODE_SUCCESS;
  }
  return nodesNodeToMsg((const SNode*)pSubplan, pStr, pLen);
}

int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan) {
  return nodesMsgToNode(pStr, len, (SNode**)pSubplan);
}

char* qQueryPlanToString(const SQueryPlan* pPlan) {
  char*   pStr = NULL;
  int32_t len = 0;

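`qSubPlanToMsg` above has two paths: a modify subplan whose insert payload is already serialized is handed back directly, with `insert->pData` set to NULL so ownership moves to the caller and the buffer is not freed twice; everything else goes through the generic node-to-message serializer. A minimal standalone sketch of that "hand off a pre-built buffer or fall back to serializing" pattern, where `ToyPlan`, `toySerialize`, and `toyPlanToMsg` are hypothetical stand-ins and not TDengine APIs:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char*   prebuilt;     // already-serialized payload, or NULL
  int32_t prebuiltLen;
  int32_t planId;       // something the generic path would serialize
} ToyPlan;

static char* toyDup(const char* s) {
  size_t n = strlen(s) + 1;
  char*  p = malloc(n);
  if (p != NULL) memcpy(p, s, n);
  return p;
}

static int32_t toySerialize(const ToyPlan* plan, char** pStr, int32_t* pLen) {
  char buf[64];
  int  n = snprintf(buf, sizeof(buf), "plan:%d", plan->planId);
  *pStr = toyDup(buf);
  *pLen = n;
  return (*pStr == NULL) ? -1 : 0;
}

static int32_t toyPlanToMsg(ToyPlan* plan, char** pStr, int32_t* pLen) {
  if (plan->prebuilt != NULL) {      // fast path: transfer buffer ownership
    *pStr = plan->prebuilt;
    *pLen = plan->prebuiltLen;
    plan->prebuilt = NULL;           // source no longer owns it
    return 0;
  }
  return toySerialize(plan, pStr, pLen);  // general path
}

int main(void) {
  ToyPlan plan = {.prebuilt = toyDup("raw-insert-payload"), .prebuiltLen = 18, .planId = 7};
  char*   msg = NULL;
  int32_t len = 0;
  if (0 == toyPlanToMsg(&plan, &msg, &len)) {
    printf("%.*s (%d bytes)\n", len, msg, len);
    free(msg);
  }
  return 0;
}
```

The scheduler-side change further down (`qSubPlanToString` replaced by `qSubPlanToMsg` in `schLaunchTaskImpl`) is the caller that benefits from this binary path.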
@@ -35,6 +35,8 @@ TEST_F(PlanOptimizeTest, scanPath) {

  run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) "
      "FILL(LINEAR)");

  run("SELECT COUNT(TBNAME) FROM t1");
}

TEST_F(PlanOptimizeTest, pushDownCondition) {

@@ -480,9 +480,14 @@ class PlannerTestBaseImpl {
      DO_WITH_THROW(nodesNodeToMsg, pNode, &pNewStr, &newlen)
      if (newlen != len || 0 != memcmp(pStr, pNewStr, len)) {
        cout << "nodesNodeToMsg error!!!!!!!!!!!!!! len = " << len << ", newlen = " << newlen << endl;
        taosMemoryFreeClear(pNewStr);
        DO_WITH_THROW(nodesNodeToString, pRoot, false, &pNewStr, &newlen)
        cout << "orac node: " << pNewStr << endl;
        taosMemoryFreeClear(pNewStr);
        DO_WITH_THROW(nodesNodeToString, pNode, false, &pNewStr, &newlen)
        cout << "nodesNodeToString " << pNewStr << endl;
        cout << "new node: " << pNewStr << endl;
      }
      nodesDestroyNode(pNode);
      taosMemoryFreeClear(pNewStr);

      string str(pStr, len);

@@ -134,8 +134,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code)
  schedMsg.thandle = execParam;
  schedMsg.msg = code;

  taosScheduleTask(&pTaskQueue, &schedMsg);
  return 0;
  return taosScheduleTask(&pTaskQueue, &schedMsg);
}

void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {

@@ -472,5 +471,3 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {

  return TSDB_CODE_SUCCESS;
}

@@ -559,7 +559,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) {

  // QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);

  code = qStringToSubplan(qwMsg->msg, &plan);
  code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan);
  if (TSDB_CODE_SUCCESS != code) {
    code = TSDB_CODE_INVALID_MSG;
    QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code));

@@ -968,7 +968,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) {
  DataSinkHandle sinkHandle = NULL;
  SQWTaskCtx     ctx = {0};

  code = qStringToSubplan(qwMsg->msg, &plan);
  code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan);
  if (TSDB_CODE_SUCCESS != code) {
    code = TSDB_CODE_INVALID_MSG;
    QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code));

@@ -430,12 +430,14 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32

  code = schDoTaskRedirect(pJob, pTask, pData, rspCode);
  taosMemoryFree(pData->pData);
  taosMemoryFree(pData->pEpSet);

  SCH_RET(code);

_return:

  taosMemoryFree(pData->pData);
  taosMemoryFree(pData->pEpSet);

  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}

@@ -860,7 +862,7 @@ int32_t schLaunchTaskImpl(void *param) {
  SSubplan *plan = pTask->plan;

  if (NULL == pTask->msg) {  // TODO add more detailed reason for failure
    code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen);
    code = qSubPlanToMsg(plan, &pTask->msg, &pTask->msgLen);
    if (TSDB_CODE_SUCCESS != code) {
      SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
                    pTask->msgLen);

@@ -35,6 +35,10 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
    goto _err;
  }

  if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb) < 0) {
    goto _err;
  }

  if (streamStateBegin(pState) < 0) {
    goto _err;
  }

@@ -44,8 +48,9 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
  return pState;

_err:
  if (pState->pStateDb) tdbTbClose(pState->pStateDb);
  if (pState->db) tdbClose(pState->db);
  tdbTbClose(pState->pStateDb);
  tdbTbClose(pState->pFuncStateDb);
  tdbClose(pState->db);
  taosMemoryFree(pState);
  return NULL;
}

@@ -53,6 +58,7 @@ _err:
void streamStateClose(SStreamState* pState) {
  tdbCommit(pState->db, &pState->txn);
  tdbTbClose(pState->pStateDb);
  tdbTbClose(pState->pFuncStateDb);
  tdbClose(pState->db);

  taosMemoryFree(pState);

@@ -101,6 +107,17 @@ int32_t streamStateAbort(SStreamState* pState) {
  return 0;
}

int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
  return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn);
}
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
  return tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
}

int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
  return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn);
}

int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
  return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
}

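The new `pFuncStateDb` table gives stream functions their own key-value namespace alongside the existing window-state table: `streamStateFuncPut`, `streamStateFuncGet`, and `streamStateFuncDel` are thin wrappers over `tdbTbUpsert`/`tdbTbGet`/`tdbTbDelete` keyed by a fixed-size `STupleKey` inside the state transaction. As a toy, in-memory illustration of that fixed-size-key upsert/get/delete surface only — the `ToyKey` layout and the linear-scan store below are made up; the real code persists entries in a TDB table:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { int64_t groupId; int64_t ts; } ToyKey;  // stand-in for STupleKey
typedef struct { ToyKey key; double sum; int used; } ToySlot;

#define TOY_CAP 16
static ToySlot store[TOY_CAP];

static int toyPut(const ToyKey* k, double v) {  // upsert: overwrite or take a free slot
  for (int i = 0; i < TOY_CAP; ++i)
    if (store[i].used && 0 == memcmp(&store[i].key, k, sizeof(*k))) { store[i].sum = v; return 0; }
  for (int i = 0; i < TOY_CAP; ++i)
    if (!store[i].used) { store[i] = (ToySlot){.key = *k, .sum = v, .used = 1}; return 0; }
  return -1;  // full
}

static int toyGet(const ToyKey* k, double* v) {
  for (int i = 0; i < TOY_CAP; ++i)
    if (store[i].used && 0 == memcmp(&store[i].key, k, sizeof(*k))) { *v = store[i].sum; return 0; }
  return -1;  // not found
}

static int toyDel(const ToyKey* k) {
  for (int i = 0; i < TOY_CAP; ++i)
    if (store[i].used && 0 == memcmp(&store[i].key, k, sizeof(*k))) { store[i].used = 0; return 0; }
  return -1;
}

int main(void) {
  ToyKey k = {.groupId = 1, .ts = 1658000000000};
  double v = 0;
  toyPut(&k, 42.5);                           // persist intermediate state
  if (0 == toyGet(&k, &v)) printf("restored: %f\n", v);
  toyDel(&k);                                 // drop it once it is no longer needed
  return 0;
}
```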
@@ -374,10 +374,12 @@ void cliHandleResp(SCliConn* conn) {

  if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) {
    tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn);
    transFreeMsg(transMsg.pCont);
    return;
  }
  if (CONN_RELEASE_BY_SERVER(conn) && transMsg.info.ahandle == NULL) {
    tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn);
    transFreeMsg(transMsg.pCont);
    return;
  }

@@ -393,7 +395,7 @@ void cliHandleResp(SCliConn* conn) {
  }

  if (CONN_NO_PERSIST_BY_APP(conn)) {
    addConnToPool(pThrd->pool, conn);
    return addConnToPool(pThrd->pool, conn);
  }

  uv_read_start((uv_stream_t*)conn->stream, cliAllocRecvBufferCb, cliRecvCb);

@@ -288,6 +288,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer not ready")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED,      "Topic subscribed cannot be dropped")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_MUST_BE_DELETED, "Topic must be dropped first")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED,           "Consumer group being used by some consumer")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_IN_REBALANCE,          "Topic being rebalanced")

// mnode-stream
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST,  "Stream already exists")

@@ -579,6 +580,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_NUM, "Invalid function par
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_TYPE,    "Invalid function para type")
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_VALUE,   "Invalid function para value")
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION,  "Not buildin function")
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_DUP_TIMESTAMP,        "Duplicate timestamps not allowed in function")

//udf
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING,              "udf is stopping")

@@ -149,18 +149,18 @@ void *taosProcessSchedQueue(void *scheduler) {
  return NULL;
}

void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
  SSchedQueue *pSched = (SSchedQueue *)queueScheduler;
  int32_t      ret = 0;

  if (pSched == NULL) {
    uError("sched is not ready, msg:%p is dropped", pMsg);
    return;
    return -1;
  }

  if (atomic_load_8(&pSched->stop)) {
    uError("sched is already stopped, msg:%p is dropped", pMsg);
    return;
    return -1;
  }

  if ((ret = tsem_wait(&pSched->emptySem)) != 0) {

@@ -185,6 +185,7 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
    uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno));
    ASSERT(0);
  }
  return ret;
}

void taosCleanUpScheduler(void *param) {

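With `taosScheduleTask` now returning an int, a caller such as `taosAsyncExec` (changed earlier in this diff) can propagate a dropped task — scheduler not ready or already stopped — instead of unconditionally reporting success. A small standalone sketch of that calling pattern; `ToyQueue`, `toySchedule`, and `toyAsyncExec` are hypothetical names, not TDengine's scheduler API:

```c
#include <stdio.h>

typedef struct {
  int stopped;  // set when the scheduler is shutting down
} ToyQueue;

typedef void (*ToyFn)(void*);

static int toySchedule(ToyQueue* q, ToyFn fn, void* arg) {
  if (q == NULL || q->stopped) {
    fprintf(stderr, "scheduler unavailable, task dropped\n");
    return -1;  // a void return would hide this drop from the caller
  }
  fn(arg);  // a real scheduler would enqueue instead of running inline
  return 0;
}

static void work(void* arg) { printf("ran task %d\n", *(int*)arg); }

static int toyAsyncExec(ToyQueue* q, ToyFn fn, void* arg) {
  return toySchedule(q, fn, arg);  // propagate the scheduling result
}

int main(void) {
  ToyQueue live = {0}, dead = {.stopped = 1};
  int      id = 1;
  printf("live: %d\n", toyAsyncExec(&live, work, &id));
  printf("dead: %d\n", toyAsyncExec(&dead, work, &id));
  return 0;
}
```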
@@ -22,9 +22,9 @@ from util.dnodes import *

class TDTestCase:
    def caseDescription(self):
        '''
        """
        [TD-13823] taosBenchmark test cases
        '''
        """
        return

    def init(self, conn, logSql):

@@ -34,19 +34,19 @@ class TDTestCase:
    def getPath(self, tool="taosBenchmark"):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
        if "community" in selfPath:
            projPath = selfPath[: selfPath.find("community")]
        else:
            projPath = selfPath[: selfPath.find("tests")]

        paths = []
        for root, dirs, files in os.walk(projPath):
            if ((tool) in files):
            if (tool) in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                if "packaging" not in rootRealPath:
                    paths.append(os.path.join(root, tool))
                    break
        if (len(paths) == 0):
        if len(paths) == 0:
            tdLog.exit("taosBenchmark not found!")
            return
        else:

@@ -77,14 +77,16 @@ class TDTestCase:
        tdSql.checkData(4, 3, "TAG")
        tdSql.checkData(5, 0, "location")
        tdSql.checkData(5, 1, "VARCHAR")
        tdSql.checkData(5, 2, 16)
        tdSql.checkData(5, 2, 24)
        tdSql.checkData(5, 3, "TAG")

        tdSql.query("select count(*) from test.meters where groupid >= 0")
        tdSql.checkData(0, 0, 10000)

        tdSql.query("select count(*) from test.meters where location = 'San Francisco' or location = 'Los Angles' or location = 'San Diego' or location = 'San Jose' or \
            location = 'Palo Alto' or location = 'Campbell' or location = 'Mountain View' or location = 'Sunnyvale' or location = 'Santa Clara' or location = 'Cupertino' ")
        tdSql.query(
            "select count(*) from test.meters where location = 'California.SanFrancisco' or location = 'California.LosAngles' or location = 'California.SanDiego' or location = 'California.SanJose' or \
            location = 'California.PaloAlto' or location = 'California.Campbell' or location = 'California.MountainView' or location = 'California.Sunnyvale' or location = 'California.SantaClara' or location = 'California.Cupertino' "
        )
        tdSql.checkData(0, 0, 10000)

    def stop(self):

@@ -38,18 +38,7 @@ class TDTestCase:
            elif i>=9:
                tdSql.checkData(0, 0, np.max(floatData))

        tdSql.query(f"select max(now()) from {dbname}.stb_1")
        tdSql.checkRows(1)

        tdSql.query(f"select last(ts) from {dbname}.stb_1")
        lastTs = tdSql.getData(0, 0)
        tdSql.query(f"select max(ts) from {dbname}.stb_1")
        tdSql.checkData(0, 0, lastTs)

        tdSql.query(f"select last(ts) from {dbname}.stb")
        lastTs = tdSql.getData(0, 0)
        tdSql.query(f"select max(ts) from {dbname}.stb")
        tdSql.checkData(0, 0, lastTs)
        tdSql.error(f"select max(now()) from {dbname}.stb_1")

        tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5")
        tdSql.checkData(0,0,5)

@@ -78,13 +67,7 @@ class TDTestCase:
            elif i>=9:
                tdSql.checkData(0, 0, np.max(floatData))

        tdSql.query(f"select max(now()) from {dbname}.ntb")
        tdSql.checkRows(1)

        tdSql.query(f"select last(ts) from {dbname}.ntb")
        lastTs = tdSql.getData(0, 0)
        tdSql.query(f"select max(ts) from {dbname}.ntb")
        tdSql.checkData(0, 0, lastTs)
        tdSql.error(f"select max(now()) from {dbname}.ntb")

        tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5")
        tdSql.checkData(0,0,5)

@@ -181,7 +181,7 @@ class TDTestCase:
        # bug need fix
        tdSql.checkData(0,1,None)

        tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1")
        tdSql.query(f"select c1 , twa(c1) from {dbname}.sub_stb_1 partition by c1 order by c1")
        tdSql.checkRows(11)
        tdSql.checkData(0,1,None)

@@ -37,6 +37,8 @@ class TDTestCase:
            floatData.append(i + 0.1)

        # max verification
tdSql.error(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col7) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col8) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col9) from {dbname}.stb_1")
|
||||
|
@ -67,20 +69,9 @@ class TDTestCase:
|
|||
tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5")
|
||||
tdSql.checkData(0,0,5)
|
||||
|
||||
tdSql.query(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
|
||||
tdSql.error(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col7) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col8) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col9) from {dbname}.stb_1")
|
||||
|
@ -111,19 +102,8 @@ class TDTestCase:
|
|||
tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5")
|
||||
tdSql.checkData(0,0,5)
|
||||
|
||||
tdSql.query(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
tdSql.error(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.error(f"select min(col7) from {dbname}.ntb")
|
||||
tdSql.error(f"select min(col8) from {dbname}.ntb")
|
||||
tdSql.error(f"select min(col9) from {dbname}.ntb")
|
||||
|
@ -154,19 +134,6 @@ class TDTestCase:
|
|||
tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5")
|
||||
tdSql.checkData(0,0,5)
|
||||
|
||||
tdSql.query(f"select min(now()) from {dbname}.stb_1")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
tdSql.query(f"select first(ts) from {dbname}.stb_1")
|
||||
firstTs = tdSql.getData(0, 0)
|
||||
tdSql.query(f"select min(ts) from {dbname}.stb_1")
|
||||
tdSql.checkData(0, 0, firstTs)
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -124,7 +124,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,4.500000000)
|
||||
|
||||
# mixup with other functions
|
||||
tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ")
|
||||
tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.ct1 ")
|
||||
tdSql.checkData(0,0,1.000000000)
|
||||
tdSql.checkData(0,1,11111.000000000)
|
||||
tdSql.checkData(0,2,1)
|
||||
|
|