Merge branch '3.0' into feature/TD-14481-3.0

Cary Xu 2022-06-08 15:33:20 +08:00
commit b5cf60170c
35 changed files with 1001 additions and 604 deletions

View File

@ -21,7 +21,7 @@ taosBenchmark 有两种安装方式:
### Configuration and running methods
taosBenchmark supports two configuration methods: [command-line arguments](#命令行参数详解) and a [JSON configuration file](#配置文件参数详解). The two methods are mutually exclusive: when a configuration file is used, `-f <json file>` is the only command-line argument allowed. When taosBenchmark is driven by command-line arguments instead, the `-f` argument cannot be used and all settings must be given through other arguments. Beyond these two, taosBenchmark also offers a special mode of running with no arguments at all.
taosBenchmark must be run from an operating-system terminal. It supports two configuration methods: [command-line arguments](#命令行参数详解) and a [JSON configuration file](#配置文件参数详解). The two methods are mutually exclusive: when a configuration file is used, `-f <json file>` is the only command-line argument allowed. When taosBenchmark is driven by command-line arguments instead, the `-f` argument cannot be used and all settings must be given through other arguments. Beyond these two, taosBenchmark also offers a special mode of running with no arguments at all.
taosBenchmark supports complete performance testing of TDengine across three function categories: writing, querying, and subscribing. The three are mutually exclusive, and each run of taosBenchmark can exercise only one of them. Note that the function type cannot be chosen when the command-line configuration method is used; that method tests write performance only. To test TDengine's query or subscription performance, use the configuration-file method and specify the function type with the `filetype` parameter in the configuration file.
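As a rough sketch of the two modes described above (the flag values are illustrative only and are not taken from this commit):
```bash
# Write-performance test configured entirely on the command line; -f must not appear here
taosBenchmark -d test -t 100 -n 10000   # hypothetical database name, table count, rows per table
# Query or subscription test: the JSON file's "filetype" field selects the function to test
taosBenchmark -f ./query.json
```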

View File

@ -134,7 +134,7 @@ taos --dump-config
| Applicable | Server only |
| Meaning | Switch for the server's internal system monitoring. Monitoring collects the load of the physical node, including CPU, memory, disk, network bandwidth, and HTTP request counts, and stores the records in the `LOG` database. |
| Value Range | 0: monitoring disabled; 1: monitoring enabled |
| Default Value | 0 |
| Default Value | 1 |
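For reference, enabling the switch on a typical Linux install might look like the sketch below; the config path and service name are assumptions, not part of this change:
```bash
# Append the setting to the server configuration and restart taosd to apply it
echo "monitor 1" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```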
### monitorInterval

View File

@ -8,31 +8,24 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
## Prerequisites
The following preparations are required before EMQX can add TDengine as a data source.
- The TDengine cluster is deployed and running normally
- taosAdapter is installed and running normally. For details, see the [taosAdapter user manual](/reference/taosadapter)
- If you use the mock writer program described later, install a suitable version of Node.js (v12 is recommended)
## Install and start EMQX
Depending on your operating system, download the installation package from the EMQX website at <https://www.emqx.io/zh/downloads> and install it. After installation, start the EMQX service with `sudo emqx start` or `sudo systemctl start emqx`.
## Create the appropriate database and table schema in TDengine for receiving MQTT data
### Take the Docker installation of TDengine as an example
## Create Database and Table
```bash
docker exec -it tdengine bash
taos
```
### Create Database and Table
Create the appropriate database and table schema in TDengine for receiving MQTT data. Enter the TDengine CLI, then copy and execute the following SQL statements:
```sql
create database test;
use test;
create table:
CREATE TABLE sensor_data (ts timestamp, temperature float, humidity float, volume float, PM10 float, pm25 float, SO2 float, NO2 float, CO float, sensor_id NCHAR(255), area TINYINT, coll_time timestamp);
CREATE DATABASE test;
USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
Note: The table schema follows the blog [(in Chinese) Data Transfer, Storage, and Presentation: Building an MQTT IoT Data Visualization Platform with EMQX + TDengine](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations use this blog's scenario as well; please adapt them to your actual application.
@ -43,7 +36,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
### Log in to the EMQX Dashboard
Open http://IP:18083 in a browser and log in to the EMQX Dashboard. For a fresh installation the username is `admin` and the password is `public`.
![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp)
@ -55,6 +48,17 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
### Edit the SQL field
Copy the following content into the SQL edit box:
```sql
SELECT
payload
FROM
"sensor/data"
```
Here `payload` represents the entire message body, and `sensor/data` is the message topic selected by this rule.
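To check the rule by hand, you can publish a single message to that topic. The sketch below assumes the `mosquitto_pub` client is installed and EMQX is listening on its default port 1883; all field values are made up:
```bash
# Publish one hypothetical reading to the topic matched by the rule
mosquitto_pub -h localhost -p 1883 -t "sensor/data" -m \
  '{"temperature":23.5,"humidity":40.1,"volume":50.0,"PM10":12.0,"pm25":8.0,"SO2":1.2,"NO2":0.8,"CO":0.3,"id":"mock_client_0","area":0,"ts":1596157444170}'
```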
![TDengine Database EMQX create rule](./emqx/create-rule.webp)
### Add an "action handler"
@ -65,101 +69,54 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
![TDengine Database EMQX create resource](./emqx/create-resource.webp)
Select "Data to Web Service" and click the "New Resource" button:
### Edit the "Resource"
Select "Data to Web Service" and fill in the request URL with the address and port of the server running taosAdapter (6041 by default). Leave the other properties at their default values.
Select "WebHook" and fill in the "Request URL" with the address where taosAdapter provides its REST service. For a locally started taosAdapter the default address is:
```
http://127.0.0.1:6041/rest/sql
```
Leave the other properties at their default values.
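Before wiring EMQX to this URL, it can help to confirm that the REST endpoint responds. A minimal check, assuming the default `root:taosdata` credentials:
```bash
# A JSON result should come back if taosAdapter is reachable
curl -u root:taosdata -d "show databases;" http://127.0.0.1:6041/rest/sql
```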
![TDengine Database EMQX edit resource](./emqx/edit-resource.webp)
### Edit the "action"
Edit the resource configuration and add a key/value pair for Authorization authentication. For details, see the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/). Enter the rule engine replacement template in the message body.
Edit the resource configuration and add a key/value pair for Authorization authentication. For the default username and password, the Authorization value is:
```
Basic cm9vdDp0YW9zZGF0YQ==
```
For details, see the [TDengine REST API documentation](/reference/rest-api/).
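The value above is simply the Base64 encoding of `username:password`, so it can be reproduced (or regenerated for other credentials) like this:
```bash
# "root:taosdata" are the default TDengine credentials
echo -n "root:taosdata" | base64   # prints cm9vdDp0YW9zZGF0YQ==
```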
Enter the rule engine replacement template in the message body:
```sql
INSERT INTO test.sensor_data VALUES(
now,
${payload.temperature},
${payload.humidity},
${payload.volume},
${payload.PM10},
${payload.pm25},
${payload.SO2},
${payload.NO2},
${payload.CO},
'${payload.id}',
${payload.area},
${payload.ts}
)
```
![TDengine Database EMQX edit action](./emqx/edit-action.webp)
Finally, click the "Create" button at the bottom left to save the rule.
## Write a mock test program
```javascript
// mock.js
const mqtt = require('mqtt')
const Mock = require('mockjs')
const EMQX_SERVER = 'mqtt://localhost:1883'
const CLIENT_NUM = 10
const STEP = 5000 // Simulated collection interval, in ms
const AWAIT = 5000 // Sleep after each send so messages are not published too fast, in ms
const CLIENT_POOL = []
startMock()
function sleep(timer = 100) {
return new Promise(resolve => {
setTimeout(resolve, timer)
})
}
async function startMock() {
const now = Date.now()
for (let i = 0; i < CLIENT_NUM; i++) {
const client = await createClient(`mock_client_${i}`)
CLIENT_POOL.push(client)
}
// last 24h every 5s
const last = 24 * 3600 * 1000
for (let ts = now - last; ts <= now; ts += STEP) {
for (const client of CLIENT_POOL) {
const mockData = generateMockData()
const data = {
...mockData,
id: client.clientId,
area: 0,
ts,
}
client.publish('sensor/data', JSON.stringify(data))
}
const dateStr = new Date(ts).toLocaleTimeString()
console.log(`${dateStr} send success.`)
await sleep(AWAIT)
}
console.log(`Done, use ${(Date.now() - now) / 1000}s`)
}
/**
* Init a virtual mqtt client
* @param {string} clientId ClientID
*/
function createClient(clientId) {
return new Promise((resolve, reject) => {
const client = mqtt.connect(EMQX_SERVER, {
clientId,
})
client.on('connect', () => {
console.log(`client ${clientId} connected`)
resolve(client)
})
client.on('reconnect', () => {
console.log('reconnect')
})
client.on('error', (e) => {
console.error(e)
reject(e)
})
})
}
/**
* Generate mock data
*/
function generateMockData() {
return {
"temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)),
"humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)),
"volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)),
"PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"area": Mock.Random.integer(0, 20),
"ts": 1596157444170,
}
}
{{#include docs-examples/other/mock.js}}
```
Note: at the start of testing, set `CLIENT_NUM` in the code to a small value first, so that limited hardware is not overwhelmed by a large number of concurrent clients.
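Once the mock program has been publishing for a while, a quick sanity check (not part of the original walkthrough) is to count the ingested rows from the TDengine CLI:
```bash
# The row count should keep growing while mock.js is running
taos -s "SELECT COUNT(*) FROM test.sensor_data;"
```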
@ -189,4 +146,3 @@ node mock.js
For detailed usage of TDengine, see the [official TDengine documentation](https://docs.taosdata.com/).
For detailed usage of EMQX, see the [official EMQX documentation](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html).

View File

@ -21,7 +21,7 @@ There are two ways to install taosBenchmark:
### Configuration and running methods
taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
taosBenchmark needs to be executed in a terminal of the operating system. It supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
taosBenchmark supports complete performance testing of TDengine across three function categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. Note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
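A rough sketch of the two modes (the flag values are illustrative only and not taken from this commit):
```bash
# Write test driven purely by command-line arguments; -f is not allowed here
taosBenchmark -d test -t 100 -n 10000
# Query/subscription test: "filetype" inside the JSON configuration selects the function type
taosBenchmark -f ./query.json
```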

View File

@ -134,7 +134,7 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port spec
| Applicable | Server Only |
| Meaning | The switch for monitoring inside the server. The workload of the hosts, including CPU, memory, disk, network, and HTTP requests, is collected and stored in the built-in system database `LOG` |
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
| Default Value | 0 |
| Default Value | 1 |
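As an illustration only (the config path and service name below are assumptions), the switch is normally set in `taos.cfg` and picked up after a server restart:
```bash
echo "monitor 1" | sudo tee -a /etc/taos/taos.cfg   # assumed default config location
sudo systemctl restart taosd
```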
### monitorInterval

View File

@ -16,22 +16,15 @@ The following preparations are required for EMQX to add TDengine data sources co
Depending on your operating system, download the installation package from the [EMQX official website](https://www.emqx.io/downloads) and install it. After installation, use `sudo emqx start` or `sudo systemctl start emqx` to start the EMQX service.
## Create the appropriate database and table schema in TDengine for receiving MQTT data
### Take the Docker installation of TDengine as an example
## Create Database and Table
```bash
docker exec -it tdengine bash
taos
```
### Create Database and Table
In this step we create the appropriate database and table schema in TDengine for receiving MQTT data. Open the TDengine CLI and execute the SQL below:
```sql
CREATE DATABASE test;
USE test;
CREATE TABLE sensor_data (ts timestamp, temperature float, humidity float, volume float, PM10 float, pm25 float, SO2 float, NO2 float, CO float, sensor_id NCHAR(255), area TINYINT, coll_time timestamp);
CREATE DATABASE test;
USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation: EMQX + TDengine Build an MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations follow this blog's scenario as well; please modify it according to your actual application scenario.
@ -54,6 +47,15 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
### Edit SQL fields
Copy the SQL below and paste it into the SQL edit area:
```sql
SELECT
payload
FROM
"sensor/data"
```
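Here `payload` is the whole message body and `sensor/data` is the topic the rule subscribes to. To smoke-test the rule you can publish one message manually; this sketch assumes the `mosquitto_pub` client and EMQX's default port 1883, with made-up values:
```bash
mosquitto_pub -h localhost -p 1883 -t "sensor/data" -m \
  '{"temperature":25.0,"humidity":55.2,"volume":80.0,"PM10":20.0,"pm25":10.0,"SO2":2.0,"NO2":1.0,"CO":0.5,"id":"mock_client_1","area":3,"ts":1596157500000}'
```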
![TDengine Database EMQX create rule](./emqx/create-rule.webp)
### Add "action handler"
@ -68,97 +70,45 @@ Select "Data to Web Service" and click the "New Resource" button.
### Edit "Resource"
Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values.
Select "WebHook" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values.
![TDengine Database EMQX edit resource](./emqx/edit-resource.webp)
### Edit "action"
Edit the resource configuration to add the key/value pair for Authorization. Please refer to the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/) for details on authorization. Enter the rule engine replacement template in the message body.
Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, then the Authorization value is:
```
Basic cm9vdDp0YW9zZGF0YQ==
```
Please refer to the [TDengine REST API documentation](/reference/rest-api/) for details on authorization.
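That value is just the Base64 encoding of `username:password`, so a different account can be encoded the same way:
```bash
printf 'root:taosdata' | base64   # -> cm9vdDp0YW9zZGF0YQ==
```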
Enter the rule engine replacement template in the message body:
```sql
INSERT INTO test.sensor_data VALUES(
now,
${payload.temperature},
${payload.humidity},
${payload.volume},
${payload.PM10},
${payload.pm25},
${payload.SO2},
${payload.NO2},
${payload.CO},
'${payload.id}',
${payload.area},
${payload.ts}
)
```
![TDengine Database EMQX edit action](./emqx/edit-action.webp)
Finally, click the "Create" button at the bottom left corner to save the rule.
## Write a program to mock data
```javascript
// mock.js
const mqtt = require('mqtt')
const Mock = require('mockjs')
const EMQX_SERVER = 'mqtt://localhost:1883'
const CLIENT_NUM = 10
const STEP = 5000 // Data interval in ms
const AWAIT = 5000 // Sleep time after data be written once to avoid data writing too fast
const CLIENT_POOL = []
startMock()
function sleep(timer = 100) {
return new Promise(resolve => {
setTimeout(resolve, timer)
})
}
async function startMock() {
const now = Date.now()
for (let i = 0; i < CLIENT_NUM; i++) {
const client = await createClient(`mock_client_${i}`)
CLIENT_POOL.push(client)
}
// last 24h every 5s
const last = 24 * 3600 * 1000
for (let ts = now - last; ts <= now; ts += STEP) {
for (const client of CLIENT_POOL) {
const mockData = generateMockData()
const data = {
...mockData,
id: client.clientId,
area: 0,
ts,
}
client.publish('sensor/data', JSON.stringify(data))
}
const dateStr = new Date(ts).toLocaleTimeString()
console.log(`${dateStr} send success.`)
await sleep(AWAIT)
}
console.log(`Done, use ${(Date.now() - now) / 1000}s`)
}
/**
* Init a virtual mqtt client
* @param {string} clientId ClientID
*/
function createClient(clientId) {
return new Promise((resolve, reject) => {
const client = mqtt.connect(EMQX_SERVER, {
clientId,
})
client.on('connect', () => {
console.log(`client ${clientId} connected`)
resolve(client)
})
client.on('reconnect', () => {
console.log('reconnect')
})
client.on('error', (e) => {
console.error(e)
reject(e)
})
})
}
/**
* Generate mock data
*/
function generateMockData() {
return {
"temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)),
"humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)),
"volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)),
"PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"area": Mock.Random.integer(0, 20),
"ts": 1596157444170,
}
}
{{#include docs-examples/other/mock.js}}
```
Note: at the beginning of the test, set `CLIENT_NUM` in the code to a smaller value so that limited hardware is not overwhelmed by a large number of concurrent clients.
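After `node mock.js` has run for a while, a simple check (not part of the original guide) is to count the rows that have reached TDengine:
```bash
taos -s "SELECT COUNT(*) FROM test.sensor_data;"
```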

View File

@ -0,0 +1,78 @@
// mock.js
const mqtt = require('mqtt')
const Mock = require('mockjs')
const EMQX_SERVER = 'mqtt://localhost:1883'
const CLIENT_NUM = 10
const STEP = 5000 // Data interval in ms
const AWAIT = 5000 // Sleep time after data be written once to avoid data writing too fast
const CLIENT_POOL = []
startMock()
function sleep(timer = 100) {
return new Promise(resolve => {
setTimeout(resolve, timer)
})
}
async function startMock() {
const now = Date.now()
for (let i = 0; i < CLIENT_NUM; i++) {
const client = await createClient(`mock_client_${i}`)
CLIENT_POOL.push(client)
}
// last 24h every 5s
const last = 24 * 3600 * 1000
for (let ts = now - last; ts <= now; ts += STEP) {
for (const client of CLIENT_POOL) {
const mockData = generateMockData()
const data = {
...mockData,
id: client.clientId,
area: 0,
ts,
}
client.publish('sensor/data', JSON.stringify(data))
}
const dateStr = new Date(ts).toLocaleTimeString()
console.log(`${dateStr} send success.`)
await sleep(AWAIT)
}
console.log(`Done, use ${(Date.now() - now) / 1000}s`)
}
/**
* Init a virtual mqtt client
* @param {string} clientId ClientID
*/
function createClient(clientId) {
return new Promise((resolve, reject) => {
const client = mqtt.connect(EMQX_SERVER, {
clientId,
})
client.on('connect', () => {
console.log(`client ${clientId} connected`)
resolve(client)
})
client.on('reconnect', () => {
console.log('reconnect')
})
client.on('error', (e) => {
console.error(e)
reject(e)
})
})
}
/**
* Generate mock data
*/
function generateMockData() {
return {
"temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)),
"humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)),
"volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)),
"PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)),
"SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)),
"area": Mock.Random.integer(0, 20),
"ts": 1596157444170,
}
}

View File

@ -207,8 +207,8 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_JOIN,
QUERY_NODE_PHYSICAL_PLAN_AGG,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
@ -218,11 +218,11 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,

View File

@ -188,6 +188,13 @@ SDiskbasedBufStatis getDBufStatis(const SDiskbasedBuf* pBuf);
*/
void dBufPrintStatis(const SDiskbasedBuf* pBuf);
/**
* Mark all of the pages in the buffer as no longer needed
* @param pBuf
* @return
*/
void clearDiskbasedBuf(SDiskbasedBuf* pBuf);
#ifdef __cplusplus
}
#endif

View File

@ -1131,6 +1131,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, size_t existRows, ui
if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
char* tmp = taosMemoryRealloc(pColumn->varmeta.offset, sizeof(int32_t) * numOfRows);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -1156,6 +1157,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, size_t existRows, ui
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
memset(tmp + pColumn->info.bytes * existRows, 0, pColumn->info.bytes * (numOfRows - existRows));
pColumn->pData = tmp;
}
@ -1214,7 +1216,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
pBlock->info.numOfCols = numOfCols;
pBlock->info.hasVarCol = pDataBlock->info.hasVarCol;
pBlock->info.rowSize = pDataBlock->info.rows;
pBlock->info.rowSize = pDataBlock->info.rowSize;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData colInfo = {0};
@ -1500,7 +1502,7 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) {
for (int32_t k = 0; k < colNum; k++) {
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
if (pColInfoData->hasNull) {
if (colDataIsNull(pColInfoData, rows, j, NULL)) {
printf(" %15s |", "NULL");
continue;
}

View File

@ -47,6 +47,9 @@ extern "C" {
#define EXPLAIN_TIME_WINDOWS_FORMAT "Time Window: interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c"
#define EXPLAIN_WINDOW_FORMAT "Window: gap=%" PRId64
#define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
#define EXPLAIN_MERGE_FORMAT "Merge"
#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
#define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
#define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"

View File

@ -133,12 +133,12 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
pPhysiChildren = pPrjNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
pPhysiChildren = pJoinNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG: {
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode;
pPhysiChildren = pAggNode->node.pChildren;
break;
@ -158,12 +158,12 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
pPhysiChildren = pIntNode->window.node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: {
SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode;
pPhysiChildren = pSessNode->window.node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: {
SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode;
pPhysiChildren = pStateNode->window.node.pChildren;
break;
@ -173,6 +173,11 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
pPhysiChildren = partitionPhysiNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_MERGE: {
SMergePhysiNode *mergePhysiNode = (SMergePhysiNode *)pNode;
pPhysiChildren = mergePhysiNode->node.pChildren;
break;
}
default:
qError("not supported physical node type %d", pNode->type);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
@ -513,7 +518,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_JOIN_FORMAT, EXPLAIN_JOIN_STRING(pJoinNode->joinType));
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
@ -553,7 +558,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG: {
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
@ -744,7 +749,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: {
SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_SESSION_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
@ -782,7 +787,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: {
SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT,
@ -857,6 +862,50 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_MERGE: {
SMergePhysiNode *pMergeNode = (SMergePhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
SDataBlockDescNode *pDescNode = pMergeNode->node.pOutputDataBlockDesc;
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
nodesGetOutputNumFromSlotList(pMergeNode->node.pOutputDataBlockDesc->pSlots));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pMergeNode->node.pOutputDataBlockDesc->outputRowSize);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) {
SOrderByExprNode *ptn = nodesListGetNode(pMergeNode->pMergeKeys, i);
EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (pMergeNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pMergeNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
default:
qError("not supported physical node type %d", pNode->type);
return TSDB_CODE_QRY_APP_ERROR;

View File

@ -488,6 +488,8 @@ typedef struct SStreamFinalIntervalOperatorInfo {
int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
SArray* pChildren;
SSDataBlock* pUpdateRes;
SPhysiNode* pPhyNode; // create new child
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
@ -785,7 +787,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SNode* pCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
SArray* pIndexMap, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams,
SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock,
SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo);
@ -793,9 +795,8 @@ SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);

View File

@ -130,6 +130,12 @@ bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colId);
*/
void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
/**
* Get the group id of the tuple referenced by the given tuple handle
* @param pVHandle
* @return
*/
uint64_t tsortGetGroupId(STupleHandle* pVHandle);
/**
*
* @param pSortHandle

View File

@ -3936,7 +3936,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
}
pOperator->name = "TableAggregate";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_AGG;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_AGG;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
@ -4582,7 +4582,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SLimit slimit = {.limit = pProjPhyNode->slimit, .offset = pProjPhyNode->soffset};
pOptr = createProjectOperatorInfo(ops[0], pExprInfo, num, pResBlock, &limit, &slimit,
pProjPhyNode->node.pConditions, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_AGG == type) {
} else if (QUERY_NODE_PHYSICAL_PLAN_HASH_AGG == type) {
SAggPhysiNode* pAggNode = (SAggPhysiNode*)pPhyNode;
SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num);
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
@ -4634,6 +4634,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 8;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode;
@ -4660,9 +4666,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
int32_t numOfOutputCols = 0;
SArray* pColList =
extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID);
pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pResBlock, sortInfo, pColList, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW == type) {
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0);
SSDataBlock* pInputDataBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc);
pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pInputDataBlock, pResBlock, sortInfo, pColList, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION == type) {
SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark,
@ -4674,7 +4681,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr =
createSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW == type) {
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION == type) {
SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark,
@ -4694,7 +4701,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &num);
pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW == type) {
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
@ -4706,9 +4713,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
SColumn col = extractColumnFromColumnNode(pColNode);
pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW == type) {
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_JOIN == type) {
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
SJoinPhysiNode* pJoinNode = (SJoinPhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
@ -5100,12 +5107,12 @@ int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) {
return TDB_CODE_SUCCESS;
}
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo) {
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo) {
SExecTaskInfo* pTask = *(SExecTaskInfo**)pTaskInfo;
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_DELETE: {
SDeleterParam *pDeleterParam = taosMemoryCalloc(1, sizeof(SDeleterParam));
SDeleterParam* pDeleterParam = taosMemoryCalloc(1, sizeof(SDeleterParam));
if (NULL == pDeleterParam) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -5116,7 +5123,7 @@ int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pT
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < tbNum; ++i) {
STableKeyInfo *pTable = taosArrayGet(pTask->tableqinfoList.pTableList, i);
STableKeyInfo* pTable = taosArrayGet(pTask->tableqinfoList.pTableList, i);
taosArrayPush(pDeleterParam->pUidList, &pTable->uid);
}
@ -5130,7 +5137,6 @@ int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pT
return TSDB_CODE_SUCCESS;
}
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
EOPTR_EXEC_MODEL model) {
uint64_t queryId = pPlan->id.queryId;

View File

@ -13,20 +13,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "executorimpl.h"
#include "function.h"
#include "os.h"
#include "querynodes.h"
#include "tdatablock.h"
#include "tmsg.h"
#include "executorimpl.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "thash.h"
#include "tmsg.h"
#include "ttypes.h"
static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
static void destroyMergeJoinOperator(void* param, int32_t numOfOutput);
static void extractTimeCondition(SJoinOperatorInfo *Info, SLogicConditionNode* pLogicConditionNode);
static void extractTimeCondition(SJoinOperatorInfo* Info, SLogicConditionNode* pLogicConditionNode);
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition,
@ -41,7 +41,7 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
pInfo->pRes = pResBlock;
pOperator->name = "MergeJoinOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_JOIN;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExprInfo;
@ -54,7 +54,7 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);
setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
} else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) {
extractTimeCondition(pInfo, (SLogicConditionNode*) pOnCondition);
extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition);
}
pOperator->fpSet =
@ -66,7 +66,7 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
return pOperator;
_error:
_error:
taosMemoryFree(pInfo);
taosMemoryFree(pOperator);
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
@ -180,10 +180,10 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
return (pRes->info.rows > 0) ? pRes : NULL;
}
static void extractTimeCondition(SJoinOperatorInfo *pInfo, SLogicConditionNode* pLogicConditionNode) {
static void extractTimeCondition(SJoinOperatorInfo* pInfo, SLogicConditionNode* pLogicConditionNode) {
int32_t len = LIST_LENGTH(pLogicConditionNode->pParameterList);
for(int32_t i = 0; i < len; ++i) {
for (int32_t i = 0; i < len; ++i) {
SNode* pNode = nodesListGetNode(pLogicConditionNode->pParameterList, i);
if (nodeType(pNode) == QUERY_NODE_OPERATOR) {
SOperatorNode* pn1 = (SOperatorNode*)pNode;

View File

@ -42,10 +42,11 @@ static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capac
static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
const char* dbName);
static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock);
static bool processBlockWithProbability(const SSampleExecInfo *pInfo);
static void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr,
SSDataBlock* pBlock);
static bool processBlockWithProbability(const SSampleExecInfo* pInfo);
bool processBlockWithProbability(const SSampleExecInfo *pInfo) {
bool processBlockWithProbability(const SSampleExecInfo* pInfo) {
#if 0
if (pInfo->sampleRatio == 1) {
return true;
@ -261,7 +262,8 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
// currently only the tbname pseudo column
if (pTableScanInfo->numOfPseudoExpr > 0) {
addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock);
addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr,
pBlock);
}
int64_t st = taosGetTimestampMs();
@ -295,7 +297,8 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction
taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
}
void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) {
void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr,
SSDataBlock* pBlock) {
// currently only the tbname pseudo column
if (numOfPseudoExpr == 0) {
return;
@ -391,10 +394,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, 0);
int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
uint64_t *groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
if (groupId) {
pBlock->info.groupId = *groupId;
}else if(len != 0){
} else if (len != 0) {
pBlock->info.groupId = calcGroupId(pTableScanInfo->keyBuf, len);
taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &pBlock->info.groupId, sizeof(uint64_t));
}
@ -483,7 +486,8 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
qDebug("%s start to repeat descending order scan data blocks due to query func required",
GET_TASKID(pTaskInfo));
for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) {
STimeWindow* pWin = &pTableScanInfo->cond.twindows[i];
qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
@ -525,7 +529,7 @@ static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
tsdbCleanupReadHandle(pTableScanInfo->dataReader);
taosArrayDestroy(pTableScanInfo->pGroupCols);
for(int i = 0; i < taosArrayGetSize(pTableScanInfo->pGroupColVals); i++){
for (int i = 0; i < taosArrayGetSize(pTableScanInfo->pGroupColVals); i++) {
SGroupKeys key = *(SGroupKeys*)taosArrayGet(pTableScanInfo->pGroupColVals, i);
taosMemoryFree(key.pData);
}
@ -562,11 +566,11 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
pInfo->readHandle = *readHandle;
pInfo->interval = extractIntervalInfo(pTableScanNode);
pInfo->sample.sampleRatio= pTableScanNode->ratio;
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();
pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
@ -604,7 +608,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
pOperator->cost.openCost = 0;
return pOperator;
_error:
_error:
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
@ -723,16 +727,19 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) {
}
static bool isSessionWindow(SStreamBlockScanInfo* pInfo) {
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW;
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
}
static bool isStateWindow(SStreamBlockScanInfo* pInfo) {
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW;
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
}
static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
SSDataBlock* pSDB = pInfo->pUpdateRes;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX,};
STimeWindow win = {
.skey = INT64_MIN,
.ekey = INT64_MAX,
};
bool needRead = false;
if (!isStateWindow(pInfo) && pInfo->updateResIndex < pSDB->info.rows) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, pInfo->primaryTsIndex);
@ -759,7 +766,7 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
SArray* pWins = pInfo->sessionSup.pStreamAggSup->pScanWindow;
int32_t size = taosArrayGetSize(pWins);
if (pInfo->scanWinIndex < size) {
win = *(STimeWindow *)taosArrayGet(pWins, pInfo->scanWinIndex);
win = *(STimeWindow*)taosArrayGet(pWins, pInfo->scanWinIndex);
pInfo->scanWinIndex++;
needRead = true;
} else {
@ -790,8 +797,8 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo) {
return pResult;
}
static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible,
SSDataBlock* pBlock, SSDataBlock* pUpdateBlock) {
static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock,
SSDataBlock* pUpdateBlock) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* ts = (TSKEY*)pColDataInfo->pData;
@ -859,8 +866,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
return pInfo->pUpdateRes;
} else {
if (isStateWindow(pInfo) &&
taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) {
if (isStateWindow(pInfo) && taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
pInfo->updateResIndex = pInfo->pUpdateRes->info.rows;
prepareDataScan(pInfo);
@ -956,16 +962,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
if (rows == 0) {
pOperator->status = OP_EXEC_DONE;
} else if (pInfo->pUpdateInfo) {
SSDataBlock* upRes = createOneDataBlock(pInfo->pRes, false);
getUpdateDataBlock(pInfo, true, pInfo->pRes, upRes);
if (upRes) {
pInfo->pUpdateRes = upRes;
if (upRes->info.type == STREAM_REPROCESS) {
blockDataCleanup(pInfo->pUpdateRes);
getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes);
if (pInfo->pUpdateRes->info.rows > 0) {
if (pInfo->pUpdateRes->info.type == STREAM_REPROCESS) {
pInfo->updateResIndex = 0;
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (upRes->info.type == STREAM_INVERT) {
} else if (pInfo->pUpdateRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return upRes;
return pInfo->pUpdateRes;
}
}
}
@ -974,8 +979,8 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
}
SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, SArray* pTableIdList,
STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
STimeWindowAggSupp* pTwSup) {
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@ -992,7 +997,8 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info;
int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
pInfo->pColMatchInfo =
extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo);
SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
@ -1025,8 +1031,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
}
if (isSmaStream(pTableScanNode->triggerType)) {
pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval,
pTableScanNode->filesFactor);
pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval, pTableScanNode->filesFactor);
}
if (pSTInfo->interval.interval > 0 && pDataReader) {
@ -1044,13 +1049,13 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
pInfo->tableUid = pScanPhyNode->uid;
pInfo->streamBlockReader = pHandle->reader;
pInfo->pRes = createResDataBlock(pDescNode);
pInfo->pUpdateRes = createResDataBlock(pDescNode);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->pDataReader = pDataReader;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
pInfo->pOperatorDumy = pTableScanDummy;
pInfo->interval = pSTInfo->interval;
pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
pOperator->name = "StreamBlockScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
pOperator->blocking = false;
@ -1059,8 +1064,8 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
pOperator->numOfExprs = pInfo->pRes->info.numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL,
NULL, operatorDummyCloseFn, NULL, NULL, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL);
return pOperator;
@ -1445,8 +1450,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
SRetrieveMetaTableRsp* pTableRsp = pInfo->pRsp;
setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pOperator->numOfExprs, startTs, NULL, pInfo->scanCols);
setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen,
pOperator->numOfExprs, startTs, NULL, pInfo->scanCols);
// todo log the filter info
doFilterResult(pInfo);
@ -1519,7 +1524,8 @@ int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbT
return numOfRows;
}
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, SExecTaskInfo* pTaskInfo) {
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
SExecTaskInfo* pTaskInfo) {
SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -1567,7 +1573,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
return pOperator;
_error:
_error:
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
@ -1687,16 +1693,16 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val);
char *data = NULL;
if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL){
data = tTagValToData((const STagVal *)p, false);
}else {
char* data = NULL;
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
data = tTagValToData((const STagVal*)p, false);
} else {
data = (char*)p;
}
colDataAppend(pDst, count, data, (data == NULL));
if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL
&& IS_VAR_DATA_TYPE(((const STagVal *)p)->type) && data != NULL){
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) &&
data != NULL) {
taosMemoryFree(data);
}
}
@ -1726,7 +1732,8 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = blockDataDestroy(pInfo->pRes);
}
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -1743,7 +1750,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
pInfo->pTableList = pTableListInfo;
pInfo->pColMatchInfo = colList;
pInfo->pRes = createResDataBlock(pDescNode);;
pInfo->pRes = createResDataBlock(pDescNode);
;
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pOperator->name = "TagScanOperator";

View File

@ -84,7 +84,6 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) {
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
SArray* pColMatchInfo) {
blockDataCleanup(pDataBlock);
ASSERT(taosArrayGetSize(pColMatchInfo) == pDataBlock->info.numOfCols);
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
if (p == NULL) {
@ -230,7 +229,12 @@ typedef struct SMultiwaySortMergeOperatorInfo {
SSortHandle* pSortHandle;
SArray* pColMatchInfo; // for index map from table scan output
SSDataBlock* pInputBlock;
int64_t startTs; // sort start time
bool hasGroupId;
uint64_t groupId;
STupleHandle *prefetchedTuple;
} SMultiwaySortMergeOperatorInfo;
int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {
@ -246,14 +250,14 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_MULTISOURCE_MERGE,
pInfo->bufPageSize, numOfBufPage, NULL, pTaskInfo->id.str);
pInfo->bufPageSize, numOfBufPage, pInfo->pInputBlock, pTaskInfo->id.str);
tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
SSortSource ps = {0};
ps.param = pOperator->pDownstream[i];
tsortAddSource(pInfo->pSortHandle, &ps);
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
ps->param = pOperator->pDownstream[i];
tsortAddSource(pInfo->pSortHandle, ps);
}
int32_t code = tsortOpen(pInfo->pSortHandle);
@ -269,6 +273,70 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
SArray* pColMatchInfo, SMultiwaySortMergeOperatorInfo* pInfo) {
blockDataCleanup(pDataBlock);
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
if (p == NULL) {
return NULL;
}
blockDataEnsureCapacity(p, capacity);
while (1) {
STupleHandle* pTupleHandle = NULL;
if (pInfo->prefetchedTuple == NULL) {
pTupleHandle = tsortNextTuple(pHandle);
} else {
pTupleHandle = pInfo->prefetchedTuple;
pInfo->prefetchedTuple = NULL;
}
if (pTupleHandle == NULL) {
break;
}
uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle);
if (!pInfo->hasGroupId) {
pInfo->groupId = tupleGroupId;
pInfo->hasGroupId = true;
appendOneRowToDataBlock(p, pTupleHandle);
} else if (pInfo->groupId == tupleGroupId) {
appendOneRowToDataBlock(p, pTupleHandle);
} else {
pInfo->prefetchedTuple = pTupleHandle;
pInfo->groupId = tupleGroupId;
break;
}
if (p->info.rows >= capacity) {
break;
}
}
if (p->info.rows > 0) {
int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i);
ASSERT(pmInfo->matchType == COL_MATCH_FROM_SLOT_ID);
SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId);
SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId);
colDataAssign(pDst, pSrc, p->info.rows);
}
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.capacity = p->info.rows;
}
blockDataDestroy(p);
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
}
SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -283,7 +351,11 @@ SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) {
}
SSDataBlock* pBlock =
getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
getMultiwaySortedBlockData(pInfo->pSortHandle,
pInfo->binfo.pRes,
pOperator->resultInfo.capacity,
pInfo->pColMatchInfo,
pInfo);
if (pBlock != NULL) {
pOperator->resultInfo.totalRows += pBlock->info.rows;
@ -296,6 +368,7 @@ SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) {
void destroyMultiwaySortMergeOperatorInfo(void* param, int32_t numOfOutput) {
SMultiwaySortMergeOperatorInfo * pInfo = (SMultiwaySortMergeOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock);
taosArrayDestroy(pInfo->pSortInfo);
taosArrayDestroy(pInfo->pColMatchInfo);
@ -313,7 +386,7 @@ int32_t getMultiwaySortMergeExplainExecInfo(SOperatorInfo* pOptr, void** pOptrEx
return TSDB_CODE_SUCCESS;
}
SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams,
SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock,
SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo,
SExecTaskInfo* pTaskInfo) {
SMultiwaySortMergeOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SMultiwaySortMergeOperatorInfo));
@ -330,6 +403,7 @@ SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams,
pInfo->pSortInfo = pSortInfo;
pInfo->pColMatchInfo = pColMatchColInfo;
pInfo->pInputBlock = pInputBlock;
pOperator->name = "MultiwaySortMerge";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE;
pOperator->blocking = true;

View File

@ -23,7 +23,6 @@ typedef enum SResultTsInterpType {
RESULT_ROW_END_INTERP = 2,
} SResultTsInterpType;
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator);
static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator);
static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
@ -778,6 +777,22 @@ static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated
return TSDB_CODE_SUCCESS;
}
static void removeResult(SArray* pUpdated, TSKEY key) {
int32_t size = taosArrayGetSize(pUpdated);
int32_t index = binarySearch(pUpdated, size, key, TSDB_ORDER_DESC, getReskey);
if (index >= 0 && key == getReskey(pUpdated, index)) {
taosArrayRemove(pUpdated, index);
}
}
static void removeResults(SArray* pWins, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) {
STimeWindow* pW = taosArrayGet(pWins, i);
removeResult(pUpdated, pW->skey);
}
}
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
int32_t scanFlag, SArray* pUpdated) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
@ -1212,6 +1227,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, int
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
(SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
ASSERT(p1);
doClearWindowImpl(p1, pSup->pResultBuf, pBinfo, numOfOutput);
}
@ -1363,6 +1379,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(pChildOp);
}
}
nodesDestroyNode(pInfo->pPhyNode);
}
static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
@ -1485,69 +1502,6 @@ _error:
return NULL;
}
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval,
int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp,
SExecTaskInfo* pTaskInfo) {
SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
pInfo->twAggSup = *pTwAggSupp;
pInfo->primaryTsIndex = primaryTsSlotId;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(pOperator, 4096);
int32_t code =
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1);
int32_t numOfChild = 8; // Todo(liuyao) get it from phy plan
pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo));
for (int32_t i = 0; i < numOfChild; i++) {
SSDataBlock* chRes = createOneDataBlock(pResBlock, false);
SOperatorInfo* pChildOp = createIntervalOperatorInfo(NULL, pExprInfo, numOfCols, chRes, pInterval, primaryTsSlotId,
pTwAggSupp, pTaskInfo);
if (pChildOp && chRes) {
taosArrayPush(pInfo->pChildren, &pChildOp);
continue;
}
goto _error;
}
pOperator->name = "StreamFinalIntervalOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
return pOperator;
_error:
destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols);
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
@ -1836,7 +1790,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
pInfo->tsSlotId = tsSlotId;
pOperator->name = "StateWindowOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExpr;
@ -1888,7 +1842,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo
pInfo->winSup.prevTs = INT64_MIN;
pInfo->reptScan = false;
pOperator->name = "SessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExprInfo;
@ -1913,12 +1867,12 @@ _error:
return NULL;
}
static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) {
static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock,
int32_t tableGroupId, SArray* pUpdated) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
int32_t numOfOutput = pOperatorInfo->numOfExprs;
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
int32_t step = 1;
bool ascScan = true;
TSKEY* tsCols = NULL;
@ -1929,7 +1883,7 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
} else {
return pUpdated;
return;
}
int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1);
@ -1946,13 +1900,13 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB
pos->groupId = tableGroupId;
pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*)pos->key = pResult->win.skey;
taosArrayPush(pUpdated, &pos);
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
TSDB_ORDER_ASC);
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos,
nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) {
saveResult(pResult, tableGroupId, pUpdated);
}
// window start(end) key interpolation
// disable it temporarily
// doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos,
// forwardRows);
// doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
tsCols, pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
@ -1962,7 +1916,6 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB
break;
}
}
return pUpdated;
}
bool isFinalInterval(SStreamFinalIntervalOperatorInfo* pInfo) { return pInfo->pChildren != NULL; }
@ -2006,24 +1959,72 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SArra
}
}
static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) {
taosHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 1);
}
static void clearUpdateDataBlock(SSDataBlock* pBlock) {
if (pBlock->info.rows <= 0) {
return;
}
blockDataCleanup(pBlock);
}
static void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex) {
ASSERT(pDest->info.capacity >= pSource->info.rows);
clearUpdateDataBlock(pDest);
SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, 0);
SColumnInfoData* pSourceCol = taosArrayGet(pSource->pDataBlock, tsColIndex);
// copy timestamp column
colDataAssign(pDestCol, pSourceCol, pSource->info.rows);
for (int32_t i = 1; i < pDest->info.numOfCols; i++) {
SColumnInfoData* pCol = taosArrayGet(pDest->pDataBlock, i);
colDataAppendNNULL(pCol, 0, pSource->info.rows);
}
pDest->info.rows = pSource->info.rows;
blockDataUpdateTsWindow(pDest, 0);
}
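Editor's note: `copyUpdateDataBlock` keeps only the timestamp column of the source block (copied into column 0 of the destination) and NULL-fills every other destination column, so the update block carries just the keys of the windows that need reprocessing. A hedged, self-contained illustration of that shape with made-up minimal types (nothing below is TDengine API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { int64_t vals[128]; bool isNull[128]; } MiniCol;   /* toy column */
typedef struct { MiniCol cols[4]; int numCols; int rows; } MiniBlock;

/* keep only the timestamp column, NULL-fill the rest (assumes src->rows <= 128) */
static void copyTsOnly(MiniBlock* dst, const MiniBlock* src, int tsColIndex) {
  memcpy(dst->cols[0].vals, src->cols[tsColIndex].vals, sizeof(int64_t) * (size_t)src->rows);
  memset(dst->cols[0].isNull, 0, sizeof(bool) * (size_t)src->rows);
  for (int c = 1; c < dst->numCols; ++c) {
    memset(dst->cols[c].isNull, 1, sizeof(bool) * (size_t)src->rows);  /* all NULL */
  }
  dst->rows = src->rows;
}
```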
static int32_t getChildIndex(SSDataBlock* pBlock) {
// if (pBlock->info.type != STREAM_INVALID && pBlock->info.rows < 4) { // for test
// return pBlock->info.rows - 1;
// }
return 0;
}
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = NULL;
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
SArray* pClosed = taosArrayInit(4, POINTER_BYTES);
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
} else if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) {
if (pInfo->binfo.pRes->info.rows == 0) {
pOperator->status = OP_EXEC_DONE;
if (isFinalInterval(pInfo) || pInfo->pUpdateRes->info.rows == 0) {
if (!isFinalInterval(pInfo)) {
// the semi interval operator clears its disk buffer
clearStreamIntervalOperator(pInfo);
}
return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
return NULL;
}
// process the rest of the data
pOperator->status = OP_OPENED;
return pInfo->pUpdateRes;
}
return pInfo->binfo.pRes;
}
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
clearUpdateDataBlock(pInfo->pUpdateRes);
break;
}
@ -2033,31 +2034,149 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, pInfo->primaryTsIndex, pOperator->numOfExprs,
pBlock, pUpWins);
if (isFinalInterval(pInfo)) {
int32_t childIndex = 0; // Todo(liuyao) get child id from SSDataBlock
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SIntervalAggOperatorInfo* pChildInfo = pChildOp->info;
doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval, pChildInfo->primaryTsIndex,
pChildOp->numOfExprs, pBlock, NULL);
rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId, pOperator->numOfExprs,
pOperator->pTaskInfo);
}
doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval,
pChildInfo->primaryTsIndex, pChildOp->numOfExprs, pBlock, NULL);
rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId,
pOperator->numOfExprs, pOperator->pTaskInfo);
taosArrayDestroy(pUpWins);
continue;
}
removeResults(pUpWins, pUpdated);
copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
taosArrayDestroy(pUpWins);
break;
}
if (isFinalInterval(pInfo)) {
int32_t chIndex = 1; // Todo(liuyao) get it from SSDataBlock
int32_t chIndex = getChildIndex(pBlock);
int32_t size = taosArrayGetSize(pInfo->pChildren);
// if chIndex + 1 - size > 0, add new child
for (int32_t i = 0; i < chIndex + 1 - size; i++) {
SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0);
if (!pChildOp) {
longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
taosArrayPush(pInfo->pChildren, &pChildOp);
}
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
doStreamIntervalAgg(pChildOp);
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
setInputDataBlock(pChildOp, pChInfo->binfo.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true);
doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL);
}
pUpdated = doHashInterval(pOperator, pBlock, 0);
doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdated);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
if (isFinalInterval(pInfo)) {
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup,
&pInfo->interval, pClosed);
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed,
pInfo->binfo.rowCellInfoOffset);
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
taosArrayAddAll(pUpdated, pClosed);
}
}
taosArrayDestroy(pClosed);
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
pOperator->status = OP_RES_TO_RETURN;
return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
if (pInfo->binfo.pRes->info.rows == 0) {
pOperator->status = OP_EXEC_DONE;
if (pInfo->pUpdateRes->info.rows == 0) {
return NULL;
}
// process the rest of the data
pOperator->status = OP_OPENED;
return pInfo->pUpdateRes;
}
return pInfo->binfo.pRes;
}
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = (SInterval) {.interval = pIntervalPhyNode->interval,
.sliding = pIntervalPhyNode->sliding,
.intervalUnit = pIntervalPhyNode->intervalUnit,
.slidingUnit = pIntervalPhyNode->slidingUnit,
.offset = pIntervalPhyNode->offset,
.precision =
((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pIntervalPhyNode->window.watermark,
.calTrigger = pIntervalPhyNode->window.triggerType,
.maxTs = INT64_MIN,
.winMap = NULL, };
pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(pOperator, 4096);
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols);
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
int32_t code = initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols,
pResBlock, keyBufSize, pTaskInfo->id.str);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1);
pInfo->pChildren = NULL;
if (numOfChild > 0) {
pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo));
for (int32_t i = 0; i < numOfChild; i++) {
SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pPhyNode, pTaskInfo, 0);
if (pChildOp) {
taosArrayPush(pInfo->pChildren, &pChildOp);
continue;
}
goto _error;
}
}
// the semi interval operator does not cache results
if (!isFinalInterval(pInfo)) {
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
}
pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
pInfo->pUpdateRes->info.type = STREAM_REPROCESS;
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
pInfo->pPhyNode = nodesCloneNode(pPhyNode);
pOperator->name = "StreamFinalIntervalOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL,
destroyStreamFinalIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow,
NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
return pOperator;
_error:
destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols);
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
void destroyStreamAggSupporter(SStreamAggSupporter* pSup) {
@ -2151,7 +2270,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SEx
pInfo->pChildren = NULL;
pOperator->name = "StreamSessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->pExpr = pExprInfo;
@ -2638,7 +2757,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
goto _error;
}
pOperator->name = "StreamFinalSessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION;
int32_t numOfChild = 1; // Todo(liuyao) get it from phy plan
pInfo = pOperator->info;
pInfo->pChildren = taosArrayInit(8, sizeof(void*));
@ -3029,7 +3148,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pChildren = NULL;
pOperator->name = "StreamStateAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->numOfExprs = numOfCols;

View File

@ -225,6 +225,10 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int
for (int32_t i = 0; i < cmpParam->numOfSources; ++i) {
SSortSource* pSource = cmpParam->pSources[i];
pSource->src.pBlock = pHandle->fetchfp(pSource->param);
if (pSource->src.pBlock == NULL) {
pSource->src.rowIndex = -1;
++pHandle->numOfCompletedSources;
}
}
}
@ -361,13 +365,21 @@ int32_t msortComparFn(const void *pLeft, const void *pRight, void *param) {
bool leftNull = false;
if (pLeftColInfoData->hasNull) {
leftNull = colDataIsNull(pLeftColInfoData, pLeftBlock->info.rows, pLeftSource->src.rowIndex, pLeftBlock->pBlockAgg[pOrder->slotId]);
if (pLeftBlock->pBlockAgg == NULL) {
leftNull = colDataIsNull_s(pLeftColInfoData, pLeftSource->src.rowIndex);
} else {
leftNull = colDataIsNull(pLeftColInfoData, pLeftBlock->info.rows, pLeftSource->src.rowIndex, pLeftBlock->pBlockAgg[i]);
}
}
SColumnInfoData* pRightColInfoData = TARRAY_GET_ELEM(pRightBlock->pDataBlock, pOrder->slotId);
bool rightNull = false;
if (pRightColInfoData->hasNull) {
rightNull = colDataIsNull(pRightColInfoData, pRightBlock->info.rows, pRightSource->src.rowIndex, pRightBlock->pBlockAgg[pOrder->slotId]);
if (pRightBlock->pBlockAgg == NULL) {
rightNull = colDataIsNull_s(pRightColInfoData, pRightSource->src.rowIndex);
} else {
rightNull = colDataIsNull(pRightColInfoData, pRightBlock->info.rows, pRightSource->src.rowIndex, pRightBlock->pBlockAgg[i]);
}
}
if (leftNull && rightNull) {
@ -408,7 +420,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
pHandle->totalElapsed = taosGetTimestampUs() - pHandle->startTs;
qDebug("%s %d rounds mergesort required to complete the sort, first-round sorted data size:%"PRIzu", sort elapsed:%"PRId64", total elapsed:%"PRId64,
pHandle->idStr, (int32_t) (sortPass + 1), getTotalBufSize(pHandle->pBuf), pHandle->sortElapsed, pHandle->totalElapsed);
pHandle->idStr, (int32_t) (sortPass + 1), pHandle->pBuf ? getTotalBufSize(pHandle->pBuf) : 0, pHandle->sortElapsed, pHandle->totalElapsed);
int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize);
blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows);
@ -697,6 +709,10 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {
return colDataGetData(pColInfo, pVHandle->rowIndex);
}
uint64_t tsortGetGroupId(STupleHandle* pVHandle) {
return pVHandle->pBlock->info.groupId;
}
SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) {
SSortExecInfo info = {0};

View File

@ -67,6 +67,7 @@ bool leastSQRFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInf
int32_t leastSQRFunction(SqlFunctionCtx* pCtx);
int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx);
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getPercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool percentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);

View File

@ -1118,7 +1118,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = leastSQRFunctionSetup,
.processFunc = leastSQRFunction,
.finalizeFunc = leastSQRFinalize,
.invertFunc = leastSQRInvertFunction,
.invertFunc = NULL,
.combineFunc = leastSQRCombine,
},
{
.name = "avg",

View File

@ -1799,17 +1799,17 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
param[1][1] = (double)pInfo->num;
param[1][0] = param[0][1];
param[0][0] -= param[1][0] * (param[0][1] / param[1][1]);
param[0][2] -= param[1][2] * (param[0][1] / param[1][1]);
param[0][1] = 0;
param[1][2] -= param[0][2] * (param[1][0] / param[0][0]);
param[1][0] = 0;
param[0][2] /= param[0][0];
double param00 = param[0][0] - param[1][0] * (param[0][1] / param[1][1]);
double param02 = param[0][2] - param[1][2] * (param[0][1] / param[1][1]);
// param[0][1] = 0;
double param12 = param[1][2] - param02 * (param[1][0] / param00);
// param[1][0] = 0;
param02 /= param00;
param[1][2] /= param[1][1];
param12 /= param[1][1];
char buf[64] = {0};
size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param[0][2], param[1][2]);
size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param02, param12);
varDataSetLen(buf, len);
colDataAppend(pCol, currentRow, buf, pResInfo->isNullRes);
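Editor's note: the rewritten finalize step solves the 2x2 normal equations of simple linear regression into local variables (`param00`, `param02`, `param12`) instead of mutating the accumulated matrix in place. With the code's accumulators param[0][0]=Σx², param[0][1]=param[1][0]=Σx, param[1][1]=n, param[0][2]=Σxy and param[1][2]=Σy, the reported slope and intercept are equivalent to:

```latex
% slope/intercept recovered from the accumulated sums (editor's summary)
\begin{aligned}
\begin{pmatrix} \sum x_i^2 & \sum x_i \\ \sum x_i & n \end{pmatrix}
\begin{pmatrix} a \\ b \end{pmatrix}
&=
\begin{pmatrix} \sum x_i y_i \\ \sum y_i \end{pmatrix}, \\[4pt]
a = \texttt{param02} &= \frac{\sum x_i y_i - \tfrac{1}{n}\sum x_i \sum y_i}
                             {\sum x_i^{2} - \tfrac{1}{n}\bigl(\sum x_i\bigr)^{2}},
\qquad
b = \texttt{param12} = \frac{\sum y_i - a\,\sum x_i}{n}.
\end{aligned}
```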
@ -1822,6 +1822,27 @@ int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SLeastSQRInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
int32_t type = pDestCtx->input.pData[0]->info.type;
double (*pDparam)[3] = pDBuf->matrix;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SLeastSQRInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
double (*pSparam)[3] = pSBuf->matrix;
for (int32_t i = 0; i < pSBuf->num; i++) {
pDparam[0][0] += pDBuf->startVal * pDBuf->startVal;
pDparam[0][1] += pDBuf->startVal;
pDBuf->startVal += pDBuf->stepVal;
}
pDparam[0][2] += pSparam[0][2] + pDBuf->num * pDBuf->stepVal * pSparam[1][2];
pDparam[1][2] += pSparam[1][2];
pDBuf->num += pSBuf->num;
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
return TSDB_CODE_SUCCESS;
}
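Editor's note: `leastSQRCombine` merges two partial regression states in which x is implicit (x_i = startVal + i·stepVal). Assuming the source partition's local x restarts at 0, appending its m rows shifts their x by n_d·Δ (destination row count times step); the loop above extends Σx and Σx² by walking `startVal`, and the two updates to Σxy and Σy implement the identity below:

```latex
% merge identity for the implicit-x accumulators (editor's summary; assumes the
% source partition's local x starts at 0)
\begin{aligned}
x &= x' + n_d\,\Delta, \\
\textstyle\sum_{\text{merged}} x\,y &= \sum_{d} x\,y \;+\; \sum_{s} x'\,y \;+\; n_d\,\Delta \sum_{s} y, \\
\textstyle\sum_{\text{merged}} y &= \sum_{d} y + \sum_{s} y, \qquad n_{\text{merged}} = n_d + m .
\end{aligned}
```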
bool getPercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SPercentileInfo);
return true;

View File

@ -456,6 +456,49 @@ static SNode* physiNodeCopy(const SPhysiNode* pSrc, SPhysiNode* pDst) {
return (SNode*)pDst;
}
static SNode* physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
CLONE_NODE_LIST_FIELD(pScanCols);
CLONE_NODE_LIST_FIELD(pScanPseudoCols);
COPY_SCALAR_FIELD(uid);
COPY_SCALAR_FIELD(suid);
COPY_SCALAR_FIELD(tableType);
COPY_OBJECT_FIELD(tableName, sizeof(SName));
return (SNode*)pDst;
}
static SNode* physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) {
return physiScanCopy(pSrc, pDst);
}
static SNode* physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(scan, physiScanCopy);
COPY_OBJECT_FIELD(scanSeq[0], sizeof(uint8_t) * 2);
COPY_OBJECT_FIELD(scanRange, sizeof(STimeWindow));
COPY_SCALAR_FIELD(ratio);
COPY_SCALAR_FIELD(dataRequired);
CLONE_NODE_LIST_FIELD(pDynamicScanFuncs);
CLONE_NODE_LIST_FIELD(pPartitionKeys);
COPY_SCALAR_FIELD(interval);
COPY_SCALAR_FIELD(offset);
COPY_SCALAR_FIELD(sliding);
COPY_SCALAR_FIELD(intervalUnit);
COPY_SCALAR_FIELD(slidingUnit);
COPY_SCALAR_FIELD(triggerType);
COPY_SCALAR_FIELD(watermark);
COPY_SCALAR_FIELD(tsColId);
COPY_SCALAR_FIELD(filesFactor);
return (SNode*)pDst;
}
static SNode* physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSystemTableScanPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(scan, physiScanCopy);
COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet));
COPY_SCALAR_FIELD(showRewrite);
COPY_SCALAR_FIELD(accountId);
return (SNode*)pDst;
}
static SNode* physiWindowCopy(const SWinodwPhysiNode* pSrc, SWinodwPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
CLONE_NODE_LIST_FIELD(pExprs);
@ -603,6 +646,14 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) {
return logicIndefRowsFuncCopy((const SIndefRowsFuncLogicNode*)pNode, (SIndefRowsFuncLogicNode*)pDst);
case QUERY_NODE_LOGIC_SUBPLAN:
return logicSubplanCopy((const SLogicSubplan*)pNode, (SLogicSubplan*)pDst);
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
return physiTagScanCopy((const STagScanPhysiNode*)pNode, (STagScanPhysiNode*)pDst);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return physiTableScanCopy((const STableScanPhysiNode*)pNode, (STableScanPhysiNode*)pDst);
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return physiSysTableScanCopy((const SSystemTableScanPhysiNode*)pNode, (SSystemTableScanPhysiNode*)pDst);
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:

View File

@ -220,9 +220,9 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiSystemTableScan";
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return "PhysiProject";
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
return "PhysiJoin";
case QUERY_NODE_PHYSICAL_PLAN_AGG:
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
return "PhysiAgg";
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return "PhysiExchange";
@ -242,13 +242,13 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiStreamSemiInterval";
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return "PhysiFill";
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
return "PhysiSessionWindow";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
return "PhysiStreamSessionWindow";
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
return "PhysiStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return "PhysiStreamStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return "PhysiPartition";
@ -2441,6 +2441,7 @@ static const char* jkValueLiteralSize = "LiteralSize";
static const char* jkValueLiteral = "Literal";
static const char* jkValueDuration = "Duration";
static const char* jkValueTranslate = "Translate";
static const char* jkValueNotReserved = "NotReserved";
static const char* jkValueDatum = "Datum";
static int32_t datumToJson(const void* pObj, SJson* pJson) {
@ -2513,6 +2514,9 @@ static int32_t valueNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkValueTranslate, pNode->translate);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkValueNotReserved, pNode->notReserved);
}
if (TSDB_CODE_SUCCESS == code && pNode->translate) {
code = datumToJson(pNode, pJson);
}
@ -2634,6 +2638,9 @@ static int32_t jsonToValueNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkValueTranslate, &pNode->translate);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkValueNotReserved, &pNode->notReserved);
}
if (TSDB_CODE_SUCCESS == code && pNode->translate) {
code = jsonToDatum(pJson, pNode);
}
@ -3875,9 +3882,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return physiSysTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return physiProjectNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
return physiJoinNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_AGG:
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
return physiAggNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return physiExchangeNodeToJson(pObj, pJson);
@ -3893,11 +3900,11 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return physiIntervalNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return physiFillNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
return physiSessionWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return physiStateWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return physiPartitionNodeToJson(pObj, pJson);
@ -4008,9 +4015,9 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToPhysiSysTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return jsonToPhysiProjectNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
return jsonToPhysiJoinNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_AGG:
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
return jsonToPhysiAggNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return jsonToPhysiExchangeNode(pJson, pObj);
@ -4026,11 +4033,11 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToPhysiIntervalNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return jsonToPhysiFillNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
return jsonToPhysiSessionWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return jsonToPhysiStateWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return jsonToPhysiPartitionNode(pJson, pObj);

View File

@ -467,7 +467,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
@ -478,7 +478,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG: {
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
SAggPhysiNode* pAgg = (SAggPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
@ -518,12 +518,12 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext);
break;
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext);
break;
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: {
SStateWinodwPhysiNode* pState = (SStateWinodwPhysiNode*)pNode;
res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {

View File

@ -255,9 +255,9 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SSystemTableScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return makeNode(type, sizeof(SProjectPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
return makeNode(type, sizeof(SJoinPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_AGG:
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
return makeNode(type, sizeof(SAggPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return makeNode(type, sizeof(SExchangePhysiNode));
@ -277,13 +277,13 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return makeNode(type, sizeof(SFillPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
return makeNode(type, sizeof(SSessionWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
return makeNode(type, sizeof(SStreamSessionWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
return makeNode(type, sizeof(SStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return makeNode(type, sizeof(SStreamStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return makeNode(type, sizeof(SPartitionPhysiNode));
@ -657,14 +657,14 @@ void nodesDestroyNode(SNodeptr pNode) {
nodesDestroyList(pPhyNode->pProjections);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN: {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyNode(pPhyNode->pOnConditions);
nodesDestroyList(pPhyNode->pTargets);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG: {
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
SAggPhysiNode* pPhyNode = (SAggPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pExprs);
@ -689,8 +689,8 @@ void nodesDestroyNode(SNodeptr pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:

View File

@ -556,7 +556,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan,
static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SJoinLogicNode* pJoinLogicNode,
SPhysiNode** pPhyNode) {
SJoinPhysiNode* pJoin =
(SJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_JOIN);
(SJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN);
if (NULL == pJoin) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -738,7 +738,8 @@ static int32_t rewritePrecalcExpr(SPhysiPlanContext* pCxt, SNode* pNode, SNodeLi
static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SAggLogicNode* pAggLogicNode,
SPhysiNode** pPhyNode) {
SAggPhysiNode* pAgg = (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_AGG);
SAggPhysiNode* pAgg =
(SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_HASH_AGG);
if (NULL == pAgg) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -996,8 +997,7 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList*
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode(
pCxt, (SLogicNode*)pWindowLogicNode,
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW
: QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW));
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION : QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION));
if (NULL == pSession) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -1009,10 +1009,9 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList*
static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
SStateWinodwPhysiNode* pState =
(SStateWinodwPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pWindowLogicNode,
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW
: QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW));
SStateWinodwPhysiNode* pState = (SStateWinodwPhysiNode*)makePhysiNode(
pCxt, (SLogicNode*)pWindowLogicNode,
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE : QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE));
if (NULL == pState) {
return TSDB_CODE_OUT_OF_MEMORY;
}

View File

@ -166,8 +166,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
}
return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
}
// case QUERY_NODE_LOGIC_PLAN_SORT:
// return stbSplHasMultiTbScan(streamQuery, pNode);
case QUERY_NODE_LOGIC_PLAN_SORT:
return stbSplHasMultiTbScan(streamQuery, pNode);
case QUERY_NODE_LOGIC_PLAN_SCAN:
return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode);
default:
@ -340,7 +340,7 @@ static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent
return code;
}
static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SLogicNode* pPartWindow = NULL;
int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow);
if (TSDB_CODE_SUCCESS == code) {
@ -363,7 +363,7 @@ static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitIn
return code;
}
static int32_t stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
static int32_t stbSplSplitIntervalForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SLogicNode* pPartWindow = NULL;
int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow);
if (TSDB_CODE_SUCCESS == code) {
@ -379,14 +379,30 @@ static int32_t stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitI
return code;
}
static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
static int32_t stbSplSplitInterval(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
if (pCxt->pPlanCxt->streamQuery) {
return stbSplSplitWindowNodeForStream(pCxt, pInfo);
return stbSplSplitIntervalForStream(pCxt, pInfo);
} else {
return stbSplSplitWindowNodeForBatch(pCxt, pInfo);
return stbSplSplitIntervalForBatch(pCxt, pInfo);
}
}
static int32_t stbSplSplitSession(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
switch (((SWindowLogicNode*)pInfo->pSplitNode)->winType) {
case WINDOW_TYPE_INTERVAL:
return stbSplSplitInterval(pCxt, pInfo);
case WINDOW_TYPE_SESSION:
return stbSplSplitSession(pCxt, pInfo);
default:
break;
}
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
static int32_t stbSplCreatePartAggNode(SAggLogicNode* pMergeAgg, SLogicNode** pOutput) {
SNodeList* pFunc = pMergeAgg->pAggFuncs;
pMergeAgg->pAggFuncs = NULL;

View File

@ -634,6 +634,7 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut) {
return vectorConvertFromVarData(pIn, pOut, inType, outType);
}
pOut->numOfRows = pIn->numOfRows;
switch (outType) {
case TSDB_DATA_TYPE_BOOL: {
for (int32_t i = 0; i < pIn->numOfRows; ++i) {

View File

@ -651,3 +651,31 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages,
ps->loadBytes / (1024.0 * ps->loadPages));
}
void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
SArray** p = taosHashIterate(pBuf->groupSet, NULL);
while (p) {
size_t n = taosArrayGetSize(*p);
for (int32_t i = 0; i < n; ++i) {
SPageInfo* pi = taosArrayGetP(*p, i);
taosMemoryFreeClear(pi->pData);
taosMemoryFreeClear(pi);
}
taosArrayDestroy(*p);
p = taosHashIterate(pBuf->groupSet, p);
}
tdListEmpty(pBuf->lruList);
tdListEmpty(pBuf->freePgList);
taosArrayClear(pBuf->emptyDummyIdList);
taosArrayClear(pBuf->pFree);
taosHashClear(pBuf->groupSet);
taosHashClear(pBuf->all);
pBuf->numOfPages = 0; // all pages are in buffer in the first place
pBuf->totalBufSize = 0;
pBuf->allocateId = -1;
pBuf->fileSize = 0;
}

View File

@ -72,6 +72,10 @@
./test.sh -f tsim/stream/basic2.sim
# ./test.sh -f tsim/stream/session0.sim
# ./test.sh -f tsim/stream/session1.sim
# ./test.sh -f tsim/stream/state0.sim
# ./test.sh -f tsim/stream/triggerInterval0.sim
# ./test.sh -f tsim/stream/triggerSession0.sim
# ---- transaction
./test.sh -f tsim/trans/lossdata1.sim

View File

@ -66,6 +66,7 @@ if $rows != 0 then
return -1
endi
sql select * from tb1 where null;
print $rows
if $rows != 0 then
return -1
endi

View File

@ -23,89 +23,98 @@ sql insert into t1 values(1648791223001,10,2,3,1.1,2);
sql insert into t1 values(1648791233002,3,2,3,2.1,3);
sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4);
sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6);
$loop_count = 0
loop0:
sleep 300
sql select * from streamt order by s desc;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
# row 0
if $data01 != 3 then
print ======$data01
return -1
print ======data01=$data01
goto loop0
endi
if $data02 != 3 then
print ======$data02
return -1
print ======data02=$data02
goto loop0
endi
if $data03 != 3 then
print ======$data03
return -1
print ======data03=$data03
goto loop0
endi
if $data04 != 2.100000000 then
print ======$data04
print ======data04=$data04
return -1
endi
if $data05 != 0.000000000 then
print ======$data05
print ======data05=$data05
return -1
endi
if $data06 != 3 then
print ======$data05
print ======data06=$data06
return -1
endi
if $data07 != 2.100000000 then
print ======$data05
print ======data07=$data07
return -1
endi
if $data08 != 6 then
print ======$data05
print ======data08=$data08
return -1
endi
# row 1
if $data11 != 3 then
print ======$data11
return -1
print ======data11=$data11
goto loop0
endi
if $data12 != 10 then
print ======$data12
return -1
print ======data12=$data12
goto loop0
endi
if $data13 != 10 then
print ======$data13
return -1
print ======data13=$data13
goto loop0
endi
if $data14 != 1.100000000 then
print ======$data14
print ======data14=$data14
return -1
endi
if $data15 != 0.000000000 then
print ======$data15
print ======data15=$data15
return -1
endi
if $data16 != 10 then
print ======$data15
print ======data16=$data16
return -1
endi
if $data17 != 1.100000000 then
print ======$data17
print ======data17=$data17
return -1
endi
if $data18 != 5 then
print ======$data18
print ======data18=$data18
return -1
endi
@ -115,23 +124,31 @@ sql insert into t1 values(1648791233002,3,2,3,2.1,9);
sql insert into t1 values(1648791243003,4,2,3,3.1,10);
sql insert into t1 values(1648791213002,4,2,3,4.1,11) ;
sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13);
$loop_count = 0
loop1:
sleep 300
sql select * from streamt order by s desc ;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
# row 0
if $data01 != 7 then
print ======$data01
return -1
print =====data01=$data01
goto loop1
endi
if $data02 != 9 then
print ======$data02
return -1
print =====data02=$data02
goto loop1
endi
if $data03 != 4 then
print ======$data03
return -1
print =====data03=$data03
goto loop1
endi
if $data04 != 1.100000000 then

View File

@ -20,21 +20,33 @@ sql create stream streams1 trigger at_once into streamt1 as select _wstartts,
sql insert into t1 values(1648791213000,1,2,3,1.0,1);
sql insert into t1 values(1648791213000,1,2,3,1.0,2);
$loop_count = 0
loop0:
sql select * from streamt1 order by c desc;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 1 then
print ======$rows
return -1;
print =====rows=$rows
goto loop0
endi
sql insert into t1 values(1648791214000,1,2,3,1.0,3);
$loop_count = 0
loop00:
sql select * from streamt1 order by c desc;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 1 then
print ======$rows
return -1;
print =====rows=$rows
goto loop00
endi
sql insert into t1 values(1648791213010,2,2,3,1.0,4);
@ -44,27 +56,25 @@ $loop_count = 0
loop1:
sql select * from streamt1 where c >=4 order by `_wstartts`;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 3 then
print ======$rows
print =====rows=$rows
goto loop1
return -1
endi
# row 0
if $data01 != 1 then
print ======$data01
return -1
print =====data01=$data01
goto loop1
endi
if $data02 != 1 then
print ======$data02
return -1
print =====data02=$data02
goto loop1
endi
if $data03 != 1 then
@ -151,11 +161,10 @@ endi
sql insert into t1 values(1648791213011,1,2,3,1.0,7);
loop2:
$loop_count = 0
loop2:
sql select * from streamt1 where c in (5,4,7) order by `_wstartts`;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
@ -163,57 +172,51 @@ endi
# row 2
if $data21 != 2 then
print ======$data21
print =====data21=$data21
goto loop2
return -1
endi
if $data22 != 2 then
print ======$data22
print =====data22=$data22
goto loop2
return -1
endi
if $data23 != 2 then
print ======$data23
goto loop2
return -1
endi
if $data24 != 1 then
print ======$data24
goto loop2
return -1
endi
if $data25 != 3 then
print ======$data25
goto loop2
return -1
endi
if $data26 != 7 then
print ======$data26
goto loop2
return -1
endi
sql insert into t1 values(1648791213011,1,2,3,1.0,8);
loop21:
$loop_count = 0
loop21:
sql select * from streamt1 where c in (5,4,8) order by `_wstartts`;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $data26 != 8 then
print ======$data26
print =====data26=$data26
goto loop21
return -1
endi
@ -222,11 +225,10 @@ sql insert into t1 values(1648791213020,3,2,3,1.0,10);
sql insert into t1 values(1648791214000,1,2,3,1.0,11);
sql insert into t1 values(1648791213011,10,20,10,10.0,12);
loop3:
$loop_count = 0
loop3:
sql select * from streamt1 where c in (5,4,10,11,12) order by `_wstartts`;
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
@ -234,112 +236,100 @@ endi
# row 2
if $data21 != 1 then
print ======$data21
print =====data21=$data21
goto loop3
return -1
endi
if $data22 != 1 then
print ======$data22
print =====data22=$data22
goto loop3
return -1
endi
if $data23 != 10 then
print ======$data23
goto loop3
return -1
endi
if $data24 != 10 then
print ======$data24
goto loop3
return -1
endi
if $data25 != 10 then
print ======$data25
goto loop3
return -1
endi
if $data26 != 12 then
print ======$data26
goto loop3
return -1
endi
# row 3
if $data31 != 1 then
print ======$data31
print =====data31=$data31
goto loop3
return -1
endi
if $data32 != 1 then
print ======$data32
print =====data32=$data32
goto loop3
return -1
endi
if $data33 != 3 then
print ======$data33
goto loop3
return -1
endi
if $data34 != 3 then
print ======$data34
goto loop3
return -1
endi
if $data35 != 3 then
print ======$data35
goto loop3
return -1
endi
if $data36 != 10 then
print ======$data36
goto loop3
return -1
endi
# row 4
if $data41 != 1 then
print ======$data41
print =====data41=$data41
goto loop3
return -1
endi
if $data42 != 1 then
print ======$data42
print =====data42=$data42
goto loop3
return -1
endi
if $data43 != 1 then
print ======$data43
goto loop3
return -1
endi
if $data44 != 1 then
print ======$data44
goto loop3
return -1
endi
if $data45 != 3 then
print ======$data45
goto loop3
return -1
endi
if $data46 != 11 then
print ======$data46
goto loop3
return -1
endi
@ -347,8 +337,8 @@ sql insert into t1 values(1648791213030,3,12,12,12.0,13);
sql insert into t1 values(1648791214040,1,13,13,13.0,14);
sql insert into t1 values(1648791213030,3,14,14,14.0,15) (1648791214020,15,15,15,15.0,16);
loop4:
$loop_count = 0
loop4:
sql select * from streamt1 where c in (14,15,16) order by `_wstartts`;
sleep 300
@ -358,119 +348,104 @@ if $loop_count == 10 then
endi
if $rows != 3 then
print ======$rows
goto loop4
return -1;
print ====loop4=rows=$rows
# goto loop4
endi
# row 0
if $data01 != 2 then
print ======$data01
print =====data01=$data01
goto loop4
return -1
endi
if $data02 != 2 then
print ======$data02
goto loop4
return -1
endi
if $data03 != 6 then
print ======$data03
goto loop4
return -1
endi
if $data04 != 3 then
print ======$data04
goto loop4
return -1
endi
if $data05 != 3 then
print ======$data05
goto loop4
return -1
endi
if $data06 != 15 then
print ======$data06
goto loop4
return -1
endi
# row 1
if $data11 != 1 then
print ======$data11
print =====data11=$data11
goto loop4
return -1
endi
if $data12 != 1 then
print ======$data12
print =====data12=$data12
goto loop4
return -1
endi
if $data13 != 15 then
print ======$data13
goto loop4
return -1
endi
if $data14 != 15 then
print ======$data14
goto loop4
return -1
endi
if $data15 != 15 then
print ======$data15
goto loop4
return -1
endi
if $data16 != 16 then
print ======$data16
goto loop4
return -1
endi
# row 2
if $data21 != 1 then
print ======$data21
print =====data21=$data21
goto loop4
return -1
endi
if $data22 != 1 then
print ======$data22
print =====data22=$data22
goto loop4
return -1
endi
if $data23 != 1 then
print ======$data23
goto loop4
return -1
endi
if $data24 != 1 then
print ======$data24
goto loop4
return -1
endi
if $data25 != 13 then
print ======$data25
goto loop4
return -1
endi
if $data26 != 14 then
print ======$data26
goto loop4
return -1
endi