Merge branch 'td31284_klxu' into td31284

This commit is contained in:
kailixu 2024-11-07 14:30:38 +08:00
commit 880b54ee16
205 changed files with 30571 additions and 9683 deletions

2
.gitignore vendored
View File

@ -159,4 +159,6 @@ pcre2.h
zconf.h
version.h
geos_c.h
source/libs/parser/src/sql.c
include/common/ttokenauto.h

View File

@ -0,0 +1,11 @@
# lemon
ExternalProject_Add(
lemon
SOURCE_DIR ${TD_CONTRIB_DIR}/lemon
CONFIGURE_COMMAND ""
BUILD_COMMAND "${C_COMPILER_LEMON}" -o ${TD_CONTRIB_DIR}/lemon/lemon ${TD_CONTRIB_DIR}/lemon/lemon.c
INSTALL_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_ALWAYS 1
)

View File

@ -184,6 +184,17 @@ if(${BUILD_PCRE2})
cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
find_program(C_COMPILER_LEMON NAMES gcc)
if(C_COMPILER_LEMON)
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
else()
set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
endif()
# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .

6038
contrib/lemon/lemon.c Normal file

File diff suppressed because it is too large

1086
contrib/lemon/lempar.c Normal file

File diff suppressed because it is too large

View File

@ -23,17 +23,18 @@ docker pull tdengine/tdengine:3.3.3.0
然后只需执行下面的命令:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```
注意TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043 为 taosKeeper 使用端口。6044-6049 TCP 端口为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
6044 和 6045 UDP 端口为 statsd 和 collectd 格式写入接口可根据需要选择是否打开。6060 为 taosExplorer 使用端口。具体端口使用情况请参考[网络端口要求](../../operation/planning#网络端口要求)。
如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令:
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
-p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```
:::note

View File

@ -106,7 +106,7 @@ CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16;
```
该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下:
- `PRECISION 'ms'`:这个数据库的时序数据使用毫秒(ms)精度的时间戳
- `KEEP 365`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除
- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除
- `DURATION 10` :每 10 天的数据放在一个数据文件中
- `BUFFER 16` :写入使用大小为 16MB 的内存池。

View File

@ -116,10 +116,11 @@ create stream if not exists count_history_s fill_history 1 into count_history as
### 流计算的触发模式
在创建流时,可以通过 TRIGGER 指令指定流计算的触发模式。对于非窗口计算,流计算的触发是实时的,对于窗口计算,目前提供 3 种触发模式,默认为 WINDOW_CLOSE。
在创建流时,可以通过 TRIGGER 指令指定流计算的触发模式。对于非窗口计算,流计算的触发是实时的,对于窗口计算,目前提供 4 种触发模式,默认为 WINDOW_CLOSE。
1. AT_ONCE:写入立即触发。
2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)。
3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。
4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV、NULL、NONE、VALUE。
窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。

View File

@ -7,12 +7,12 @@ sidebar_label: "ARIMA"
## 功能概述
ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
ARIMA模型是一种自回归模型只需要自变量即可预测后续的值。ARIMA模型要求时序数据是**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
ARIMA 模型是一种自回归模型只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
>平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。
以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 模型。
以下参数可以动态输入控制预测过程中生成合适的 ARIMA 模型。
- p= 自回归模型阶数
- d= 差分阶数
@ -21,9 +21,9 @@ ARIMA模型是一种自回归模型只需要自变量即可预测后续的值
### 参数
分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数名称|说明|必填项|
|---|---|---|
|period|输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0 将使用非季节性/周期性的 ARIMA 模型预测|选填|
|参数|说明|必填项|
|---|---|-----|
|period|输入时间序列每个周期包含的数据点个数,如果不设置该参数或该参数设置为 0将使用非季节性/周期性的 ARIMA 模型预测|选填|
|start_p|自回归模型阶数的起始值0 开始的整数,不推荐大于 10|选填|
|max_p|自回归模型阶数的结束值0 开始的整数,不推荐大于 10|选填|
|start_q|移动平均模型阶数的起始值0 开始的整数,不推荐大于 10|选填|
@ -40,11 +40,11 @@ FORECAST(i32, "algo=arima,alpha=95,period=10, start_p=1, max_p=5, start_q=1, max
```json5
{
"rows": fc_rows, // 预测结果的行数
"rows": fc_rows, // 返回结果的行数
"period": period, // 返回结果的周期性,同输入
"alpha": alpha, // 返回结果的置信区间,同输入
"algo": "arima", // 返回结果使用的算法
"mse":mse, // 拟合输入时序数据时候生成模型的最小均方误差(MSE)
"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
"res": res // 列模式的结果
}
```
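如果想在本地快速理解上述参数的作用,可以参考下面的 Python 示意片段(假设环境中已安装 `pmdarima`;这只是一个独立示例,参数名仅对应上文表格中的含义,并非分析平台的内部实现):

```python
import numpy as np
import pmdarima as pm

# 仅为示意:自动拟合 ARIMA 模型并输出预测值与 95% 置信区间(假设已安装 pmdarima)
y = np.sin(np.arange(120) * 2 * np.pi / 10) + np.random.default_rng(0).normal(0, 0.1, 120)
model = pm.auto_arima(
    y,
    start_p=1, max_p=5,    # 自回归阶数搜索范围,对应文档中的 start_p / max_p
    start_q=1, max_q=5,    # 移动平均阶数搜索范围,对应 start_q / max_q
    seasonal=True, m=10,   # m 对应文档中的 period(每个周期包含的数据点个数)
)
fc, conf_int = model.predict(n_periods=10, return_conf_int=True, alpha=0.05)
print(fc, conf_int)
```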

View File

@ -6,17 +6,17 @@ sidebar_label: "HoltWinters"
本节讲述 HoltWinters 算法模型的使用方法。
## 功能概述
HoltWinters模型又称为多次指数平滑模型EMA含有线性趋势和周期波动的非平稳序列适用,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
HoltWinters 模型又称为多次指数平滑模型(EMA),适用于含有线性趋势和周期波动的非平稳序列,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
HoltWinters 有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。
该模型对于返回数据也不提供计算的置信区间范围结果。在 95% 置信区间的上下界结果与预测结果相同。
该模型对于返回数据不提供计算的置信区间范围结果,在 95% 置信区间的上下界结果与预测结果相同。
### 参数
分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数名称|说明|必填项|
分析平台中使用自动化的 HoltWinters 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数|说明|必填项|
|---|---|---|
|period| 输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0 将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
|trend|趋势模型使用加法模型还是乘法模型|选填|
|seasonal|季节性采用加法模型还是乘法模型|选填|
@ -30,7 +30,7 @@ FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")
```json5
{
"rows": rows, // 结果的行数
"rows": rows, // 返回结果的行数
"period": period, // 返回结果的周期性,该结果与输入的周期性相同,如果没有周期性,该值为 0
"algo": 'holtwinters' // 返回结果使用的计算模型
"mse": mse, // 最小均方误差minmum square error

View File

@ -13,7 +13,7 @@ sidebar_label: "Anomaly-detection"
- k-sigma<sup>[1]</sup>:即 ***68-95-99.7 rule***。***k*** 值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点(参数说明和示意代码见下)。
|参数名称|说明|是否必选|默认值|
|参数|说明|是否必选|默认值|
|---|---|---|---|
|k|标准差倍数|选填|3|
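下面是一个 k-sigma 判定思路的最小 Python 示意(独立片段,函数名 `ksigma_detect` 为自拟,并非平台内部实现):

```python
import numpy as np

# 仅为示意:偏离均值超过 k 倍标准差的点标记为 -1(异常),其余为 1(正常)
def ksigma_detect(values, k=3):
    arr = np.asarray(values, dtype=float)
    mean, std = arr.mean(), arr.std()
    return np.where(np.abs(arr - mean) > k * std, -1, 1).tolist()

rng = np.random.default_rng(0)
series = rng.normal(loc=10.0, scale=1.0, size=200).tolist()
series[100] = 50.0                  # 注入一个明显偏离的点
flags = ksigma_detect(series, k=3)
print(flags[100])                   # 预期为 -1
```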
@ -24,13 +24,13 @@ sidebar_label: "Anomaly-detection"
- SHESD<sup>[4]</sup> 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5%
|参数名称|说明|是否必选|默认值|
|参数|说明|是否必选|默认值|
|---|---|---|---|
|k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5|
### 基于数据密度的检测方法
LOF<sup>[5]</sup>: 局部离群因子(LOF又叫局部异常因子)算法是Breunig于2000年提出的一种基于密度的局部离群点检测算法该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况首先计算每个数据点的一个局部可达密度然后通过局部可达密度进一步计算得到每个数据点的一个离群因子该离群因子即标识了一个数据点的离群程度因子值越大表示离群程度越高因子值越小表示离群程度越低。最后输出离群程度最大的top(n)个点。
LOF<sup>[5]</sup>: 局部离群因子LOF又叫局部异常因子算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 top(n) 个点。
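下面用 scikit-learn(假设环境中已安装)给出 LOF 基本用法的最小示意,用于说明“离群因子越大、离群程度越高”的输出方式,并非平台内部实现:

```python
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

# 仅为示意:对一条短序列计算局部离群因子
series = np.array([20, 22, 91, 120, 18, 19], dtype=float).reshape(-1, 1)
lof = LocalOutlierFactor(n_neighbors=2)
labels = lof.fit_predict(series)            # -1 表示离群点,1 表示正常点
scores = -lof.negative_outlier_factor_      # 值越大,离群程度越高
top_n = np.argsort(scores)[::-1][:2]        # 离群程度最大的 top(n) 个点的下标
print(labels, top_n)
```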
### 基于深度学习的检测方法

View File

@ -3,7 +3,7 @@ title: "addins"
sidebar_label: "addins"
---
本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台, 并能够通过 SQL 语句进行调用。
本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台,并能够通过 SQL 语句进行调用。
## 目录结构
@ -27,33 +27,35 @@ sidebar_label: "addins"
### 类继承约定
异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`.
预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`
- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`
- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`
### 类属性初始化
每个算法实现的类需要静态初始化两个类属性,分别是
每个算法实现的类需要静态初始化两个类属性,分别是
`name`: 的触发调用关键词,全小写英文字母。
`desc`算法的描述信息
- `name`:触发调用的关键词,全小写英文字母
- `desc`:算法的描述信息
### 核心方法输入与输出约定
`execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 已经设置好输入数组。
异常检测输出结果
`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100],如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
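下面给出一个遵循上述约定的最小异常检测算法类示意(仅为示意,模块导入路径为假设,实际以平台源码中的模块组织为准):

```python
from taosanalytics.service import AbstractAnomalyDetectionService  # 导入路径为假设

class _MyThresholdService(AbstractAnomalyDetectionService):
    """示例算法:将大于固定阈值的点标记为异常,仅用于演示类约定"""

    # 静态初始化的两个类属性
    name = 'my_threshold'   # 触发调用的关键词,全小写英文字母
    desc = 'mark points above a fixed threshold as anomalies'

    def execute(self):
        # 调用时 self.list 已被平台设置为输入数组;
        # 返回与 self.list 等长的数组,异常点位置为 -1,其余为 1
        return [-1 if v > 50 else 1 for v in self.list]
```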
预测输出结果
对于预测算法,`AbstractForecastService` 的对象属性说明如下:
|属性名称|说明|默认值|
|---|---|---|
|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
|start_ts|预测数据的开始时间| 0|
|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
|start_ts|预测结果的开始时间| 0|
|time_step|预测结果的两个数据点之间时间间隔|0 |
|fc_rows|预测结果数量| 0 |
|return_conf|返回结果中是否包含执行区间范围,如果算法计算结果不包含置信区间,那么上界和下界与自身相同| 1|
|conf|执行区间分位数 0.05|
|fc_rows|预测结果数量| 0 |
|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1|
|conf|置信区间分位数|0.05|
预测返回结果如下:
@ -111,6 +113,7 @@ class _IqrService(AbstractAnomalyDetectionService):
## 单元测试
在测试文件目录中的 anomaly_test.py 中增加单元测试用例。
```python
def test_iqr(self):
""" 测试 _IqrService 类 """
@ -137,7 +140,7 @@ def test_iqr(self):
## 需要模型的算法
针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立了, autoencoder的目录使用该算法针对不同数据集训练得到的模型均需要放置在该目录下。
针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立 autoencoder 的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。
训练完成后的模型,使用 joblib 进行保存。
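下面是一个按该约定保存和加载模型的简单示意(目录名与文件名均为示例):

```python
import os
import joblib

# 仅为示意:每个算法在 model 目录下使用独立子目录存放训练得到的模型
model_dir = "model/autoencoder"
os.makedirs(model_dir, exist_ok=True)
trained_model = {"weights": [0.1, 0.2, 0.3]}   # 以任意可序列化对象代替真实模型
joblib.dump(trained_model, os.path.join(model_dir, "my_dataset.joblib"))
restored = joblib.load(os.path.join(model_dir, "my_dataset.joblib"))
print(restored)
```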

View File

@ -5,7 +5,7 @@ title: 数据分析功能
## 概述
TDengine 通过 ANode(AnalysisNode) 是提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,从而拓展 TDengine 的功能,支持时间序列高级分析功能
ANode(Analysis Node)是 TDengine 提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,拓展 TDengine 的功能,支持时间序列高级分析。
ANode 是无状态的数据分析节点,集群中可以存在多个 ANode 节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。
下图是数据分析的技术架构示意图。
@ -13,10 +13,10 @@ ANode 是无状态的数据分析节点,集群中可以存在多个 ANode节
## 安装部署
### 环境准备
ANode 要求节点上准备有 Python 3.10 及以上版本以及相应的Python包自动安装组件 Pip ,同时请确保能够正常连接互联网。
ANode 要求节点上准备有 Python 3.10 及以上版本以及相应的 Python 包自动安装组件 Pip同时请确保能够正常连接互联网。
### 安装及卸载
使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengien 的安装流程一致。
使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengine 的安装流程一致。
```bash
tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
@ -50,7 +50,7 @@ systemctl status taosanoded
### 配置说明
Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 的配置和 uWSGI 的配置在同一个配置文件中,具体如下:
Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 和 uWSGI 的配置信息存放在同一个配置文件中,具体如下:
```ini
[uwsgi]
@ -68,9 +68,9 @@ http = 127.0.0.1:6050
chdir = /usr/local/taos/taosanode/lib
# initialize python file
wsgi-file = /usr/local/taos/taosanode/lib/app.py
wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
# invoke app model
# call module of uWSGI
callable = app
# auto remove unix Socket and pid file when stopping
@ -92,25 +92,27 @@ master = true
processes = 2
# pid file
pidfile = /usr/local/taos/taosanode/uwsgi.pid
pidfile = /usr/local/taos/taosanode/taosanode.pid
# enable threads
enable-threads = true
# the number of threads for each process
threads=2
threads = 4
# memory usage report
memory-report = true
# smooth restart
reload-mercy = 10
# conflict with systemctl, so do NOT uncomment this
# daemonize = /var/log/taos/taosanode/taosanode.log
# set log
# log directory
logto = /var/log/taos/taosanode/taosanode.log
# monitor server
# uWSGI monitor port
stats = 127.0.0.1:8387
# python virtual environment directory
@ -125,6 +127,9 @@ model-dir=/usr/local/taos/taosanode/model/
# default log level
log-level = DEBUG
# draw the query results
draw-result = 0
```
**提示**
@ -137,7 +142,7 @@ log-level = DEBUG
```sql
CREATE ANODE {node_url}
```
node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
#### 查看 ANode
列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`
@ -151,7 +156,7 @@ SHOW ANODES;
SHOW ANODES FULL;
```
#### 强制刷新 TDengine 集群中分析算法缓存
#### 强制刷新集群中分析算法缓存
```SQL
UPDATE ANODE {node_id}
UPDATE ALL ANODES
@ -167,24 +172,23 @@ DROP ANODE {anode_id}
#### 白噪声检查
平台提供 Restful的服务检测输入时间序列是否是白噪声时间序列White Noise Data, WND白噪声时间序列及随机数序列。
此外,分析平台要求输入的数据不能是 , 因此针对的所有数据均默认进行 白噪声检查。当前白噪声检查采用通行的 `Ljung-Box`检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
分析平台提供的 Restful 服务要求输入的时间序列不能是白噪声时间序列(White Noise Data, WND)和随机数序列,因此针对所有数据均默认进行白噪声检查。当前白噪声检查采用通行的 `Ljung-Box` 检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。
同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。
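下面的 Python 片段示意如何用 `statsmodels`(假设已安装)做同类的 Ljung-Box 白噪声检验,仅用于理解检查过程,并非平台内部实现:

```python
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

rng = np.random.default_rng(0)
white_noise = rng.normal(size=200)                      # 纯随机序列
result = acorr_ljungbox(white_noise, lags=[10], return_df=True)
p_value = result["lb_pvalue"].iloc[0]
# p 值较大(例如 > 0.05)说明无法拒绝“序列为白噪声”的假设,此类输入不适合做预测分析
print(p_value)
```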
#### 数据重采样和时间戳对齐
数据分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
- 数据时间戳对齐。由于真实数据时间可能并非严格按照查询指定的时间戳输入。此时数据平台将自动将数据的时间间隔按照指定的时间间隔进行对齐。例如有输入时间序列:[11, 22, 29, 41],用户指定时间间隔为 10那么该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
- 数据时间重采样。用户输入的时间序列其采样频率超过了指定的查询需要获得结果的时间间隔,例如输入原始数据是 5 但是输出结果的频率是 10. [0 5 10 15 20 25 30],那么该输入数据列将重采用为间隔 为 10 的输入序列,其结果如下 [0, 10, 2030]。[5, 15, 25] 处的数据将被丢弃。
- 数据时间戳对齐。由于真实数据可能并非严格按照查询指定的时间戳输入,此时分析平台会自动将数据的时间间隔按照指定的时间间隔进行对齐。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0 5 10 15 20 25 30] 将被重采样为间隔为 10 的序列 [0, 10, 20, 30],[5, 15, 25] 处的数据将被丢弃。
需要注意的是,数据分析平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50],那么该序列进行预测分析将返回错误。
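下面用一个独立的 Python 片段示意上述两种预处理的效果(示意实现,并非平台内部实现):

```python
def align_timestamps(timestamps, interval):
    """时间戳对齐:把每个时间戳归整到最近的 interval 整数倍(示意实现)"""
    return [round(ts / interval) * interval for ts in timestamps]

def resample(timestamps, interval):
    """重采样:只保留恰好落在 interval 整数倍上的点,其余点被丢弃(示意实现)"""
    return [ts for ts in timestamps if ts % interval == 0]

print(align_timestamps([11, 22, 29, 41], 10))        # [10, 20, 30, 40]
print(resample([0, 5, 10, 15, 20, 25, 30], 10))      # [0, 10, 20, 30],丢弃 [5, 15, 25]
```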
#### 时序数据异常检测
异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口退化成为一个起始时间和结束时间相同的点。
异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。
对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。
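下面的片段示意如何把连续异常点合并为闭区间窗口(独立示例,并非平台内部实现):

```python
def merge_anomaly_windows(ts, flags):
    """flags 中为 -1 的位置是异常点;把连续异常点合并为 [起始时间, 结束时间] 闭区间窗口"""
    windows, start = [], None
    for t, f in zip(ts, flags):
        if f == -1:
            if start is None:
                start = t                 # 打开一个新窗口
            end = t                       # 持续扩展窗口的结束时间
        elif start is not None:
            windows.append([start, end])  # 遇到正常点,关闭当前窗口
            start = None
    if start is not None:
        windows.append([start, end])
    return windows

# 对应文档中的示例:检测到 (3, 91)、(4, 120) 为异常点,得到窗口 [3, 4]
print(merge_anomaly_windows([1, 2, 3, 4, 5, 6], [1, 1, -1, -1, 1, 1]))  # [[3, 4]]
```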
@ -201,10 +205,10 @@ algo=expr1
"}
```
1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列输入,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
2. `options`:字符串。其中使用 K/V 调用异常检测的算法,及与算法相关的参数。采用 逗号分隔的K/V字符串表示其中的字符串不需要使用单引号、双引号、或转意号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma, k=2` 表示进行异常检测的算法是 ksigma该算法接受的输入参数是 2。
1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma该算法接受的输入参数是 2。
3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
4. 输入数据默认进行白噪声检查,如果检查结果是输入数据是白噪声,将不会有任何(异常)窗口信息返回。
4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。
**参数说明**
|参数|含义|默认值|
@ -212,7 +216,7 @@ algo=expr1
|algo|异常检测调用的算法|iqr|
|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1默认值为 1表示进行白噪声检查|
异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
1. `_WSTART` 异常窗口开始时间戳
2. `_WEND`:异常窗口结束时间戳
3. `_WDURATION`:异常窗口持续时间
@ -233,7 +237,7 @@ ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
```
taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
_wstart | _wend | count(*) |
============================================================================
====================================================================
2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
Query OK, 1 row(s) in set (0.028946s)
```
@ -281,7 +285,7 @@ algo=expr1
|rows|预测结果的记录数|10|
1. 预测查询结果新增了三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
2. 更改参数 `START`:返回预测结果的起始时间,改变这个起始时间不会影响返回的预测数值,只影响起始时间。
2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。
3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。
4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。

View File

@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。
下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。
| 接口或组件 | 端口 |
| :---------------: | :--------: |
| 原生接口taosc | 6030 |
| RESTful 接口 | 6041 |
| WebSocket 接口 | 6041 |
| taosKeeper | 6043 |
| taosX | 6050, 6055 |
| taosExplorer | 6060 |
| 接口或组件名称 | 端口 | 协议 |
|:-----------------------------------------:|:----------:|:--------:|
| 原生接口taosc | 6030 | TCP |
| RESTful 接口 | 6041 | TCP |
| WebSocket 接口 | 6041 | TCP |
| taosKeeper | 6043 | TCP |
| statsd 格式写入接口 | 6044 | TCP/UDP |
| collectd 格式写入接口 | 6045 | TCP/UDP |
| openTSDB Telnet 格式写入接口 | 6046 | TCP |
| collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP |
| icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP |
| tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP |
| taosX | 6050, 6055 | TCP |
| taosExplorer | 6060 | TCP |

View File

@ -163,3 +163,15 @@ s3BucketName td-test
- 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价
- 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码
- 最大支持的 S3 服务配置数为 10
### 不依赖 Flexify 服务
用户界面同 S3不同的地方在于下面三个参数的配置
| # | 参数 | 示例值 | 描述 |
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey |
| 3 | s3BucketName | test-container | Container name |
其中 fd2d01c73 是账户 ID,微软 Blob 存储服务只支持 HTTPS 协议,不支持 HTTP。

View File

@ -22,34 +22,45 @@ taosKeeper 有两种安装方式:
taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:命令行参数、环境变量和配置文件。优先级为:命令行参数、环境变量、配置文件参数。一般我们推荐使用配置文件。
### 命令行参数和环境变量
命令行参数 和 环境变量说明可以参考命令 `taoskeeper --help` 的输出。下面是一个例子:
```shell
Usage of taosKeeper v3.3.2.0:
--debug enable debug mode. Env "TAOS_KEEPER_DEBUG"
-P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000)
Usage of taoskeeper v3.3.3.0:
-R, --RotationInterval string interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL" (default "15s")
-c, --config string config path default /etc/taos/taoskeeper.toml
--drop string run taoskeeper in command mode, only support old_taosd_metric_stables.
--environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"
--fromTime string parameter of transfer, example: 2020-01-01T00:00:00+08:00 (default "2020-01-01T00:00:00+08:00")
--gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000)
-h, --help Print this help message and exit
--instanceId int instance ID. Env "TAOS_KEEPER_INSTANCE_ID" (default 64)
--log.compress whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"
--log.keepDays uint log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS" (default 30)
--log.level string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos")
--log.reservedDiskSize string reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE" (default "1GB")
--log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log")
--metrics.database.options.buffer int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER" (default 64)
--metrics.database.options.cachemodel string database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL" (default "both")
--metrics.database.options.keep int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP" (default 90)
--metrics.database.options.vgroups int database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS" (default 1)
--metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"
--metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"
-P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043)
--tdengine.host string TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST" (default "127.0.0.1")
--tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata")
--tdengine.port int TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT" (default 6041)
--tdengine.username string TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME" (default "root")
--tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata")
--tdengine.usessl TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"
--metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"
--metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log")
--metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"
--environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"
--log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5)
--log.rotationTime duration log rotation time. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "100000000")
-c, --config string config path default /etc/taos/taoskeeper.toml
--transfer string run taoskeeper in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit
-V, --version Print the version and exit
-h, --help Print this help message and exit
```
### 配置文件
taosKeeper 支持用 `taoskeeper -c <keeper config file>` 命令来指定配置文件。
@ -57,20 +68,18 @@ taosKeeper 支持用 `taoskeeper -c <keeper config file>` 命令来指定配置
若既不指定 taosKeeper 配置文件,且 `/etc/taos/taoskeeper.toml` 也不存在,将使用默认配置。
**下面是配置文件的示例:**
```toml
# Start with debug middleware for gin
debug = false
# Listen port, default is 6043
```toml
# The ID of the currently running taoskeeper instance, default is 64.
instanceId = 64
# Listening port, default is 6043.
port = 6043
# log level
loglevel = "info"
# go pool size
# Go pool size
gopoolsize = 50000
# interval for metrics
# Interval for metrics
RotationInterval = "15s"
[tdengine]
@ -81,20 +90,21 @@ password = "taosdata"
usessl = false
[metrics]
# metrics prefix in metrics names.
# Metrics prefix in metrics names.
prefix = "taos"
# export some tables that are not super table
# Export some tables that are not super table.
tables = []
# database for storing metrics data
# Database for storing metrics data.
[metrics.database]
name = "log"
# database options for db storing metrics data
# Database options for db storing metrics data.
[metrics.database.options]
vgroups = 1
buffer = 64
KEEP = 90
keep = 90
cachemodel = "both"
[environment]
@ -102,9 +112,19 @@ cachemodel = "both"
incgroup = false
[log]
rotationCount = 5
rotationTime = "24h"
rotationSize = 100000000
# The directory where log files are stored.
# path = "/var/log/taos"
level = "info"
# Number of log file rotations before deletion.
rotationCount = 30
# The number of days to retain log files.
keepDays = 30
# The maximum size of a log file before rotation.
rotationSize = "1GB"
# If set to true, log files will be compressed.
compress = false
# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit.
reservedDiskSize = "1GB"
```
## 启动
@ -118,7 +138,6 @@ monitorFqdn localhost # taoskeeper 服务的 FQDN
TDengine 监控配置相关,具体请参考:[TDengine 监控配置](../../../operation/monitor)。
<Tabs>
<TabItem label="Linux" value="linux">
@ -188,7 +207,6 @@ Active: inactive (dead)
</TabItem>
</Tabs>
## 健康检查
可以访问 taosKeeper 的 `check_health` 接口来判断服务是否存活,如果服务正常则会返回 HTTP 200 状态码:
@ -208,7 +226,6 @@ Content-Length: 21
{"version":"3.3.2.3"}
```
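也可以在脚本中调用该接口进行存活探测,下面是一个 Python 示意(假设 taosKeeper 运行在本机默认端口 6043):

```python
import requests

# 仅为示意:通过 check_health 接口探测 taosKeeper 是否存活
resp = requests.get("http://127.0.0.1:6043/check_health", timeout=3)
if resp.status_code == 200:
    print("taosKeeper alive, version:", resp.json().get("version"))
else:
    print("unexpected status:", resp.status_code)
```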
## 数据收集与监控
taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产生的监控数据记录在指定数据库中(默认的监控数据是 `log`),这些监控数据可以用来配置 TDengine 监控。
@ -216,6 +233,7 @@ taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产
### 查看监控数据
可以查看 `log` 库下的超级表,每个超级表都对应一组监控指标,具体指标不再赘述。
```shell
taos> use log;
Database changed.
@ -251,17 +269,14 @@ taos> select last_row(*) from taosd_dnodes_info;
Query OK, 1 row(s) in set (0.003168s)
```
### 使用 TDInsight 配置监控
收集到监控数据以后,就可以使用 TDInsight 来配置 TDengine 的监控,具体请参考 [TDinsight 参考手册](../tdinsight/)
收集到监控数据以后,就可以使用 TDInsight 来配置 TDengine 的监控,具体请参考 [TDinsight 参考手册](../tdinsight/)。
## 集成 Prometheus
taoskeeper 提供了 `/metrics` 接口,返回了 Prometheus 格式的监控数据Prometheus 可以从 taoskeeper 抽取监控数据,实现通过 Prometheus 监控 TDengine 的目的。
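例如,可以用下面的 Python 片段抓取并过滤 `/metrics` 返回的文本格式指标(地址为假设的本机默认端口,仅作演示):

```python
import requests

# 仅为示意:抓取 Prometheus 文本格式指标并筛选集群相关条目
text = requests.get("http://127.0.0.1:6043/metrics", timeout=5).text
for line in text.splitlines():
    if line.startswith("taos_cluster_info_"):
        print(line)   # 例如 taos_cluster_info_first_ep_dnode_id{cluster_id="..."} 1
```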
### 导出监控指标
下面通过 `curl` 命令展示 `/metrics` 接口返回的数据格式:
@ -298,9 +313,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taosd 集群
##### 监控信息支持的标签
- `cluster_id` 集群 id
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| ----------------------------------- | ------- | ------------------------------------- |
| taos_cluster_info_connections_total | counter | 总连接数 |
@ -328,11 +345,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### dnode
##### 监控信息支持的标签
- `cluster_id` 集群 id
- `dnode_ep` dnode 端点
- `dnode_id`dnode id
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| ------------------------------ | ------- | ---------------------------------------------------------------------------------------- |
| taos_d_info_status | gauge | dnode 状态,标签 value 表示状态, ready 表示正常, offline 表示下线, unknown 表示未知。 |
@ -361,6 +380,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### 数据目录
##### 监控信息支持的标签
- `cluster_id` 集群 id
- `dnode_ep` dnode 端点
- `dnode_id`dnode id
@ -368,6 +388,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- `data_dir_level`:数据目录级别
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| --------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_data_dirs_avail | gauge | 可用空间(单位 Byte) |
@ -377,12 +398,14 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### 日志目录
##### 监控信息支持的标签
- `cluster_id` 集群 id
- `dnode_ep` dnode 端点
- `dnode_id`dnode id
- `log_dir_name`:日志目录名
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| -------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_log_dirs_avail | gauge | 可用空间(单位 Byte) |
@ -392,11 +415,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### 日志数量
##### 监控信息支持的标签
- `cluster_id` 集群 id
- `dnode_ep` dnode 端点
- `dnode_id`dnode id
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| ---------------------- | ------- | ------------ |
| taos_log_summary_debug | counter | 调试日志数量 |
@ -404,14 +429,15 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
| taos_log_summary_info | counter | 信息日志数量 |
| taos_log_summary_trace | counter | 跟踪日志数量 |
#### taosadapter
##### 监控信息支持的标签
- `endpoint`:端点
- `req_type`请求类型0 表示 rest1 表示 websocket
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| -------------------------------------- | ------- | -------------------- |
| taos_adapter_requests_fail | counter | 失败的请求数 |
@ -433,9 +459,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taoskeeper
##### 监控信息支持的标签
- `identify` 节点 endpoint
##### 相关指标及其含义
| 指标名称 | 类型 | 含义 |
| ----------------------- | ----- | ------------------------------------- |
| taos_keeper_monitor_cpu | gauge | taoskeeper CPU 使用率(取值范围 0~1) |
@ -444,6 +472,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### 其他 taosd 集群监控项
##### taos_m_info_role
- **标签**:
- `cluster_id`: 集群 id
- `mnode_ep`: mnode 端点
@ -453,6 +482,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **含义**: mnode 角色
##### taos_taos_sql_req_count
- **标签**:
- `cluster_id`: 集群 id
- `result`: 请求结果(取值范围: Success, Failed
@ -462,6 +492,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **含义**: SQL 请求数量
##### taos_taosd_sql_req_count
- **标签**:
- `cluster_id`: 集群 id
- `dnode_ep`: dnode 端点
@ -474,6 +505,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **含义**: SQL 请求数量
##### taos_taosd_vgroups_info_status
- **标签**:
- `cluster_id`: 集群 id
- `database_name`: 数据库名称
@ -482,6 +514,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **含义**: 虚拟组状态。 0 为 unsynced表示没有leader选出1 为 ready。
##### taos_taosd_vgroups_info_tables_num
- **标签**:
- `cluster_id`: 集群 id
- `database_name`: 数据库名称
@ -490,6 +523,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **含义**: 虚拟组表数量
##### taos_taosd_vnodes_info_role
- **标签**:
- `cluster_id`: 集群 id
- `database_name`: 数据库名称
@ -499,7 +533,6 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **类型**: gauge
- **含义**: 虚拟节点角色
### 抽取配置
Prometheus 提供了 `scrape_configs` 配置如何从 endpoint 抽取监控数据,通常只需要修改 `static_configs` 中的 targets 配置为 taoskeeper 的 endpoint 地址,更多配置信息请参考 [Prometheus 配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)。
@ -521,8 +554,6 @@ scrape_configs:
在 Grafana Dashboard 菜单点击 `import`dashboard ID 填写 `18587`,点击 `Load` 按钮即可导入 `TaosKeeper Prometheus Dashboard for 3.x` dashboard。
## taosKeeper 监控指标
taosKeeper 也会将自己采集的监控数据写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。

View File

@ -1569,7 +1569,7 @@ COUNT({* | expr})
ELAPSED(ts_primary_key [, time_unit])
```
**功能说明**elapsed函数表达了统计周期内连续的时间长度和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下统计在给定时间范围内的每个窗口内有数据覆盖的时间范围如果没有INTERVAL子句则返回整个给定时间范围内的有数据覆盖的时间范围。注意ELAPSED返回的并不是时间范围的绝对值而是绝对值除以time_unit所得到的单位个数。
**功能说明**elapsed 函数表达了统计周期内连续的时间长度,和 twa 函数配合使用可以计算统计曲线下的面积。在通过 INTERVAL 子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有 INTERVAL 子句则返回整个给定时间范围内的有数据覆盖的时间范围。注意ELAPSED 返回的并不是时间范围的绝对值,而是绝对值除以 time_unit 所得到的单位个数。流计算仅在 FORCE_WINDOW_CLOSE 模式下支持该函数。
**返回结果类型**DOUBLE。
@ -1829,7 +1829,7 @@ ignore_null_values: {
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
- INTERP 的输入数据为指定列的数据可以通过条件语句where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
- INTERP 需要同时与 RANGEEVERY 和 FILL 关键字一起使用。
- INTERP SQL 查询需要同时与 RANGEEVERY 和 FILL 关键字一起使用;流计算不能使用 RANGE需要 EVERY 和 FILL 关键字一起使用。
- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2) 字段来指定,需满足 timestamp1 \<= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始,每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。例如 EVERY(500a) 将对于指定数据每 500 毫秒间隔进行一次插值。
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
@ -2180,7 +2180,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```
**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。对于存在复合主键的表的查询,若时间戳相同的数据存在多条,则只有对应的复合主键最小的数据参与运算。
**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。对于存在复合主键的表的查询,若时间戳相同的数据存在多条,则只有对应的复合主键最小的数据参与运算。流计算仅在 FORCE_WINDOW_CLOSE 模式下支持该函数。
**返回数据类型**DOUBLE。

View File

@ -143,13 +143,14 @@ SELECT * from information_schema.`ins_streams`;
在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。
对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式,默认为 WINDOW_CLOSE
对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 4 种触发模式,默认为 WINDOW_CLOSE
1. AT_ONCE:写入立即触发
2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)
3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。
4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV、NULL、NONE、VALUE。
由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。

View File

@ -154,6 +154,7 @@ typedef enum EStreamType {
STREAM_TRANS_STATE,
STREAM_MID_RETRIEVE,
STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
} EStreamType;
#pragma pack(push, 1)
@ -383,6 +384,10 @@ typedef struct STUidTagInfo {
#define TABLE_NAME_COLUMN_INDEX 6
#define PRIMARY_KEY_COLUMN_INDEX 7
// stream get result block column
#define DATA_TS_COLUMN_INDEX 0
#define DATA_VERSION_COLUMN_INDEX 1
// stream create table block column
#define UD_TABLE_NAME_COLUMN_INDEX 0
#define UD_GROUPID_COLUMN_INDEX 1

View File

@ -189,7 +189,12 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint
int32_t getJsonValueLen(const char* data);
// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length.
// If the same row is inserted repeatedly, data holes will result.
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1),
// it will be inserted at the original position and the old data will be overwritten.
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows,
bool trimValue);
@ -233,7 +238,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
* @brief find how many rows already in order start from first row
*/
int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo);
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk);
int32_t blockDataCheck(const SSDataBlock* pDataBlock);
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
@ -266,7 +271,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId)
int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData);
int32_t blockGetEncodeSize(const SSDataBlock* pBlock);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols);
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos);
// for debug

View File

@ -142,6 +142,7 @@ extern bool tsMonitorForceV2;
// audit
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern bool tsEnableAuditDelete;
extern int32_t tsAuditInterval;
// telem
@ -153,6 +154,12 @@ extern bool tsEnableCrashReport;
extern char *tsTelemUri;
extern char *tsClientCrashReportUri;
extern char *tsSvrCrashReportUri;
extern int8_t tsSafetyCheckLevel;
enum {
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
TSDB_SAFETY_CHECK_LEVELL_BYROW = 2,
};
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
@ -194,10 +201,10 @@ extern int32_t tsMinIntervalTime;
extern int32_t tsMaxInsertBatchRows;
// build info
extern char version[];
extern char compatible_version[];
extern char gitinfo[];
extern char buildinfo[];
extern char td_version[];
extern char td_compatible_version[];
extern char td_gitinfo[];
extern char td_buildinfo[];
// lossy
extern char tsLossyColumns[];

View File

@ -467,9 +467,11 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC,
} ENodeType;
typedef struct {
@ -1022,6 +1024,7 @@ typedef struct {
char sDetailVer[128];
int64_t whiteListVer;
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SConnectRsp;
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
@ -1215,6 +1218,7 @@ typedef struct {
int32_t bytes;
int8_t type;
uint8_t pk;
bool noData;
} SColumnInfo;
typedef struct STimeWindow {
@ -1826,6 +1830,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
void tFreeSStatisReq(SStatisReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
char table[TSDB_TABLE_NAME_LEN];
char operation[AUDIT_OPERATION_LEN];
int32_t sqlLen;
char* pSql;
} SAuditReq;
int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
void tFreeSAuditReq(SAuditReq* pReq);
typedef struct {
int32_t dnodeId;
int64_t clusterId;
@ -2817,6 +2832,8 @@ typedef struct {
#define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_MAX_DELAY 3
#define STREAM_TRIGGER_FORCE_WINDOW_CLOSE 4
#define STREAM_DEFAULT_IGNORE_EXPIRED 1
#define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0
@ -3414,6 +3431,7 @@ typedef struct {
int32_t svrTimestamp;
SArray* rsps; // SArray<SClientHbRsp>
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SClientHbBatchRsp;
static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); }

View File

@ -259,6 +259,7 @@
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8

View File

@ -16,394 +16,7 @@
#ifndef _TD_COMMON_TOKEN_H_
#define _TD_COMMON_TOKEN_H_
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_NK_COMMA 33
#define TK_HOST 34
#define TK_IS_IMPORT 35
#define TK_NK_INTEGER 36
#define TK_CREATEDB 37
#define TK_USER 38
#define TK_ENABLE 39
#define TK_SYSINFO 40
#define TK_ADD 41
#define TK_DROP 42
#define TK_GRANT 43
#define TK_ON 44
#define TK_TO 45
#define TK_REVOKE 46
#define TK_FROM 47
#define TK_SUBSCRIBE 48
#define TK_READ 49
#define TK_WRITE 50
#define TK_NK_DOT 51
#define TK_WITH 52
#define TK_ENCRYPT_KEY 53
#define TK_ANODE 54
#define TK_UPDATE 55
#define TK_ANODES 56
#define TK_DNODE 57
#define TK_PORT 58
#define TK_DNODES 59
#define TK_RESTORE 60
#define TK_NK_IPTOKEN 61
#define TK_FORCE 62
#define TK_UNSAFE 63
#define TK_CLUSTER 64
#define TK_LOCAL 65
#define TK_QNODE 66
#define TK_BNODE 67
#define TK_SNODE 68
#define TK_MNODE 69
#define TK_VNODE 70
#define TK_DATABASE 71
#define TK_USE 72
#define TK_FLUSH 73
#define TK_TRIM 74
#define TK_S3MIGRATE 75
#define TK_COMPACT 76
#define TK_IF 77
#define TK_NOT 78
#define TK_EXISTS 79
#define TK_BUFFER 80
#define TK_CACHEMODEL 81
#define TK_CACHESIZE 82
#define TK_COMP 83
#define TK_DURATION 84
#define TK_NK_VARIABLE 85
#define TK_MAXROWS 86
#define TK_MINROWS 87
#define TK_KEEP 88
#define TK_PAGES 89
#define TK_PAGESIZE 90
#define TK_TSDB_PAGESIZE 91
#define TK_PRECISION 92
#define TK_REPLICA 93
#define TK_VGROUPS 94
#define TK_SINGLE_STABLE 95
#define TK_RETENTIONS 96
#define TK_SCHEMALESS 97
#define TK_WAL_LEVEL 98
#define TK_WAL_FSYNC_PERIOD 99
#define TK_WAL_RETENTION_PERIOD 100
#define TK_WAL_RETENTION_SIZE 101
#define TK_WAL_ROLL_PERIOD 102
#define TK_WAL_SEGMENT_SIZE 103
#define TK_STT_TRIGGER 104
#define TK_TABLE_PREFIX 105
#define TK_TABLE_SUFFIX 106
#define TK_S3_CHUNKPAGES 107
#define TK_S3_KEEPLOCAL 108
#define TK_S3_COMPACT 109
#define TK_KEEP_TIME_OFFSET 110
#define TK_ENCRYPT_ALGORITHM 111
#define TK_NK_COLON 112
#define TK_BWLIMIT 113
#define TK_START 114
#define TK_TIMESTAMP 115
#define TK_END 116
#define TK_TABLE 117
#define TK_NK_LP 118
#define TK_NK_RP 119
#define TK_USING 120
#define TK_FILE 121
#define TK_STABLE 122
#define TK_COLUMN 123
#define TK_MODIFY 124
#define TK_RENAME 125
#define TK_TAG 126
#define TK_SET 127
#define TK_NK_EQ 128
#define TK_TAGS 129
#define TK_BOOL 130
#define TK_TINYINT 131
#define TK_SMALLINT 132
#define TK_INT 133
#define TK_INTEGER 134
#define TK_BIGINT 135
#define TK_FLOAT 136
#define TK_DOUBLE 137
#define TK_BINARY 138
#define TK_NCHAR 139
#define TK_UNSIGNED 140
#define TK_JSON 141
#define TK_VARCHAR 142
#define TK_MEDIUMBLOB 143
#define TK_BLOB 144
#define TK_VARBINARY 145
#define TK_GEOMETRY 146
#define TK_DECIMAL 147
#define TK_COMMENT 148
#define TK_MAX_DELAY 149
#define TK_WATERMARK 150
#define TK_ROLLUP 151
#define TK_TTL 152
#define TK_SMA 153
#define TK_DELETE_MARK 154
#define TK_FIRST 155
#define TK_LAST 156
#define TK_SHOW 157
#define TK_FULL 158
#define TK_PRIVILEGES 159
#define TK_DATABASES 160
#define TK_TABLES 161
#define TK_STABLES 162
#define TK_MNODES 163
#define TK_QNODES 164
#define TK_ARBGROUPS 165
#define TK_FUNCTIONS 166
#define TK_INDEXES 167
#define TK_ACCOUNTS 168
#define TK_APPS 169
#define TK_CONNECTIONS 170
#define TK_LICENCES 171
#define TK_GRANTS 172
#define TK_LOGS 173
#define TK_MACHINES 174
#define TK_ENCRYPTIONS 175
#define TK_QUERIES 176
#define TK_SCORES 177
#define TK_TOPICS 178
#define TK_VARIABLES 179
#define TK_BNODES 180
#define TK_SNODES 181
#define TK_TRANSACTIONS 182
#define TK_DISTRIBUTED 183
#define TK_CONSUMERS 184
#define TK_SUBSCRIPTIONS 185
#define TK_VNODES 186
#define TK_ALIVE 187
#define TK_VIEWS 188
#define TK_VIEW 189
#define TK_COMPACTS 190
#define TK_NORMAL 191
#define TK_CHILD 192
#define TK_LIKE 193
#define TK_TBNAME 194
#define TK_QTAGS 195
#define TK_AS 196
#define TK_SYSTEM 197
#define TK_TSMA 198
#define TK_INTERVAL 199
#define TK_RECURSIVE 200
#define TK_TSMAS 201
#define TK_FUNCTION 202
#define TK_INDEX 203
#define TK_COUNT 204
#define TK_LAST_ROW 205
#define TK_META 206
#define TK_ONLY 207
#define TK_TOPIC 208
#define TK_CONSUMER 209
#define TK_GROUP 210
#define TK_DESC 211
#define TK_DESCRIBE 212
#define TK_RESET 213
#define TK_QUERY 214
#define TK_CACHE 215
#define TK_EXPLAIN 216
#define TK_ANALYZE 217
#define TK_VERBOSE 218
#define TK_NK_BOOL 219
#define TK_RATIO 220
#define TK_NK_FLOAT 221
#define TK_OUTPUTTYPE 222
#define TK_AGGREGATE 223
#define TK_BUFSIZE 224
#define TK_LANGUAGE 225
#define TK_REPLACE 226
#define TK_STREAM 227
#define TK_INTO 228
#define TK_PAUSE 229
#define TK_RESUME 230
#define TK_PRIMARY 231
#define TK_KEY 232
#define TK_TRIGGER 233
#define TK_AT_ONCE 234
#define TK_WINDOW_CLOSE 235
#define TK_IGNORE 236
#define TK_EXPIRED 237
#define TK_FILL_HISTORY 238
#define TK_SUBTABLE 239
#define TK_UNTREATED 240
#define TK_KILL 241
#define TK_CONNECTION 242
#define TK_TRANSACTION 243
#define TK_BALANCE 244
#define TK_VGROUP 245
#define TK_LEADER 246
#define TK_MERGE 247
#define TK_REDISTRIBUTE 248
#define TK_SPLIT 249
#define TK_DELETE 250
#define TK_INSERT 251
#define TK_NK_BIN 252
#define TK_NK_HEX 253
#define TK_NULL 254
#define TK_NK_QUESTION 255
#define TK_NK_ALIAS 256
#define TK_NK_ARROW 257
#define TK_ROWTS 258
#define TK_QSTART 259
#define TK_QEND 260
#define TK_QDURATION 261
#define TK_WSTART 262
#define TK_WEND 263
#define TK_WDURATION 264
#define TK_IROWTS 265
#define TK_ISFILLED 266
#define TK_FLOW 267
#define TK_FHIGH 268
#define TK_FROWTS 269
#define TK_CAST 270
#define TK_POSITION 271
#define TK_IN 272
#define TK_FOR 273
#define TK_NOW 274
#define TK_TODAY 275
#define TK_RAND 276
#define TK_SUBSTR 277
#define TK_SUBSTRING 278
#define TK_BOTH 279
#define TK_TRAILING 280
#define TK_LEADING 281
#define TK_TIMEZONE 282
#define TK_CLIENT_VERSION 283
#define TK_SERVER_VERSION 284
#define TK_SERVER_STATUS 285
#define TK_CURRENT_USER 286
#define TK_PI 287
#define TK_CASE 288
#define TK_WHEN 289
#define TK_THEN 290
#define TK_ELSE 291
#define TK_BETWEEN 292
#define TK_IS 293
#define TK_NK_LT 294
#define TK_NK_GT 295
#define TK_NK_LE 296
#define TK_NK_GE 297
#define TK_NK_NE 298
#define TK_MATCH 299
#define TK_NMATCH 300
#define TK_CONTAINS 301
#define TK_JOIN 302
#define TK_INNER 303
#define TK_LEFT 304
#define TK_RIGHT 305
#define TK_OUTER 306
#define TK_SEMI 307
#define TK_ANTI 308
#define TK_ASOF 309
#define TK_WINDOW 310
#define TK_WINDOW_OFFSET 311
#define TK_JLIMIT 312
#define TK_SELECT 313
#define TK_NK_HINT 314
#define TK_DISTINCT 315
#define TK_WHERE 316
#define TK_PARTITION 317
#define TK_BY 318
#define TK_SESSION 319
#define TK_STATE_WINDOW 320
#define TK_EVENT_WINDOW 321
#define TK_COUNT_WINDOW 322
#define TK_ANOMALY_WINDOW 323
#define TK_SLIDING 324
#define TK_FILL 325
#define TK_VALUE 326
#define TK_VALUE_F 327
#define TK_NONE 328
#define TK_PREV 329
#define TK_NULL_F 330
#define TK_LINEAR 331
#define TK_NEXT 332
#define TK_HAVING 333
#define TK_RANGE 334
#define TK_EVERY 335
#define TK_ORDER 336
#define TK_SLIMIT 337
#define TK_SOFFSET 338
#define TK_LIMIT 339
#define TK_OFFSET 340
#define TK_ASC 341
#define TK_NULLS 342
#define TK_ABORT 343
#define TK_AFTER 344
#define TK_ATTACH 345
#define TK_BEFORE 346
#define TK_BEGIN 347
#define TK_BITAND 348
#define TK_BITNOT 349
#define TK_BITOR 350
#define TK_BLOCKS 351
#define TK_CHANGE 352
#define TK_COMMA 353
#define TK_CONCAT 354
#define TK_CONFLICT 355
#define TK_COPY 356
#define TK_DEFERRED 357
#define TK_DELIMITERS 358
#define TK_DETACH 359
#define TK_DIVIDE 360
#define TK_DOT 361
#define TK_EACH 362
#define TK_FAIL 363
#define TK_GLOB 364
#define TK_ID 365
#define TK_IMMEDIATE 366
#define TK_IMPORT 367
#define TK_INITIALLY 368
#define TK_INSTEAD 369
#define TK_ISNULL 370
#define TK_MODULES 371
#define TK_NK_BITNOT 372
#define TK_NK_SEMI 373
#define TK_NOTNULL 374
#define TK_OF 375
#define TK_PLUS 376
#define TK_PRIVILEGE 377
#define TK_RAISE 378
#define TK_RESTRICT 379
#define TK_ROW 380
#define TK_STAR 381
#define TK_STATEMENT 382
#define TK_STRICT 383
#define TK_STRING 384
#define TK_TIMES 385
#define TK_VALUES 386
#define TK_VARIABLE 387
#define TK_WAL 388
#include "ttokenauto.h"
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601

View File

@ -298,6 +298,7 @@ typedef struct {
#define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX)
#define IS_CONVERT_AS_SIGNED(_t) \
(IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))

View File

@ -29,7 +29,6 @@ extern "C" {
#endif
#define AUDIT_DETAIL_MAX 65472
#define AUDIT_OPERATION_LEN 20
typedef struct {
const char *server;

View File

@ -222,8 +222,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
bool qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo);
void qResetTaskInfoCode(qTaskInfo_t tinfo);
int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow);
int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo);
int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo);

View File

@ -41,6 +41,8 @@ extern "C" {
#define STREAM_STATE_BUFF_HASH 1
#define STREAM_STATE_BUFF_SORT 2
#define STREAM_STATE_BUFF_HASH_SORT 3
#define STREAM_STATE_BUFF_HASH_SEARCH 4
typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*);
@ -325,6 +327,9 @@ typedef struct {
int64_t number;
void* pStreamFileState;
int32_t buffIndex;
int32_t hashIter;
void* pHashData;
int64_t minGpId;
} SStreamStateCur;
typedef struct SStateStore {
@ -337,6 +342,8 @@ typedef struct SStateStore {
void (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used);
void (*streamStateClearBuff)(SStreamState* pState, void* pVal);
void (*streamStateFreeVal)(void* val);
int32_t (*streamStateGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -349,8 +356,15 @@ typedef struct SStateStore {
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t (*streamStateFillAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateFillGetNext)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateFillGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
void (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
void (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
@ -361,9 +375,12 @@ typedef struct SStateStore {
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateFillGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
void (*streamStateSetFillInfo)(SStreamState* pState);
void (*streamStateClearExpiredState)(SStreamState* pState);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
@ -400,8 +417,8 @@ typedef struct SStateStore {
SUpdateInfo** ppInfo);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen);
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(SDecoder* pDeCoder, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
@ -412,6 +429,11 @@ typedef struct SStateStore {
GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type,
struct SStreamFileState** ppFileState);
int32_t (*streamStateGroupPut)(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* (*streamStateGroupGetCur)(SStreamState* pState);
void (*streamStateGroupCurNext)(SStreamStateCur* pCur);
int32_t (*streamStateGroupGetKVByCur)(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);

View File

@ -292,6 +292,7 @@ bool fmIsElapsedFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType, int32_t pkBytes);
int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc);

View File

@ -24,6 +24,7 @@ extern "C" {
#include "thash.h"
#include "query.h"
#include "tqueue.h"
#include "clientInt.h"
typedef enum {
SQL_RESULT_SUCCESS = 0,
@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name,
void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values);
const char* monitorResultStr(SQL_RESULT_CODE code);
int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data);
void clientOperateReport(SRequestObj* pRequest);
#ifdef __cplusplus
}
#endif

View File

@ -194,14 +194,26 @@ typedef struct SIndefRowsFuncLogicNode {
bool isTimeLineFunc;
} SIndefRowsFuncLogicNode;
typedef struct SStreamNodeOption {
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
int8_t igCheckUpdate;
int8_t destHasPrimaryKey;
} SStreamNodeOption;
typedef struct SInterpFuncLogicNode {
SLogicNode node;
SNodeList* pFuncs;
STimeWindow timeRange;
int64_t interval;
int8_t intervalUnit;
int8_t precision;
EFillMode fillMode;
SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncLogicNode;
typedef struct SForecastFuncLogicNode {
@ -511,11 +523,15 @@ typedef struct SInterpFuncPhysiNode {
STimeWindow timeRange;
int64_t interval;
int8_t intervalUnit;
int8_t precision;
EFillMode fillMode;
SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncPhysiNode;
typedef SInterpFuncPhysiNode SStreamInterpFuncPhysiNode;
typedef struct SForecastFuncPhysiNode {
SPhysiNode node;
SNodeList* pExprs;
@ -650,7 +666,7 @@ typedef struct SWindowPhysiNode {
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
int8_t destHasPrimayKey;
int8_t destHasPrimaryKey;
bool mergeDataBlock;
} SWindowPhysiNode;

View File

@ -457,6 +457,7 @@ typedef struct SSelectStmt {
bool hasCountFunc;
bool hasUdaf;
bool hasStateKey;
bool hasTwaOrElapsedFunc;
bool onlyHasKeepOrderFunc;
bool groupSort;
bool tagScan;

View File

@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara
int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t timeZoneStrLen();
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

View File

@ -49,6 +49,8 @@ void streamStateClear(SStreamState* pState);
void streamStateSetNumber(SStreamState* pState, int32_t number, int32_t tsIdex);
void streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
// session window
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen,
@ -75,8 +77,14 @@ int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, ch
// fill
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
int32_t streamStateFillAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateFillGetNext(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateFillGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
@ -96,15 +104,25 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey*
void streamStateFreeCur(SStreamStateCur* pCur);
void streamStateResetCur(SStreamStateCur* pCur);
int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateFillGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
// twa
void streamStateSetFillInfo(SStreamState* pState);
void streamStateClearExpiredState(SStreamState* pState);
void streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode);
// group id
int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* streamStateGroupGetCur(SStreamState* pState);
void streamStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void streamStateReloadInfo(SStreamState* pState, TSKEY ts);
void streamStateCopyBackend(SStreamState* src, SStreamState* dst);
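The fill-state calls above now return window lookup status through a separate `pWinCode` out-parameter in addition to the function's own error code. A minimal caller sketch under that assumption, using only the declarations shown in this header; `demoFillNeighbors` is a hypothetical helper, the ownership conventions for the returned buffers are assumed, and `TSDB_CODE_SUCCESS` comes from the usual error header:

```c
#include "streamState.h"  // declarations shown above
#include "taoserror.h"    // TSDB_CODE_SUCCESS

// Sketch: look up a fill window and peek at its neighbours by key.
static int32_t demoFillNeighbors(SStreamState* pState, const SWinKey* pKey) {
  void*   pVal = NULL;
  int32_t vLen = 0;
  int32_t winCode = 0;  // reports whether the window already existed (assumed semantics)

  int32_t code = streamStateFillAddIfNotExist(pState, pKey, &pVal, &vLen, &winCode);
  if (code != TSDB_CODE_SUCCESS) return code;

  SWinKey prevKey = {0};
  void*   pPrevVal = NULL;
  int32_t prevLen = 0;
  code = streamStateFillGetPrev(pState, pKey, &prevKey, &pPrevVal, &prevLen, &winCode);
  if (code != TSDB_CODE_SUCCESS) return code;

  SWinKey nextKey = {0};
  void*   pNextVal = NULL;
  int32_t nextLen = 0;
  return streamStateFillGetNext(pState, pKey, &nextKey, &pNextVal, &nextLen, &winCode);
}
```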

View File

@ -114,7 +114,7 @@ enum {
enum {
TASK_TRIGGER_STATUS__INACTIVE = 1,
TASK_TRIGGER_STATUS__ACTIVE,
TASK_TRIGGER_STATUS__MAY_ACTIVE,
};
typedef enum {
@ -295,9 +295,10 @@ typedef struct SStreamStatus {
int32_t schedIdleTime; // idle time before invoke again
int64_t lastExecTs; // last exec time stamp
int32_t inScanHistorySentinel;
bool appendTranstateBlock; // has append the transfer state data block already
bool appendTranstateBlock; // has appended the transfer state data block already
bool removeBackendFiles; // remove backend files on disk when free stream tasks
SConsenChkptInfo consenChkptInfo;
STimeWindow latestForceWindow; // latest generated time window, only valid in
} SStreamStatus;
typedef struct SDataRange {
@ -310,10 +311,12 @@ typedef struct SSTaskBasicInfo {
SEpSet epSet;
SEpSet mnodeEpset; // mnode epset for send heartbeat
int32_t selfChildId;
int32_t totalLevel;
int32_t trigger;
int8_t taskLevel;
int8_t fillHistory; // is fill history task or not
int64_t delaySchedParam; // in msec
int64_t watermark; // extracted from operators
SInterval interval;
} SSTaskBasicInfo;
typedef struct SStreamRetrieveReq SStreamRetrieveReq;
@ -544,8 +547,9 @@ typedef struct STaskUpdateEntry {
typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask);
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int32_t trigger,
int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5,
SStreamTask** pTask);
void tFreeStreamTask(void* pTask);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);

View File

@ -16,8 +16,6 @@
#ifndef _STREAM_FILE_STATE_H_
#define _STREAM_FILE_STATE_H_
#include "os.h"
#include "storageapi.h"
#include "tarray.h"
#include "tdef.h"
@ -37,7 +35,7 @@ typedef void (*_state_buff_cleanup_fn)(void* pRowBuff);
typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num);
typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);
typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal,
@ -45,6 +43,8 @@ typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, i
typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2);
typedef int (*__session_compare_fn_t)(const void* pWin, const void* pDatas, int pos);
int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp,
void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type,
struct SStreamFileState** ppFileState);
@ -54,6 +54,8 @@ bool needClearDiskBuff(SStreamFileState* pFileState);
void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used);
void streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t addRowBuffIfNotExist(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
@ -71,9 +73,11 @@ int32_t streamFileStateGetSelectRowSize(SStreamFileState* pFileState);
void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts);
void* getRowStateBuff(SStreamFileState* pFileState);
void* getSearchBuff(SStreamFileState* pFileState);
void* getStateFileStore(SStreamFileState* pFileState);
bool isDeteled(SStreamFileState* pFileState, TSKEY ts);
bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap);
TSKEY getFlushMark(SStreamFileState* pFileState);
SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState);
int32_t getRowStateRowSize(SStreamFileState* pFileState);
@ -94,6 +98,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* createStateCursor(SStreamFileState* pFileState);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
@ -103,6 +108,8 @@ void sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey,
range_cmpr_fn cmpFn);
int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_fn_t cmpFn);
// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -117,6 +124,34 @@ int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyL
int32_t* pWinCode);
int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
// time slice
int32_t getHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t hashSortFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
int32_t hashSortFileRemoveFn(SStreamFileState* pFileState, const void* pKey);
void clearSearchBuff(SStreamFileState* pFileState);
int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t getHashSortPrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t recoverFillSnapshot(SStreamFileState* pFileState, int64_t ckId);
void deleteHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey);
//group
int32_t streamFileStateGroupPut(SStreamFileState* pFileState, int64_t groupId, void* value, int32_t vLen);
void streamFileStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamFileStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
SSHashObj* getGroupIdCache(SStreamFileState* pFileState);
int fillStateKeyCompare(const void* pWin1, const void* pDatas, int pos);
int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t addSearchItem(SStreamFileState* pFileState, SArray* pWinStates, const SWinKey* pKey);
//twa
void setFillInfo(SStreamFileState* pFileState);
void clearExpiredState(SStreamFileState* pFileState);
int32_t addArrayBuffIfNotExist(SSHashObj* pSearchBuff, uint64_t groupId, SArray** ppResStates);
#ifdef __cplusplus
}
#endif

View File

@ -36,8 +36,8 @@ bool updateInfoIsTableInserted(SUpdateInfo* pInfo, int64_t tbUid);
void updateInfoDestroy(SUpdateInfo* pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo* pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo);
int32_t updateInfoSerialize(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen);
int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo);
void windowSBfDelete(SUpdateInfo* pInfo, uint64_t count);
int32_t windowSBfAdd(SUpdateInfo* pInfo, uint64_t count);
bool isIncrementalTimeStamp(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts, void* pPkVal, int32_t len);

View File

@ -653,6 +653,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
#define MONITOR_TAG_VALUE_LEN 300
#define MONITOR_METRIC_NAME_LEN 100
#define AUDIT_OPERATION_LEN 20
typedef enum {
ANAL_ALGO_TYPE_ANOMALY_DETECT = 0,
ANAL_ALGO_TYPE_FORECAST = 1,

View File

@ -20,11 +20,11 @@
extern "C" {
#endif
extern char version[];
extern char compatible_version[];
extern char gitinfo[];
extern char gitinfoOfInternal[];
extern char buildinfo[];
extern char td_version[];
extern char td_compatible_version[];
extern char td_gitinfo[];
extern char td_gitinfoOfInternal[];
extern char td_buildinfo[];
#ifdef __cplusplus
}

View File

@ -185,7 +185,14 @@ function kill_process() {
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}rm -rf ${install_main_dir}/cfg || :
${csudo}rm -rf ${install_main_dir}/bin || :
${csudo}rm -rf ${install_main_dir}/driver || :
${csudo}rm -rf ${install_main_dir}/examples || :
${csudo}rm -rf ${install_main_dir}/include || :
${csudo}rm -rf ${install_main_dir}/share || :
${csudo}rm -rf ${install_main_dir}/log || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin

View File

@ -0,0 +1,4 @@
TDengine client is installed successfully. Please open a terminal and execute the commands below:
To configure TDengine client, sudo vi /etc/taos/taos.cfg
To access TDengine command line interface, taos -h YourServerName

View File

@ -108,6 +108,10 @@ typedef struct SQueryExecMetric {
int64_t execCostUs;
} SQueryExecMetric;
typedef struct {
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SAppInstServerCFG;
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
@ -121,7 +125,7 @@ struct SAppInstInfo {
void* pTransporter;
SAppHbMgr* pAppHbMgr;
char* instKey;
SMonitorParas monitorParas;
SAppInstServerCFG serverCfg;
};
typedef struct SAppInfo {
@ -297,8 +301,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4);
int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);

View File

@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType)));
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(
json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows)));
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) {
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen];
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0';
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) {
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen];
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0';
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp;
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp;
} else {
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
}
@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) {
sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL ||
duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) {
if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s",
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration,
pRequest->sqlstr);
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration);
if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) {
tscError("failed to generate write slow log");
@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread,
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
if (TSDB_CODE_SUCCESS != code) {
tscError("invalid version string.");
return code;
@ -689,7 +689,7 @@ void doDestroyRequest(void *p) {
int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
if (TSDB_CODE_SUCCESS != code) {
tscError("failed to remove request from hash, code:%s", tstrerror(code));
tscWarn("failed to remove request from hash, code:%s", tstrerror(code));
}
schedulerFreeJob(&pRequest->body.queryJob, 0);

View File

@ -605,7 +605,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
return code;
}
pInst->monitorParas = pRsp.monitorParas;
pInst->serverCfg.monitorParas = pRsp.monitorParas;
pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete;
tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId,
pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope);

View File

@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app));
tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user));
tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd));
tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer));
tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer));
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = taosMemoryMalloc(contLen);
@ -2084,12 +2084,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
int32_t idx = -1;
iconv_t conv = taosAcquireConv(&idx, C2M);
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int32_t type = pResultInfo->fields[i].type;
int32_t bytes = pResultInfo->fields[i].bytes;
@ -2103,7 +2103,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->convertBuf[i] = p;
SResultColumn* pCol = &pResultInfo->pCol[i];
for (int32_t j = 0; j < numOfRows; ++j) {
for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) {
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;
@ -2136,10 +2136,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
@ -2198,10 +2201,16 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
}
pStart += colLen;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
len += sizeof(bool);
return len;
}
static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
bool needConvert = false;
for (int32_t i = 0; i < numOfCols; ++i) {
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
@ -2218,7 +2227,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
int32_t dataLen = estimateJsonLen(pResultInfo);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -2341,27 +2350,36 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart1 += colLen1;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
// (void)memcpy(pStart1, pStart, sizeof(bool));
totalLen += sizeof(bool);
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
pResultInfo->pData = pResultInfo->convertJson;
return TSDB_CODE_SUCCESS;
}
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4) {
if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) {
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) {
if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) {
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (numOfRows == 0) {
if (pResultInfo->numOfRows == 0) {
return TSDB_CODE_SUCCESS;
}
if (pResultInfo->pData == NULL) {
tscError("setResultDataPtr error: pData is NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
int32_t code = doPrepareResPtr(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
code = doConvertJson(pResultInfo, numOfCols, numOfRows);
code = doConvertJson(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -2381,9 +2399,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);
if (rows != numOfRows || cols != numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols,
numOfCols);
if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols,
pResultInfo->numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -2394,7 +2412,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
p += sizeof(uint64_t);
// check fields
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int8_t type = *(int8_t*)p;
p += sizeof(int8_t);
@ -2403,10 +2421,14 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
}
int32_t* colLength = (int32_t*)p;
p += sizeof(int32_t) * numOfCols;
p += sizeof(int32_t) * pResultInfo->numOfCols;
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
if ((pStart - pResultInfo->pData) >= dataLen) {
tscError("setResultDataPtr invalid offset over dataLen %d", dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (blockVersion == BLOCK_VERSION_1) {
colLength[i] = htonl(colLength[i]);
}
@ -2414,10 +2436,13 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) {
tscError("invalid type %d", pResultInfo->fields[i].type);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
pResultInfo->pCol[i].offset = (int32_t*)pStart;
pStart += numOfRows * sizeof(int32_t);
pStart += pResultInfo->numOfRows * sizeof(int32_t);
} else {
pResultInfo->pCol[i].nullbitmap = pStart;
pStart += BitmapLen(pResultInfo->numOfRows);
@ -2430,11 +2455,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
p = pStart;
// bool blankFill = *(bool*)p;
p += sizeof(bool);
int32_t offset = p - pResultInfo->pData;
if (offset > dataLen) {
tscError("invalid offset %d, dataLen %d", offset, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
code = doConvertUCS4(pResultInfo, colLength);
}
return code;
@ -2547,7 +2578,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
pResultInfo->totalRows += pResultInfo->numOfRows;
int32_t code =
setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4);
setResultDataPtr(pResultInfo, convertUcs4);
return code;
}
@ -2576,7 +2607,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
rpcInit.readTimeout = tsReadTimeout;
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) {
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) {
tscError("faild to convert taos version from str to int, errcode:%s", terrstr());
goto _OVER;
}
@ -2842,6 +2873,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
if (pParam->pRequest) {
pParam->pRequest->code = code;
clientOperateReport(pParam->pRequest);
}
if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) {

View File

@ -669,7 +669,7 @@ const char *taos_data_type(int type) {
}
}
const char *taos_get_client_info() { return version; }
const char *taos_get_client_info() { return td_version; }
// return int32_t
int taos_affected_rows(TAOS_RES *res) {
@ -2159,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) {
return 0;
}
char *getBuildInfo() { return buildinfo; }
char *getBuildInfo() { return td_buildinfo; }

View File

@ -2,8 +2,6 @@
#include "cJSON.h"
#include "clientInt.h"
#include "clientLog.h"
#include "os.h"
#include "tglobal.h"
#include "tmisce.h"
#include "tqueue.h"
#include "ttime.h"
@ -19,6 +17,7 @@ STaosQueue* monitorQueue;
SHashObj* monitorSlowLogHash;
char tmpSlowLogPath[PATH_MAX] = {0};
TdThread monitorThread;
extern bool tsEnableAuditDelete;
static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) {
int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir);
@ -216,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) {
SEpSet ep = getEpSet_s(&pInst->mgmtEp);
generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep);
bool reset =
taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset);
taosRUnLockLatch(&monitorLock);
}
@ -289,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) {
goto fail;
}
pMonitor->timer =
taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
if (pMonitor->timer == NULL) {
tscError("failed to start timer");
goto fail;
@ -660,7 +659,7 @@ static void monitorSendAllSlowLog() {
taosHashCancelIterate(monitorSlowLogHash, pIter);
return;
}
if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) {
if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) {
pClient->lastCheckTime = t;
} else {
continue;
@ -686,7 +685,7 @@ static void monitorSendAllSlowLog() {
static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId);
if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) {
if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) {
tscInfo("[monitor] monitor is disabled, skip send slow log");
return;
}
@ -933,3 +932,100 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) {
}
return 0;
}
int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
tscDebug("[del report]delete reportCB code:%d", code);
return 0;
}
int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) {
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
tscError("[del report]failed to allocate memory for sendInfo");
return terrno;
}
sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL};
sendInfo->requestId = requestId;
sendInfo->requestObjRefId = 0;
sendInfo->param = NULL;
sendInfo->fp = reportCB;
sendInfo->msgType = TDMT_MND_AUDIT;
SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
if (code != 0) {
tscError("[del report]failed to send msg to server, code:%d", code);
taosMemoryFree(sendInfo);
return code;
}
return TSDB_CODE_SUCCESS;
}
static void reportDeleteSql(SRequestObj* pRequest) {
SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot;
STscObj* pTscObj = pRequest->pTscObj;
if (pTscObj == NULL || pTscObj->pAppInfo == NULL) {
tscError("[del report]invalid tsc obj");
return;
}
if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) {
tscDebug("[del report]audit delete is disabled");
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
tscDebug("[del report]delete request result code:%d", pRequest->code);
return;
}
if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) {
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable));
return;
}
SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable;
SAuditReq req;
req.pSql = pRequest->sqlstr;
req.sqlLen = pRequest->sqlLen;
TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName));
TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName));
TAOS_UNUSED(tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"));
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("[del report]failed to allocate memory for req");
return;
}
if (tSerializeSAuditReq(pReq, tlen, &req) < 0) {
tscError("[del report]failed to serialize req");
taosMemoryFree(pReq);
return;
}
int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId);
if (code != 0) {
tscError("[del report]failed to send audit info, code:%d", code);
taosMemoryFree(pReq);
return;
}
tscDebug("[del report]delete data, sql:%s", req.pSql);
}
void clientOperateReport(SRequestObj* pRequest) {
if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) {
tscError("[del report]invalid request");
return;
}
if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) {
reportDeleteSql(pRequest);
}
}

View File

@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
goto End;
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
goto End;
}
@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
// update the appInstInfo
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete;
tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d",
connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope);
lastClusterId = connectRsp.clusterId;
@ -588,7 +589,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@ -603,7 +605,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS);
if(len < 0) {
uError("buildShowVariablesRsp error, len:%d", len);
code = terrno;
@ -741,7 +743,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@ -757,7 +760,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS);
if(len < 0) {
uError("buildRetriveTableRspForCompactDb error, len:%d", len);
code = terrno;

View File

@ -1570,7 +1570,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
rows, pData, tbname, fields, numFields);
@ -1631,7 +1631,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname);
@ -1835,7 +1835,7 @@ end:
static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) {
int32_t code = 0;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0));
(*pRequest)->syncQuery = true;
if (!(*pRequest)->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;

View File

@ -1695,7 +1695,7 @@ END:
}
void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) {
if (request->pTscObj->pAppInfo->monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
if (request->pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
int32_t len = 0;
int32_t rlen = 0;
char *p = NULL;
@ -1740,7 +1740,7 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
SSmlHandle *info = NULL;
int cnt = 0;
while (1) {
SML_CHECK_CODE(createRequest(*(int64_t *)taos, TSDB_SQL_INSERT, reqid, &request));
SML_CHECK_CODE(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &request, reqid));
SSmlMsgBuf msg = {request->msgBufLen, request->msgBuf};
request->code = smlBuildSmlInfo(taos, &info);
SML_CHECK_CODE(request->code);

View File

@ -2869,8 +2869,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes
pRspObj->resInfo.precision = precision;
pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows;
int32_t code = setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols,
pRspObj->resInfo.numOfRows, convertUcs4);
int32_t code = setResultDataPtr(&pRspObj->resInfo, convertUcs4);
if (code != 0) {
return code;
}

View File

@ -6,6 +6,8 @@ endif()
add_library(common STATIC ${COMMON_SRC})
add_dependencies(common lemon_sql)
if(DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
endif()

View File

@ -165,8 +165,8 @@ static const SSysDbTableSchema userStbsSchema[] = {
static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "stream_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "stream_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@ -190,9 +190,9 @@ static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_total", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},

View File

@ -18,6 +18,7 @@
#include "tcompare.h"
#include "tlog.h"
#include "tname.h"
#include "tglobal.h"
#define MALLOC_ALIGN_BYTES 32
@ -86,7 +87,17 @@ int32_t getJsonValueLen(const char* data) {
return dataLen;
}
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
static int32_t getDataLen(int32_t type, const char* pData) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
}
return dataLen;
}
static int32_t colDataSetValHelp(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (isNull || pData == NULL) {
// There is a placeholder for each NULL value of binary or nchar type.
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
@ -101,11 +112,9 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
int32_t dataLen = getDataLen(type, pData);
if (pColumnInfoData->varmeta.offset[rowIndex] > 0) {
pColumnInfoData->varmeta.length = pColumnInfoData->varmeta.offset[rowIndex];
}
SVarColAttr* pAttr = &pColumnInfoData->varmeta;
@ -144,6 +153,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0;
}
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
pColumnInfoData->varmeta.offset[rowIndex] = -1;
}
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx,
const char* pData) {
int32_t type = pColumnInfoData->info.type;
@ -3041,8 +3062,12 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
}
// return length of encoded data, return -1 if failed
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
blockDataCheck(pBlock, false);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) {
int32_t code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
int32_t dataLen = 0;
@ -3106,9 +3131,11 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
size_t metaSize = 0;
if (IS_VAR_DATA_TYPE(pColRes->info.type)) {
metaSize = numOfRows * sizeof(int32_t);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->varmeta.offset, metaSize);
} else {
metaSize = BitmapLen(numOfRows);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->nullbitmap, metaSize);
}
@ -3127,12 +3154,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
}
colSizes[col] += colSize;
dataLen += colSize;
if(dataLen > dataBuflen) goto _exit;
(void) memmove(data, pColData, colSize);
data += colSize;
}
} else {
colSizes[col] = colDataGetLength(pColRes, numOfRows);
dataLen += colSizes[col];
if(dataLen > dataBuflen) goto _exit;
if (pColRes->pData != NULL) {
(void) memmove(data, pColRes->pData, colSizes[col]);
}
@ -3156,7 +3185,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
*actualLen = dataLen;
*groupId = pBlock->info.id.groupId;
if (dataLen > dataBuflen) goto _exit;
return dataLen;
_exit:
uError("blockEncode dataLen:%d, dataBuflen:%zu", dataLen, dataBuflen);
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
return -1;
}
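With the buffer size now passed explicitly, `blockEncode` returns -1 and sets `terrno` when the encoded block would not fit, so callers size the destination with `blockGetEncodeSize` first, as the updated call sites in this commit do. A minimal sketch of that pattern, assuming the usual TDengine memory helpers; `demoEncodeBlock` is a hypothetical wrapper and the response framing used by real callers is omitted:

```c
#include "tdatablock.h"  // blockGetEncodeSize / blockEncode
#include "taoserror.h"

// Sketch: encode a data block into a buffer sized up front.
static int32_t demoEncodeBlock(const SSDataBlock* pBlock, void** ppBuf, int32_t* pLen) {
  size_t bufSize = blockGetEncodeSize(pBlock);  // upper bound for the encoded block
  char*  buf = taosMemoryCalloc(1, bufSize);
  if (buf == NULL) return terrno;

  int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
  int32_t len = blockEncode(pBlock, buf, bufSize, numOfCols);
  if (len < 0) {  // overflow or block check failure, terrno already set
    taosMemoryFree(buf);
    return terrno;
  }

  *ppBuf = buf;
  *pLen = len;
  return TSDB_CODE_SUCCESS;
}
```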
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos) {
@ -3286,11 +3322,15 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos
*pEndPos = pStart;
blockDataCheck(pBlock, false);
code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return code;
}
return TSDB_CODE_SUCCESS;
}
int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) {
// int32_t totalRows = pBlock->info.rows;
int32_t code = 0;
@ -3498,20 +3538,19 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
return nextRowIdx;
}
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
return;
if (NULL == pDataBlock || pDataBlock->info.rows == 0) {
return;
#define BLOCK_DATA_CHECK_TRESSA(o) \
if (!(o)) { \
uError("blockDataCheck failed! line:%d", __LINE__); \
return TSDB_CODE_INTERNAL_ERROR; \
}
int32_t blockDataCheck(const SSDataBlock* pDataBlock) {
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER || NULL == pDataBlock || pDataBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
#define BLOCK_DATA_CHECK_TRESSA(o) ;
//#define BLOCK_DATA_CHECK_TRESSA(o) ASSERT(o)
BLOCK_DATA_CHECK_TRESSA(pDataBlock->info.rows > 0);
if (!pDataBlock->info.dataLoad && !forceChk) {
return;
if (!pDataBlock->info.dataLoad) {
return TSDB_CODE_SUCCESS;
}
bool isVarType = false;
@ -3522,8 +3561,10 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < colNum; ++i) {
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i);
BLOCK_DATA_CHECK_TRESSA(pCol != NULL);
isVarType = IS_VAR_DATA_TYPE(pCol->info.type);
checkRows = pDataBlock->info.rows;
if (pCol->info.noData == true) continue;
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset);
@ -3531,25 +3572,37 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
BLOCK_DATA_CHECK_TRESSA(pCol->nullbitmap);
}
nextPos = 0;
nextPos = -1;
for (int64_t r = 0; r < checkRows; ++r) {
if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break;
if (!colDataIsNull_s(pCol, r)) {
BLOCK_DATA_CHECK_TRESSA(pCol->pData);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen);
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.allocLen > 0);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] < pCol->varmeta.length);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length);
if (pCol->reassigned) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0);
} else if (0 == r) {
} else if (0 == r || nextPos == -1) {
nextPos = pCol->varmeta.offset[r];
} else {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos);
}
colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]);
char* pColData = pCol->pData + pCol->varmeta.offset[r];
int32_t colSize = 0;
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
colLen = getJsonValueLen(pColData);
} else {
colLen = varDataTLen(pColData);
}
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
BLOCK_DATA_CHECK_TRESSA(colLen >= CHAR_BYTES);
} else {
BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE);
}
BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes);
if (pCol->reassigned) {
@ -3560,14 +3613,22 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
}
typeValue = *(char*)(pCol->pData + pCol->varmeta.offset[r] + colLen - 1);
} else {
if (TSDB_DATA_TYPE_FLOAT == pCol->info.type) {
float v = 0;
GET_TYPED_DATA(v, float, pCol->info.type, colDataGetNumData(pCol, r));
} else if (TSDB_DATA_TYPE_DOUBLE == pCol->info.type) {
double v = 0;
GET_TYPED_DATA(v, double, pCol->info.type, colDataGetNumData(pCol, r));
} else {
GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r));
}
}
}
}
}
return;
return TSDB_CODE_SUCCESS;
}

View File

@ -119,6 +119,7 @@ bool tsMonitorForceV2 = true;
// audit
bool tsEnableAudit = true;
bool tsEnableAuditCreateTable = true;
bool tsEnableAuditDelete = true;
int32_t tsAuditInterval = 5000;
// telem
@ -139,6 +140,7 @@ bool tsEnableCrashReport = true;
#endif
char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport";
int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NORMAL;
// schemaless
bool tsSmlDot2Underline = true;
@ -610,6 +612,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
@ -675,10 +678,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@ -777,6 +780,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableAuditDelete", tsEnableAuditDelete, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
@ -1305,6 +1309,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark");
tsmaDataDeleteMark = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel");
tsSafetyCheckLevel = pItem->i32;
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@ -1490,6 +1497,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable");
tsEnableAuditCreateTable = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableAuditDelete");
tsEnableAuditDelete = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval");
tsAuditInterval = pItem->i32;
@ -2049,7 +2059,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"s3UploadDelaySec", &tsS3UploadDelaySec},
{"supportVnodes", &tsNumOfSupportVnodes},
{"experimental", &tsExperimental},
{"maxTsmaNum", &tsMaxTsmaNum}};
{"maxTsmaNum", &tsMaxTsmaNum},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);
@ -2305,7 +2316,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"experimental", &tsExperimental},
{"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags},
{"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay},
{"tsmaDataDeleteMark", &tsmaDataDeleteMark}};
{"tsmaDataDeleteMark", &tsmaDataDeleteMark},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);

View File

@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t
}
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit);

View File

@ -567,6 +567,7 @@ int32_t tSerializeSClientHbBatchRsp(void *buf, int32_t bufLen, const SClientHbBa
TAOS_CHECK_EXIT(tSerializeSClientHbRsp(&encoder, pRsp));
}
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pBatchRsp->monitorParas));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pBatchRsp->enableAuditDelete));
tEndEncode(&encoder);
_exit:
@ -609,6 +610,12 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pBatchRsp->monitorParas));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pBatchRsp->enableAuditDelete));
} else {
pBatchRsp->enableAuditDelete = 0;
}
tEndDecode(&decoder);
_exit:
@ -1813,6 +1820,60 @@ _exit:
void tFreeSDropUserReq(SDropUserReq *pReq) { FREESQL(); }
int32_t tSerializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
SEncoder encoder = {0};
int32_t code = 0;
int32_t lino;
int32_t tlen;
tEncoderInit(&encoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartEncode(&encoder));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->operation));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->db));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->table));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->sqlLen));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->pSql));
tEndEncode(&encoder);
_exit:
if (code) {
tlen = code;
} else {
tlen = encoder.pos;
}
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
SDecoder decoder = {0};
int32_t code = 0;
int32_t lino;
tDecoderInit(&decoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartDecode(&decoder));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->operation));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->db));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->table));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->sqlLen));
if (pReq->sqlLen > 0) {
pReq->pSql = taosMemoryMalloc(pReq->sqlLen + 1);
if (pReq->pSql == NULL) {
TAOS_CHECK_EXIT(terrno);
}
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->pSql));
}
tEndDecode(&decoder);
_exit:
tDecoderClear(&decoder);
return code;
}
void tFreeSAuditReq(SAuditReq *pReq) { taosMemoryFreeClear(pReq->pSql); }
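A round-trip sketch for the new audit message, following the two-pass serialize pattern used by its caller in this commit (length query with a NULL buffer, then the real encode); `demoAuditRoundTrip` is a hypothetical helper, and `tstrncpy` plus the memory helpers are assumed to come from the usual utility headers:

```c
#include <string.h>
#include "tmsg.h"  // SAuditReq and the (de)serialize functions above

// Sketch: serialize an SAuditReq and decode it back.
static int32_t demoAuditRoundTrip(const char* db, const char* table, const char* sql) {
  SAuditReq req = {0};
  tstrncpy(req.operation, "delete", sizeof(req.operation));
  tstrncpy(req.db, db, sizeof(req.db));
  tstrncpy(req.table, table, sizeof(req.table));
  req.pSql = (char*)sql;
  req.sqlLen = (int32_t)strlen(sql);

  int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);  // first pass: required length
  if (tlen < 0) return tlen;
  void* buf = taosMemoryCalloc(1, tlen);
  if (buf == NULL) return terrno;
  if (tSerializeSAuditReq(buf, tlen, &req) < 0) {     // second pass: real encode
    taosMemoryFree(buf);
    return terrno;
  }

  SAuditReq out = {0};
  int32_t   code = tDeserializeSAuditReq(buf, tlen, &out);  // decoder allocates out.pSql
  tFreeSAuditReq(&out);
  taosMemoryFree(buf);
  return code;
}
```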
SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) {
if (pIpWhiteList == NULL) return NULL;
@ -6294,6 +6355,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->authVer));
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer));
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pRsp->enableAuditDelete));
tEndEncode(&encoder);
_exit:
@ -6345,6 +6407,11 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pRsp->enableAuditDelete));
} else {
pRsp->enableAuditDelete = 0;
}
tEndDecode(&decoder);
_exit:

View File

@ -297,12 +297,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) {
static void dmGenerateGrant() { mndGenerateMachineCode(); }
static void dmPrintVersion() {
printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version);
printf("git: %s\n", gitinfo);
printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version,
td_compatible_version);
printf("git: %s\n", td_gitinfo);
#ifdef TD_ENTERPRISE
printf("gitOfInternal: %s\n", gitinfoOfInternal);
printf("gitOfInternal: %s\n", td_gitinfoOfInternal);
#endif
printf("build: %s\n", buildinfo);
printf("build: %s\n", td_buildinfo);
}
static void dmPrintHelp() {

View File

@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols +
blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(numOfCols);
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeBufSize;
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
if (pRsp == NULL) {
@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
pStart += sizeof(SSysTableSchema);
}
int32_t len = blockEncode(pBlock, pStart, numOfCols);
int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, numOfCols);
if (len < 0) {
dError("failed to retrieve data since %s", tstrerror(code));
blockDataDestroy(pBlock);

View File

@ -212,6 +212,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_AUDIT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;

View File

@ -138,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId);
int32_t svrVer = 0;
code = taosVersionStrToInt(version, &svrVer);
code = taosVersionStrToInt(td_version, &svrVer);
if (code != 0) {
dError("failed to convert version string:%s to int, code:%d", version, code);
dError("failed to convert version string:%s to int, code:%d", td_version, code);
goto _OVER;
}
if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) {
@ -434,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
dError("failed to convert version string:%s to int", version);
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->clientRpc = rpcOpen(&rpcInit);
@ -483,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) {
rpcInit.startReadTimer = 0;
rpcInit.readTimeout = 0;
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
dError("failed to convert version string:%s to int", version);
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->statusRpc = rpcOpen(&rpcInit);
@ -533,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) {
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
dError("failed to convert version string:%s to int", version);
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->syncRpc = rpcOpen(&rpcInit);
@ -588,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) {
rpcInit.compressSize = tsCompressMsgSize;
rpcInit.shareConnLimit = tsShareConnLimit * 16;
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
dError("failed to convert version string:%s to int", version);
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->serverRpc = rpcOpen(&rpcInit);

View File

@ -54,7 +54,7 @@ void TestClient::DoInit() {
rpcInit.parent = this;
// rpcInit.secret = (char*)secretEncrypt;
// rpcInit.spi = 1;
taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
clientRpc = rpcOpen(&rpcInit);
ASSERT(clientRpc);

View File

@ -86,6 +86,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq);
static int32_t mndProcessNotifyReq(SRpcMsg *pReq);
static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessStatisReq(SRpcMsg *pReq);
static int32_t mndProcessAuditReq(SRpcMsg *pReq);
static int32_t mndProcessUpdateDnodeInfoReq(SRpcMsg *pReq);
static int32_t mndProcessCreateEncryptKeyReq(SRpcMsg *pRsp);
static int32_t mndProcessCreateEncryptKeyRsp(SRpcMsg *pRsp);
@ -125,6 +126,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq);
mndSetMsgHandle(pMnode, TDMT_MND_STATIS, mndProcessStatisReq);
mndSetMsgHandle(pMnode, TDMT_MND_AUDIT, mndProcessAuditReq);
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ENCRYPT_KEY, mndProcessCreateEncryptKeyReq);
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_ENCRYPT_KEY_RSP, mndProcessCreateEncryptKeyRsp);
mndSetMsgHandle(pMnode, TDMT_MND_UPDATE_DNODE_INFO, mndProcessUpdateDnodeInfoReq);
@ -604,6 +606,24 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
return 0;
}
static int32_t mndProcessAuditReq(SRpcMsg *pReq) {
mTrace("process audit req:%p", pReq);
if (tsEnableAudit && tsEnableAuditDelete) {
SMnode *pMnode = pReq->info.node;
SAuditReq auditReq = {0};
TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq));
mDebug("received audit req:%s, %s, %s, %s", auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql);
auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql,
auditReq.sqlLen);
tFreeSAuditReq(&auditReq);
}
return 0;
}
static int32_t mndUpdateDnodeObj(SMnode *pMnode, SDnodeObj *pDnode) {
int32_t code = 0, lino = 0;
SDnodeInfoReq infoReq = {0};

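mndProcessAuditReq() above records an audit entry only when both tsEnableAudit and tsEnableAuditDelete are on. For illustration, a hypothetical sender-side sketch follows; tSerializeSAuditReq() is assumed to exist as the counterpart of the tDeserializeSAuditReq()/tFreeSAuditReq() pair used in the handler, and the SAuditReq field names are taken from that handler. The helper itself is not part of the diff.

```c
// Hypothetical helper, not part of the diff: builds a TDMT_MND_AUDIT message
// for a delete statement. Field sizes and the send path are assumptions.
static int32_t buildAuditDeleteMsg(SRpcMsg *pMsg, const char *db, const char *table, const char *sql) {
  SAuditReq req = {0};
  tstrncpy(req.operation, "delete", sizeof(req.operation));
  tstrncpy(req.db, db, sizeof(req.db));
  tstrncpy(req.table, table, sizeof(req.table));
  req.pSql = (char *)sql;
  req.sqlLen = (int32_t)strlen(sql);

  int32_t contLen = tSerializeSAuditReq(NULL, 0, &req);  // assumed counterpart API
  if (contLen < 0) return contLen;

  void *pCont = rpcMallocCont(contLen);
  if (pCont == NULL) return terrno;

  if (tSerializeSAuditReq(pCont, contLen, &req) < 0) {
    rpcFreeCont(pCont);
    return terrno;
  }

  pMsg->msgType = TDMT_MND_AUDIT;
  pMsg->pCont = pCont;
  pMsg->contLen = contLen;
  return 0;  // message would then be forwarded to the mnode via the rpc path
}
```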
View File

@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
}
// cluster info
tstrncpy(pClusterInfo->version, version, sizeof(pClusterInfo->version));
tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version));
pClusterInfo->monitor_interval = tsMonitorInterval;
pClusterInfo->connections_total = mndGetNumOfConnections(pMnode);
pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB);

View File

@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
goto _OVER;
}
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) {
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version);
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) {
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version);
goto _OVER;
}
@ -305,12 +305,13 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
connectRsp.enableAuditDelete = tsEnableAuditDelete;
tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
connectRsp.whiteListVer = pUser->ipWhiteListVer;
(void)strcpy(connectRsp.sVer, version);
(void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version,
buildinfo, gitinfo);
tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer));
(void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version,
td_buildinfo, td_gitinfo);
mndGetMnodeEpSet(pMnode, &connectRsp.epSet);
int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp);
@ -709,6 +710,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
batchRsp.enableAuditDelete = tsEnableAuditDelete;
int32_t sz = taosArrayGetSize(batchReq.reqs);
for (int i = 0; i < sz; i++) {
@ -813,7 +815,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) {
int32_t code = 0;
int32_t lino = 0;
SServerVerRsp rsp = {0};
tstrncpy(rsp.ver, version, sizeof(rsp.ver));
tstrncpy(rsp.ver, td_version, sizeof(rsp.ver));
int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);
if (contLen < 0) {

View File

@ -242,7 +242,7 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou
SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
SStreamTask* pTask = NULL;
int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, *pTaskList, pStream->conf.fillHistory,
int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, 0, *pTaskList, pStream->conf.fillHistory,
pStream->subTableWithoutMd5, &pTask);
if (code != 0) {
return code;
@ -356,8 +356,9 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh
uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
*pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pTask);
int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, pStream->conf.trigger,
useTriggerParam ? pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory,
pStream->subTableWithoutMd5, pTask);
return code;
}
@ -396,18 +397,18 @@ static void setHTasksId(SStreamObj* pStream) {
}
static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset, int64_t skey,
SArray* pVerList, SVgObj* pVgroup, bool isFillhistory, bool useTriggerParam) {
SArray* pVerList, SVgObj* pVgroup, bool isHistoryTask, bool useTriggerParam) {
SStreamTask* pTask = NULL;
int32_t code = buildSourceTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask);
int32_t code = buildSourceTask(pStream, pEpset, isHistoryTask, useTriggerParam, &pTask);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId,
isFillhistory);
isHistoryTask);
if (pStream->conf.fillHistory) {
haltInitialTaskStatus(pTask, plan, isFillhistory);
haltInitialTaskStatus(pTask, plan, isHistoryTask);
}
streamTaskSetDataRange(pTask, skey, pVerList, pVgroup->vgId);
@ -453,10 +454,12 @@ static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index) {
static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset,
int64_t nextWindowSkey, SArray* pVerList, bool useTriggerParam) {
void* pIter = NULL;
int32_t code = 0;
SSdb* pSdb = pMnode->pSdb;
addNewTaskList(pStream);
void* pIter = NULL;
SSdb* pSdb = pMnode->pSdb;
while (1) {
SVgObj* pVgroup;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
@ -469,10 +472,9 @@ static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream
continue;
}
int code =
doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
if (code != 0) {
mError("create stream task, code:%s", tstrerror(code));
mError("failed to create stream task, code:%s", tstrerror(code));
// todo drop the added source tasks.
sdbRelease(pSdb, pVgroup);
@ -504,9 +506,9 @@ static int32_t buildAggTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillhist
uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
int32_t code =
tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
*pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pAggTask);
int32_t code = tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, pStream->conf.trigger,
useTriggerParam ? pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory,
pStream->subTableWithoutMd5, pAggTask);
return code;
}
@ -695,7 +697,8 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
if (terrno != 0) code = terrno;
TAOS_RETURN(code);
}
code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, numOfPlanLevel == 1);
code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, (numOfPlanLevel == 1));
if (code != TSDB_CODE_SUCCESS) {
return code;
}

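A side effect of the FORCE_WINDOW_CLOSE support is visible above: tNewStreamTask() gained a trigger argument, so source, agg and sink tasks are created knowing the stream's trigger type. A hedged sketch of the updated call, with the argument order as it appears in buildSourceTask():

```c
// Sketch of the extended tNewStreamTask() signature used above; the trigger
// type (pStream->conf.trigger) now precedes the trigger parameter.
SStreamTask *pTask = NULL;
int32_t      code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory,
                                   pStream->conf.trigger,  // new argument
                                   useTriggerParam ? pStream->conf.triggerParam : 0,
                                   *pTaskList, pStream->conf.fillHistory,
                                   pStream->subTableWithoutMd5, &pTask);
if (code != TSDB_CODE_SUCCESS) {
  return code;
}
```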
View File

@ -333,8 +333,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows);
}
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns +
blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock));
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeBufSize;
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
if (pRsp == NULL) {
@ -361,7 +361,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
pStart += sizeof(SSysTableSchema);
}
int32_t len = blockEncode(pBlock, pStart, pShow->pMeta->numOfColumns);
int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, pShow->pMeta->numOfColumns);
if(len < 0){
mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, tstrerror(code));
code = terrno;

View File

@ -454,17 +454,16 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
pObj->outputSchema.pSchema = pFullSchema;
}
bool hasKey = hasDestPrimaryKey(&pObj->outputSchema);
SPlanContext cxt = {
.pAstRoot = pAst,
.topicQuery = false,
.streamQuery = true,
.triggerType = pObj->conf.trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->conf.trigger,
.triggerType = (pObj->conf.trigger == STREAM_TRIGGER_MAX_DELAY)? STREAM_TRIGGER_WINDOW_CLOSE : pObj->conf.trigger,
.watermark = pObj->conf.watermark,
.igExpired = pObj->conf.igExpired,
.deleteMark = pObj->deleteMark,
.igCheckUpdate = pObj->igCheckUpdate,
.destHasPrimaryKey = hasKey,
.destHasPrimaryKey = hasDestPrimaryKey(&pObj->outputSchema),
};
// using ast and param to build physical plan

View File

@ -877,6 +877,8 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) {
strcpy(dst, "window close");
} else if (trigger == STREAM_TRIGGER_MAX_DELAY) {
strcpy(dst, "max delay");
} else if (trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) {
strcpy(dst, "force window close");
}
}

View File

@ -115,9 +115,9 @@ static char* mndBuildTelemetryReport(SMnode* pMnode) {
snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), &lino, _OVER);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER);
mndBuildRuntimeInfo(pMnode, pJson);

View File

@ -39,7 +39,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) {
strcpy(connectReq.db, "");
strcpy(connectReq.user, "root");
strcpy(connectReq.passwd, secretEncrypt);
strcpy(connectReq.sVer, version);
strcpy(connectReq.sVer, td_version);
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = rpcMallocCont(contLen);
@ -76,7 +76,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) {
strcpy(connectReq.db, "not_exist_db");
strcpy(connectReq.user, "root");
strcpy(connectReq.passwd, secretEncrypt);
strcpy(connectReq.sVer, version);
strcpy(connectReq.sVer, td_version);
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = rpcMallocCont(contLen);

View File

@ -64,7 +64,7 @@ TEST_F(MndTestShow, 03_ShowMsg_Conn) {
strcpy(connectReq.db, "");
strcpy(connectReq.user, "root");
strcpy(connectReq.passwd, secretEncrypt);
strcpy(connectReq.sVer, version);
strcpy(connectReq.sVer, td_version);
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = rpcMallocCont(contLen);

View File

@ -46,10 +46,14 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSaveInfo = streamStateSaveInfo;
pStore->streamStateGetInfo = streamStateGetInfo;
pStore->streamStateSetNumber = streamStateSetNumber;
pStore->streamStateGetPrev = streamStateGetPrev;
pStore->streamStateFillPut = streamStateFillPut;
pStore->streamStateFillGet = streamStateFillGet;
pStore->streamStateFillAddIfNotExist = streamStateFillAddIfNotExist;
pStore->streamStateFillDel = streamStateFillDel;
pStore->streamStateFillGetNext = streamStateFillGetNext;
pStore->streamStateFillGetPrev = streamStateFillGetPrev;
pStore->streamStateCurNext = streamStateCurNext;
pStore->streamStateCurPrev = streamStateCurPrev;
@ -60,9 +64,12 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev;
pStore->streamStateFreeCur = streamStateFreeCur;
pStore->streamStateGetGroupKVByCur = streamStateGetGroupKVByCur;
pStore->streamStateFillGetGroupKVByCur = streamStateFillGetGroupKVByCur;
pStore->streamStateGetKVByCur = streamStateGetKVByCur;
pStore->streamStateSetFillInfo = streamStateSetFillInfo;
pStore->streamStateClearExpiredState = streamStateClearExpiredState;
pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist;
pStore->streamStateSessionPut = streamStateSessionPut;
pStore->streamStateSessionGet = streamStateSessionGet;
@ -75,11 +82,6 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateCountGetKeyByRange = streamStateCountGetKeyByRange;
pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition;
//void initStreamStateAPI(SStorageAPI* pAPI) {
// initStateStoreAPI(&pAPI->stateStore);
// initFunctionStateStore(&pAPI->functionStore);
//}
pStore->updateInfoInit = updateInfoInit;
pStore->updateInfoFillBlockData = updateInfoFillBlockData;
pStore->updateInfoIsUpdated = updateInfoIsUpdated;
@ -100,6 +102,11 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
pStore->streamStateGroupPut = streamStateGroupPut;
pStore->streamStateGroupGetCur = streamStateGroupGetCur;
pStore->streamStateGroupCurNext = streamStateGroupCurNext;
pStore->streamStateGroupGetKVByCur = streamStateGroupGetKVByCur;
pStore->streamFileStateDestroy = streamFileStateDestroy;
pStore->streamFileStateClear = streamFileStateClear;
pStore->needClearDiskBuff = needClearDiskBuff;

View File

@ -715,6 +715,8 @@ static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessVer) {
STQ* pTq = (STQ*)pTqObj;
int32_t vgId = TD_VID(pTq->pVnode);
SCheckpointInfo* pChkInfo = NULL;
tqDebug("s-task:0x%x start to build task", pTask->id.taskId);
int32_t code = streamTaskInit(pTask, pTq->pStreamMeta, &pTq->pVnode->msgCb, nextProcessVer);
@ -766,7 +768,7 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
streamTaskResetUpstreamStageInfo(pTask);
SCheckpointInfo* pChkInfo = &pTask->chkInfo;
pChkInfo = &pTask->chkInfo;
tqSetRestoreVersionInfo(pTask);
char* p = streamTaskGetStatus(pTask).name;
@ -886,13 +888,14 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont;
SStreamMeta* pMeta = pTq->pStreamMeta;
int32_t code = TSDB_CODE_SUCCESS;
SStreamTask* pTask = NULL;
SStreamTask* pStreamTask = NULL;
code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask);
if (pTask == NULL) {
tqError("vgId:%d failed to acquire stream task:0x%x during scan history data, task may have been destroyed",
pMeta->vgId, pReq->taskId);
return -1;
return code;
}
// do recovery step1
@ -957,11 +960,11 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
ETaskStatus s = p.state;
if (s == TASK_STATUS__PAUSE) {
tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr,
el, pTask->execInfo.step1El, status);
tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", id, el,
pTask->execInfo.step1El, status);
} else if (s == TASK_STATUS__STOP || s == TASK_STATUS__DROPPING) {
tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr,
p.name, pTask->execInfo.step1El);
tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", id, p.name,
pTask->execInfo.step1El);
}
}
@ -978,7 +981,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
}
// 1. get the related stream task
SStreamTask* pStreamTask = NULL;
code = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId, &pStreamTask);
if (pStreamTask == NULL) {
tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
@ -989,15 +991,15 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
streamMetaReleaseTask(pMeta, pTask);
return code; // todo: handle failure
return code;
}
if (pStreamTask->info.taskLevel != TASK_LEVEL__SOURCE) {
tqError("s-task:%s fill-history task related stream task level:%d, unexpected", id, pStreamTask->info.taskLevel);
return TSDB_CODE_STREAM_INTERNAL_ERROR;
}
code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, handleStep2Async, pTq);
streamMetaReleaseTask(pMeta, pStreamTask);
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);

View File

@ -16,7 +16,8 @@
#include "tq.h"
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) {
int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock);
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize;
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) {
return terrno;
@ -28,7 +29,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t
pRetrieve->compressed = 0;
pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols);
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeBufSize, numOfCols);
if(actualLen < 0){
taosMemoryFree(buf);
return terrno;

View File

@ -270,7 +270,12 @@ bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {
bool taskReadyForDataFromWal(SStreamTask* pTask) {
// non-source or fill-history tasks don't need to respond to the WAL scan action.
if ((pTask->info.taskLevel != TASK_LEVEL__SOURCE) || (pTask->status.downstreamReady == 0)) {
SSTaskBasicInfo* pInfo = &pTask->info;
if ((pInfo->taskLevel != TASK_LEVEL__SOURCE) || (pTask->status.downstreamReady == 0)) {
return false;
}
if (pInfo->taskLevel == TASK_LEVEL__SOURCE && pInfo->trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) {
return false;
}
@ -282,7 +287,7 @@ bool taskReadyForDataFromWal(SStreamTask* pTask) {
}
// the fill-history task has entered the last phase, no need to do anything
if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) {
if ((pInfo->fillHistory == 1) && pTask->status.appendTranstateBlock) {
// the maximum version of data in the WAL has already been reached, step2 is done
tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr,
pTask->dataRange.range.maxVer);
@ -419,9 +424,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
streamMutexLock(&pTask->lock);
SStreamTaskState pState = streamTaskGetStatus(pTask);
if (pState.state != TASK_STATUS__READY) {
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pState.name);
SStreamTaskState state = streamTaskGetStatus(pTask);
if (state.state != TASK_STATUS__READY) {
tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, state.name);
streamMutexUnlock(&pTask->lock);
streamMetaReleaseTask(pStreamMeta, pTask);
continue;

View File

@ -628,6 +628,9 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void*
tmp = taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
TSDB_CHECK_NULL(tmp, code, line, END, terrno)
colDataSetNULL(tmp, i);
tmp = taosArrayGet(pDelBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
TSDB_CHECK_NULL(tmp, code, line, END, terrno)
colDataSetNULL(tmp, i);
}
if (type == 0) {

View File

@ -88,6 +88,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
}
}
streamSetupScheduleTrigger(pTask);
double el = (taosGetTimestampMs() - st) / 1000.0;
tqDebug("s-task:%s vgId:%d expand stream task completed, elapsed time:%.2fsec", pTask->id.idStr, vgId, el);
@ -614,6 +616,7 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
int32_t taskId = -1;
int64_t streamId = -1;
bool added = false;
int32_t size = sizeof(SStreamTask);
if (tsDisableStream) {
tqInfo("vgId:%d stream disabled, not deploy stream tasks", vgId);
@ -623,7 +626,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
tqDebug("vgId:%d receive new stream task deploy msg, start to build stream task", vgId);
// 1.deserialize msg and build task
int32_t size = sizeof(SStreamTask);
SStreamTask* pTask = taosMemoryCalloc(1, size);
if (pTask == NULL) {
tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, size);

View File

@ -162,10 +162,14 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSaveInfo = streamStateSaveInfo;
pStore->streamStateGetInfo = streamStateGetInfo;
pStore->streamStateSetNumber = streamStateSetNumber;
pStore->streamStateGetPrev = streamStateGetPrev;
pStore->streamStateFillPut = streamStateFillPut;
pStore->streamStateFillGet = streamStateFillGet;
pStore->streamStateFillAddIfNotExist = streamStateFillAddIfNotExist;
pStore->streamStateFillDel = streamStateFillDel;
pStore->streamStateFillGetNext = streamStateFillGetNext;
pStore->streamStateFillGetPrev = streamStateFillGetPrev;
pStore->streamStateCurNext = streamStateCurNext;
pStore->streamStateCurPrev = streamStateCurPrev;
@ -176,9 +180,12 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev;
pStore->streamStateFreeCur = streamStateFreeCur;
pStore->streamStateGetGroupKVByCur = streamStateGetGroupKVByCur;
pStore->streamStateFillGetGroupKVByCur = streamStateFillGetGroupKVByCur;
pStore->streamStateGetKVByCur = streamStateGetKVByCur;
pStore->streamStateSetFillInfo = streamStateSetFillInfo;
pStore->streamStateClearExpiredState = streamStateClearExpiredState;
pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist;
pStore->streamStateSessionPut = streamStateSessionPut;
pStore->streamStateSessionGet = streamStateSessionGet;
@ -214,6 +221,11 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
pStore->streamStateGroupPut = streamStateGroupPut;
pStore->streamStateGroupGetCur = streamStateGroupGetCur;
pStore->streamStateGroupCurNext = streamStateGroupCurNext;
pStore->streamStateGroupGetKVByCur = streamStateGroupGetKVByCur;
pStore->streamFileStateDestroy = streamFileStateDestroy;
pStore->streamFileStateClear = streamFileStateClear;
pStore->needClearDiskBuff = needClearDiskBuff;

View File

@ -22,13 +22,16 @@
#include "taoserror.h"
#include "tglobal.h"
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
void azEnd() {}
#if defined(USE_S3)
#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>
#include "td_block_blob_client.hpp"
// Add appropriate using namespace directives
using namespace Azure::Storage;
using namespace Azure::Storage::Blobs;
@ -40,10 +43,6 @@ extern char tsS3BucketName[TSDB_FQDN_LEN];
extern int8_t tsS3Enabled;
extern int8_t tsS3EpNum;
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
void azEnd() {}
static void checkPrint(const char *fmt, ...) {
va_list arg_ptr;
va_start(arg_ptr, fmt);
@ -223,7 +222,6 @@ static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *objec
uint8_t blobContent[] = "Hello Azure!";
// Create the block blob client
// BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
// TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName));
TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
blobClient.UploadFrom(file, offset, size);
@ -467,7 +465,7 @@ int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
TAOS_RETURN(code);
}
int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
static int32_t azGetObjectsByPrefixImpl(const char *prefix, const char *path) {
const std::string delimiter = "/";
std::string accountName = tsS3AccessKeyId[0];
std::string accountKey = tsS3AccessKeySecret[0];
@ -514,6 +512,23 @@ int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
return 0;
}
int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
int32_t code = 0;
try {
code = azGetObjectsByPrefixImpl(prefix, path);
} catch (const std::exception &e) {
azError("%s: Reason Phrase: %s", __func__, e.what());
code = TAOS_SYSTEM_ERROR(EIO);
azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
TAOS_RETURN(code);
}
TAOS_RETURN(code);
}
int32_t azDeleteObjects(const char *object_name[], int nobject) {
for (int i = 0; i < nobject; ++i) {
azDeleteObjectsByPrefix(object_name[i]);
@ -524,10 +539,6 @@ int32_t azDeleteObjects(const char *object_name[], int nobject) {
#else
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
void azEnd() {}
int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }
int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {

View File

@ -35,7 +35,8 @@
extern SConfig* tsCfg;
static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) {
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
return terrno;
@ -49,7 +50,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(numOfCols);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, numOfCols);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols);
if(len < 0) {
taosMemoryFree(*pRsp);
return terrno;

View File

@ -1966,7 +1966,8 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
pBlock->info.rows = rowNum;
int32_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize);
if (NULL == rsp) {
@ -1977,7 +1978,7 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
rsp->completed = 1;
rsp->numOfRows = htobe64((int64_t)rowNum);
int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, taosArrayGetSize(pBlock->pDataBlock));
int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, taosArrayGetSize(pBlock->pDataBlock));
if(len < 0) {
qError("qExplainGetRspFromCtx: blockEncode failed");
QRY_ERR_JRET(terrno);

View File

@ -454,6 +454,29 @@ typedef struct SSteamOpBasicInfo {
bool updateOperatorInfo;
} SSteamOpBasicInfo;
typedef struct SStreamFillSupporter {
int32_t type; // fill type
SInterval interval;
SResultRowData prev;
TSKEY prevOriginKey;
SResultRowData cur;
SResultRowData next;
TSKEY nextOriginKey;
SResultRowData nextNext;
SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
SExprSupp notFillExprSup;
int32_t numOfAllCols; // number of all exprs, including the tags columns
int32_t numOfFillCols;
int32_t numOfNotFillCols;
int32_t rowSize;
SSHashObj* pResMap;
bool hasDelete;
SStorageAPI* pAPI;
STimeWindow winRange;
int32_t pkColBytes;
__compar_fn_t comparePkColFn;
} SStreamFillSupporter;
typedef struct SStreamScanInfo {
SSteamOpBasicInfo basic;
SExprInfo* pPseudoExpr;
@ -477,6 +500,7 @@ typedef struct SStreamScanInfo {
STqReader* tqReader;
uint64_t groupId;
bool igCheckGroupId;
struct SUpdateInfo* pUpdateInfo;
EStreamScanMode scanMode;
@ -493,6 +517,7 @@ typedef struct SStreamScanInfo {
STimeWindow updateWin;
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
SStreamFillSupporter* pFillSup;
// status for tmq
SNodeList* pGroupTags;
SNode* pTagCond;
@ -511,6 +536,8 @@ typedef struct SStreamScanInfo {
SSDataBlock* pCheckpointRes;
int8_t pkColType;
int32_t pkColLen;
bool useGetResultRange;
STimeWindow lastScanRange;
} SStreamScanInfo;
typedef struct {
@ -781,25 +808,6 @@ typedef struct SStreamPartitionOperatorInfo {
SSDataBlock* pCreateTbRes;
} SStreamPartitionOperatorInfo;
typedef struct SStreamFillSupporter {
int32_t type; // fill type
SInterval interval;
SResultRowData prev;
SResultRowData cur;
SResultRowData next;
SResultRowData nextNext;
SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
SExprSupp notFillExprSup;
int32_t numOfAllCols; // number of all exprs, including the tags columns
int32_t numOfFillCols;
int32_t numOfNotFillCols;
int32_t rowSize;
SSHashObj* pResMap;
bool hasDelete;
SStorageAPI* pAPI;
STimeWindow winRange;
} SStreamFillSupporter;
typedef struct SStreamFillOperatorInfo {
SSteamOpBasicInfo basic;
SStreamFillSupporter* pFillSup;
@ -813,8 +821,70 @@ typedef struct SStreamFillOperatorInfo {
int32_t primaryTsCol;
int32_t primarySrcSlotId;
SStreamFillInfo* pFillInfo;
SStreamAggSupporter* pStreamAggSup;
SArray* pCloseTs;
SArray* pUpdated;
SGroupResInfo groupResInfo;
} SStreamFillOperatorInfo;
typedef struct SStreamTimeSliceOperatorInfo {
SSteamOpBasicInfo basic;
STimeWindowAggSupp twAggSup;
SStreamAggSupporter streamAggSup;
SStreamFillSupporter* pFillSup;
SStreamFillInfo* pFillInfo;
SSDataBlock* pRes;
SSDataBlock* pDelRes;
bool recvCkBlock;
SSDataBlock* pCheckpointRes;
int32_t fillType;
SResultRowData leftRow;
SResultRowData valueRow;
SResultRowData rightRow;
int32_t primaryTsIndex;
SExprSupp scalarSup; // scalar calculation
bool ignoreExpiredData;
bool ignoreExpiredDataSaved;
bool destHasPrimaryKey;
SArray* historyPoints;
SArray* pUpdated; // SWinKey
SArray* historyWins;
SSHashObj* pUpdatedMap;
int32_t delIndex;
SArray* pDelWins; // SWinKey
SSHashObj* pDeletedMap;
uint64_t numOfDatapack;
SGroupResInfo groupResInfo;
bool ignoreNull;
bool isHistoryOp;
SArray* pCloseTs;
struct SOperatorInfo* pOperator;
} SStreamTimeSliceOperatorInfo;
typedef struct SStreamIntervalSliceOperatorInfo {
SSteamOpBasicInfo basic;
SOptrBasicInfo binfo;
STimeWindowAggSupp twAggSup;
SStreamAggSupporter streamAggSup;
SExprSupp scalarSup;
SInterval interval;
bool recvCkBlock;
SSDataBlock* pCheckpointRes;
int32_t primaryTsIndex;
SSHashObj* pUpdatedMap; // SWinKey
SArray* pUpdated; // SWinKey
SSHashObj* pDeletedMap;
SArray* pDelWins;
SSDataBlock* pDelRes;
int32_t delIndex;
bool destHasPrimaryKey;
int64_t endTs;
SGroupResInfo groupResInfo;
struct SOperatorInfo* pOperator;
bool hasFill;
bool hasInterpoFunc;
} SStreamIntervalSliceOperatorInfo;
#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
#define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED)
@ -946,7 +1016,7 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, int32_t numOfOutput, int64_t gap,
SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore,
SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr,
SStorageAPI* pApi, int32_t tsIndex);
SStorageAPI* pApi, int32_t tsIndex, int8_t stateType, int32_t ratio);
int32_t initDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type,
int32_t tsColIndex, STimeWindowAggSupp* pTwSup, struct SSteamOpBasicInfo* pBasic);
int32_t getMaxTsWins(const SArray* pAllWins, SArray* pMaxWins);
@ -985,6 +1055,9 @@ void resetUnCloseSessionWinInfo(SSHashObj* winMap);
void setStreamOperatorCompleted(struct SOperatorInfo* pOperator);
void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup);
void destroyFlusedPos(void* pRes);
bool isIrowtsPseudoColumn(SExprInfo* pExprInfo);
bool isIsfilledPseudoColumn(SExprInfo* pExprInfo);
bool isInterpFunc(SExprInfo* pExprInfo);
int32_t encodeSSessionKey(void** buf, SSessionKey* key);
void* decodeSSessionKey(void* buf, SSessionKey* key);
@ -1018,6 +1091,8 @@ int32_t doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, S
int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo,
TSKEY* primaryKeys, int32_t prevPosition, int32_t order);
int32_t extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status);
bool getIgoreNullRes(SExprSupp* pExprSup);
bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t index, bool ignoreNull);
#ifdef __cplusplus
}

View File

@ -167,6 +167,8 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p
int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SDynQueryCtrlPhysiNode* pPhyciNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo);
int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SOperatorInfo** ppOptInfo);
// clang-format on
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,

View File

@ -22,11 +22,87 @@ extern "C" {
#include "executorInt.h"
#include "tutil.h"
#define FILL_POS_INVALID 0
#define FILL_POS_START 1
#define FILL_POS_MID 2
#define FILL_POS_END 3
#define HAS_NON_ROW_DATA(pRowData) (pRowData->key == INT64_MIN)
#define HAS_ROW_DATA(pRowData) (pRowData && pRowData->key != INT64_MIN)
#define IS_INVALID_WIN_KEY(ts) ((ts) == INT64_MIN)
#define IS_VALID_WIN_KEY(ts) ((ts) != INT64_MIN)
#define SET_WIN_KEY_INVALID(ts) ((ts) = INT64_MIN)
#define IS_NORMAL_INTERVAL_OP(op) \
((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || \
(op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL)
#define IS_CONTINUE_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL)
#define IS_FILL_CONST_VALUE(type) ((type == TSDB_FILL_NULL || type == TSDB_FILL_NULL_F || type == TSDB_FILL_SET_VALUE || type == TSDB_FILL_SET_VALUE_F))
typedef struct SSliceRowData {
TSKEY key;
char pRowVal[];
} SSliceRowData;
typedef struct SSlicePoint {
SWinKey key;
SSliceRowData* pLeftRow;
SSliceRowData* pRightRow;
SRowBuffPos* pResPos;
} SSlicePoint;
void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type);
bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo);
void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo);
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption);
void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins);
int32_t copyIntervalDeleteKey(SSHashObj* pMap, SArray* pWins);
bool hasSrcPrimaryKeyCol(SSteamOpBasicInfo* pInfo);
int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* tsCols, int32_t startPos, TSKEY eKey,
STimeWindow* pNextWin);
int32_t saveWinResult(SWinKey* pKey, SRowBuffPos* pPos, SSHashObj* pUpdatedMap);
void doBuildDeleteResultImpl(SStateStore* pAPI, SStreamState* pState, SArray* pWins, int32_t* index,
SSDataBlock* pBlock);
SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pRes);
SResultCellData* getResultCell(SResultRowData* pRaw, int32_t index);
void destroyStreamFillSupporter(SStreamFillSupporter* pFillSup);
bool hasCurWindow(SStreamFillSupporter* pFillSup);
bool hasPrevWindow(SStreamFillSupporter* pFillSup);
bool hasNextWindow(SStreamFillSupporter* pFillSup);
void copyNotFillExpData(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo);
int32_t setRowCell(SColumnInfoData* pCol, int32_t rowId, const SResultCellData* pCell);
bool hasRemainCalc(SStreamFillInfo* pFillInfo);
void destroySPoint(void* ptr);
void destroyStreamFillInfo(SStreamFillInfo* pFillInfo);
int32_t checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t groupId, bool* pRes);
void resetStreamFillSup(SStreamFillSupporter* pFillSup);
void setPointBuff(SSlicePoint* pPoint, SStreamFillSupporter* pFillSup);
int32_t saveTimeSliceWinResult(SWinKey* pKey, SSHashObj* pUpdatedMap);
int winPosCmprImpl(const void* pKey1, const void* pKey2);
void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI);
SResultCellData* getSliceResultCell(SResultCellData* pRowVal, int32_t index);
int32_t getDownstreamRes(struct SOperatorInfo* downstream, SSDataBlock** ppRes, SColumnInfo** ppPkCol);
void destroyFlusedppPos(void* ppRes);
void doBuildStreamIntervalResult(struct SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock,
SGroupResInfo* pGroupResInfo);
void transBlockToSliceResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKEY ts, SSliceRowData* pRowVal,
int32_t rowSize, void* pPkData, SColumnInfoData* pPkCol);
int32_t getQualifiedRowNumDesc(SExprSupp* pExprSup, SSDataBlock* pBlock, TSKEY* tsCols, int32_t rowId, bool ignoreNull);
int32_t createStreamIntervalSliceOperatorInfo(struct SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
struct SOperatorInfo** ppOptInfo);
int32_t buildAllResultKey(SStreamAggSupporter* pAggSup, TSKEY ts, SArray* pUpdated);
#ifdef __cplusplus
}

View File

@ -107,7 +107,9 @@ typedef struct SStreamFillInfo {
TSKEY end; // endKey for fill
TSKEY current; // current Key for fill
TSKEY preRowKey;
TSKEY prePointKey;
TSKEY nextRowKey;
TSKEY nextPointKey;
SResultRowData* pResRow;
SStreamFillLinearInfo* pLinearInfo;
bool needFill;
@ -116,6 +118,8 @@ typedef struct SStreamFillInfo {
SArray* delRanges;
int32_t delIndex;
uint64_t curGroupId;
bool hasNext;
SResultRowData* pNonFillRow;
} SStreamFillInfo;
int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);

View File

@ -45,6 +45,7 @@ typedef struct SDataDispatchHandle {
SDataBlockDescNode* pSchema;
STaosQueue* pDataBlocks;
SDataDispatchBuf nextOutput;
int32_t outPutColCounts;
int32_t status;
bool queryEnd;
uint64_t useconds;
@ -54,6 +55,65 @@ typedef struct SDataDispatchHandle {
TdThreadMutex mutex;
} SDataDispatchHandle;
static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) {
if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
return TSDB_CODE_SUCCESS;
}
if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) {
qError("invalid input data");
return TSDB_CODE_QRY_INVALID_INPUT;
}
SDataBlockDescNode* pSchema = pHandle->pSchema;
if (pSchema == NULL || pSchema->totalRowSize != pInput->pData->info.rowSize) {
qError("invalid schema");
return TSDB_CODE_QRY_INVALID_INPUT;
}
if (pHandle->outPutColCounts > taosArrayGetSize(pInput->pData->pDataBlock)) {
qError("invalid column number, schema:%d, input:%zu", pHandle->outPutColCounts, taosArrayGetSize(pInput->pData->pDataBlock));
return TSDB_CODE_QRY_INVALID_INPUT;
}
SNode* pNode;
int32_t colNum = 0;
FOREACH(pNode, pHandle->pSchema->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
SColumnInfoData* pColInfoData = taosArrayGet(pInput->pData->pDataBlock, colNum);
if (pColInfoData == NULL) {
return -1;
}
if (pColInfoData->info.bytes < 0) {
qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (!IS_VAR_DATA_TYPE(pColInfoData->info.type) &&
TYPE_BYTES[pColInfoData->info.type] != pColInfoData->info.bytes) {
qError("invalid column bytes, schema:%d, input:%d", TYPE_BYTES[pColInfoData->info.type],
pColInfoData->info.bytes);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (pColInfoData->info.type != pSlotDesc->dataType.type) {
qError("invalid column type, schema:%d, input:%d", pSlotDesc->dataType.type, pColInfoData->info.type);
return TSDB_CODE_QRY_INVALID_INPUT;
}
if (pColInfoData->info.bytes != pSlotDesc->dataType.bytes) {
qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes);
return TSDB_CODE_QRY_INVALID_INPUT;
}
if (IS_INVALID_TYPE(pColInfoData->info.type)) {
qError("invalid column type, type:%d", pColInfoData->info.type);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
++colNum;
}
}
return TSDB_CODE_SUCCESS;
}
// clang-format off
// data format:
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
@ -67,6 +127,12 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
int32_t numOfCols = 0;
SNode* pNode;
int32_t code = inputSafetyCheck(pHandle, pInput);
if (code) {
qError("failed to check input data, code:%d", code);
return code;
}
FOREACH(pNode, pHandle->pSchema->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
@ -84,17 +150,18 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
pBuf->useSize = sizeof(SDataCacheEntry);
{
// allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size
size_t dataEncodeBufSize = pBuf->allocSize + 8;
if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) {
if (pHandle->pCompressBuf == NULL) {
// allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size
pHandle->pCompressBuf = taosMemoryMalloc(pBuf->allocSize + 8);
pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeBufSize);
if (NULL == pHandle->pCompressBuf) {
QRY_RET(terrno);
}
pHandle->bufSize = pBuf->allocSize + 8;
pHandle->bufSize = dataEncodeBufSize;
} else {
if (pHandle->bufSize < pBuf->allocSize + 8) {
pHandle->bufSize = pBuf->allocSize + 8;
if (pHandle->bufSize < dataEncodeBufSize) {
pHandle->bufSize = dataEncodeBufSize;
void* p = taosMemoryRealloc(pHandle->pCompressBuf, pHandle->bufSize);
if (p != NULL) {
pHandle->pCompressBuf = p;
@ -105,7 +172,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
}
}
int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, numOfCols);
int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeBufSize, numOfCols);
if(dataLen < 0) {
qError("failed to encode data block, code: %d", dataLen);
return terrno;
@ -123,7 +190,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
TAOS_MEMCPY(pEntry->data, pHandle->pCompressBuf, dataLen);
}
} else {
pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, numOfCols);
pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, pBuf->allocSize, numOfCols);
if(pEntry->dataLen < 0) {
qError("failed to encode data block, code: %d", pEntry->dataLen);
return terrno;
@ -314,8 +381,60 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
return TSDB_CODE_SUCCESS;
}
static int32_t blockDescNodeCheck(SDataBlockDescNode* pInputDataBlockDesc) {
if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
return TSDB_CODE_SUCCESS;
}
if (pInputDataBlockDesc == NULL) {
qError("invalid schema");
return TSDB_CODE_QRY_INVALID_INPUT;
}
SNode* pNode;
int32_t realOutputRowSize = 0;
FOREACH(pNode, pInputDataBlockDesc->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
realOutputRowSize += pSlotDesc->dataType.bytes;
} else {
// Slots must be sorted, and slots with 'output' set to true must come first
break;
}
}
if (realOutputRowSize != pInputDataBlockDesc->outputRowSize) {
qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pInputDataBlockDesc->outputRowSize);
return TSDB_CODE_QRY_INVALID_INPUT;
}
return TSDB_CODE_SUCCESS;
}
int32_t getOutputColCounts(SDataBlockDescNode* pInputDataBlockDesc) {
if (pInputDataBlockDesc == NULL) {
qError("invalid schema");
return 0;
}
SNode* pNode;
int32_t numOfCols = 0;
FOREACH(pNode, pInputDataBlockDesc->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
++numOfCols;
} else {
// Slots must be sorted, and slots with 'output' set to true must come first
break;
}
}
return numOfCols;
}
int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) {
int32_t code;
code = blockDescNodeCheck(pDataSink->pInputDataBlockDesc);
if (code) {
qError("failed to check input data block desc, code:%d", code);
return code;
}
SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle));
if (NULL == dispatcher) {
@ -333,6 +452,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD
dispatcher->pManager = pManager;
pManager = NULL;
dispatcher->pSchema = pDataSink->pInputDataBlockDesc;
dispatcher->outPutColCounts = getOutputColCounts(dispatcher->pSchema);
dispatcher->status = DS_BUF_EMPTY;
dispatcher->queryEnd = false;
code = taosOpenQueue(&dispatcher->pDataBlocks);

View File

@ -528,7 +528,12 @@ static void seqJoinLaunchNewRetrieveImpl(SOperatorInfo* pOperator, SSDataBlock**
qDebug("%s dynamic post task begin", GET_TASKID(pOperator->pTaskInfo));
code = pOperator->pDownstream[1]->fpSet.getNextExtFn(pOperator->pDownstream[1], pParam, ppRes);
if (*ppRes && (code == 0)) {
blockDataCheck(*ppRes, false);
code = blockDataCheck(*ppRes);
if (code) {
qError("Invalid block data, blockDataCheck failed, error:%s", tstrerror(code));
pOperator->pTaskInfo->code = code;
T_LONG_JMP(pOperator->pTaskInfo->env, pOperator->pTaskInfo->code);
}
pPost->isStarted = true;
pStbJoin->execInfo.postBlkNum++;
pStbJoin->execInfo.postBlkRows += (*ppRes)->info.rows;

View File

@ -390,6 +390,7 @@ SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode) {
createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId);
idata.info.scale = pDescNode->dataType.scale;
idata.info.precision = pDescNode->dataType.precision;
idata.info.noData = pDescNode->reserve;
code = blockDataAppendColInfo(pBlock, &idata);
if (code != TSDB_CODE_SUCCESS) {
@ -2924,6 +2925,10 @@ char* getStreamOpName(uint16_t opType) {
return "stream event";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
return "stream count";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC:
return "stream interp";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL:
return "interval continue";
}
return "";
}
@ -2951,7 +2956,9 @@ void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr
qDebug("%s===stream===%s %s: Block is Null", taskIdStr, flag, opStr);
return;
} else if (pBlock->info.rows == 0) {
qDebug("%s===stream===%s %s: Block is Empty. block type %d", taskIdStr, flag, opStr, pBlock->info.type);
qDebug("%s===stream===%s %s: Block is Empty. block type %d.skey:%" PRId64 ",ekey:%" PRId64 ",version%" PRId64,
taskIdStr, flag, opStr, pBlock->info.type, pBlock->info.window.skey, pBlock->info.window.ekey,
pBlock->info.version);
return;
}
if (qDebugFlag & DEBUG_DEBUG) {

View File

@ -131,7 +131,7 @@ static void clearStreamBlock(SOperatorInfo* pOperator) {
}
}
void resetTaskInfo(qTaskInfo_t tinfo) {
void qResetTaskInfoCode(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
pTaskInfo->code = 0;
clearStreamBlock(pTaskInfo->pRoot);
@ -700,12 +700,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
if (pTaskInfo->pOpParam && !pTaskInfo->paramSet) {
pTaskInfo->paramSet = true;
code = pTaskInfo->pRoot->fpSet.getNextExtFn(pTaskInfo->pRoot, pTaskInfo->pOpParam, &pRes);
blockDataCheck(pRes, false);
} else {
code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes);
blockDataCheck(pRes, false);
}
QUERY_CHECK_CODE(code, lino, _end);
code = blockDataCheck(pRes);
QUERY_CHECK_CODE(code, lino, _end);
if (pRes == NULL) {
@ -750,7 +750,8 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
}
code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes);
blockDataCheck(pRes, false);
QUERY_CHECK_CODE(code, lino, _end);
code = blockDataCheck(pRes);
QUERY_CHECK_CODE(code, lino, _end);
}
@ -849,7 +850,11 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo));
}
blockDataCheck(*pRes, false);
code = blockDataCheck(*pRes);
if (code) {
pTaskInfo->code = code;
qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo));
}
uint64_t el = (taosGetTimestampUs() - st);
@ -1093,6 +1098,23 @@ _end:
return code;
}
static int32_t getOpratorIntervalInfo(SOperatorInfo* pOperator, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
return getOpratorIntervalInfo(pOperator->pDownstream[0], pWaterMark, pInterval, pLastWindow);
}
SStreamScanInfo* pScanOp = (SStreamScanInfo*) pOperator->info;
*pWaterMark = pScanOp->twAggSup.waterMark;
*pInterval = pScanOp->interval;
*pLastWindow = pScanOp->lastScanRange;
return TSDB_CODE_SUCCESS;
}
int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
return getOpratorIntervalInfo(pOperator, pWaterMark, pInterval, pLastWindow);
}
int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
@ -1157,6 +1179,19 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
pSup->deleteMark = INT64_MAX;
pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
pInfo->ignoreExpiredData = false;
qInfo("save stream task:%s, param for state: %d", GET_TASKID(pTaskInfo), pInfo->ignoreExpiredData);
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC) {
SStreamTimeSliceOperatorInfo* pInfo = pOperator->info;
STimeWindowAggSupp* pSup = &pInfo->twAggSup;
qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;

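Another recurring change in this diff, seen above in qExecTaskOpt() and qExecTask(): blockDataCheck() dropped its second flag and now returns an error code that callers propagate instead of ignoring. A minimal sketch of the updated call-site pattern, using the error macros from the surrounding code:

```c
// Sketch: validate each block returned by the operator tree and propagate
// any failure through the task's normal error handling.
code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes);
QUERY_CHECK_CODE(code, lino, _end);

code = blockDataCheck(pRes);   // was: blockDataCheck(pRes, false);
QUERY_CHECK_CODE(code, lino, _end);
```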
View File

@ -616,11 +616,12 @@ int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* p
}
}
}
code = TSDB_CODE_SUCCESS;
code = blockDataCheck(pBlock);
QUERY_CHECK_CODE(code, lino, _err);
_err:
blockDataCheck(pBlock, true);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
colDataDestroy(p);
taosMemoryFree(p);
return code;
@ -701,7 +702,7 @@ int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu
QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno);
char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
for (int32_t k = 0; k < pRow->numOfRows; ++k) {
code = colDataSetVal(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
code = colDataSetValOrCover(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
QUERY_CHECK_CODE(code, lino, _end);
}
}

View File

@ -765,7 +765,7 @@ static FORCE_INLINE int32_t getBlkFromDownstreamOperator(struct SOperatorInfo* p
}
}
blockDataCheck(pBlock, false);
code = blockDataCheck(pBlock);
*ppRes = pBlock;
return code;

View File

@ -1508,6 +1508,7 @@ static int32_t doStreamHashPartitionNext(SOperatorInfo* pOperator, SSDataBlock**
case STREAM_CREATE_CHILD_TABLE:
case STREAM_RETRIEVE:
case STREAM_CHECKPOINT:
case STREAM_GET_RESULT:
case STREAM_GET_ALL: {
(*ppRes) = pBlock;
return code;

View File

@ -66,10 +66,13 @@ static int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock);
int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) {
SOperatorInfo* pOperator = (SOperatorInfo*)param;
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
blockDataCheck(*ppBlock, false);
if (code) {
qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code));
}
code = blockDataCheck(*ppBlock);
if (code) {
qError("failed to check data block got from upstream, %s code:%s", __func__, tstrerror(code));
}
return code;
}
@ -526,7 +529,8 @@ int32_t doMultiwayMerge(SOperatorInfo* pOperator, SSDataBlock** pResBlock) {
if ((*pResBlock) != NULL) {
pOperator->resultInfo.totalRows += (*pResBlock)->info.rows;
blockDataCheck(*pResBlock, false);
code = blockDataCheck(*pResBlock);
QUERY_CHECK_CODE(code, lino, _end);
} else {
setOperatorCompleted(pOperator);
}

View File

@ -631,6 +631,8 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
code = createStreamCountAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle, &pOptr);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT == type) {
code = createCountwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo, &pOptr);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC == type) {
code = createStreamTimeSliceOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle, &pOptr);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY == type) {
code = createAnomalywindowOperatorInfo(ops[0], pPhyNode, pTaskInfo, &pOptr);
} else {
@ -871,14 +873,24 @@ int32_t setOperatorParams(struct SOperatorInfo* pOperator, SOperatorParam* pInpu
SSDataBlock* getNextBlockFromDownstream(struct SOperatorInfo* pOperator, int32_t idx) {
SSDataBlock* p = NULL;
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p);
blockDataCheck(p, false);
if (code == TSDB_CODE_SUCCESS) {
code = blockDataCheck(p);
if (code != TSDB_CODE_SUCCESS) {
qError("blockDataCheck failed, code:%s", tstrerror(code));
}
}
return (code == 0) ? p : NULL;
}
SSDataBlock* getNextBlockFromDownstreamRemain(struct SOperatorInfo* pOperator, int32_t idx) {
SSDataBlock* p = NULL;
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p);
blockDataCheck(p, false);
if (code == TSDB_CODE_SUCCESS) {
code = blockDataCheck(p);
if (code != TSDB_CODE_SUCCESS) {
qError("blockDataCheck failed, code:%s", tstrerror(code));
}
}
return (code == 0) ? p : NULL;
}

View File

@ -25,6 +25,7 @@
#include "tdatablock.h"
#include "tmsg.h"
#include "ttime.h"
#include "operator.h"
#include "query.h"
@ -1461,6 +1462,18 @@ static void destroyTableScanOperatorInfo(void* param) {
taosMemoryFreeClear(param);
}
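// When the data-load flag is FUNC_DATA_REQUIRED_NOT_LOAD, the helper below marks every column of the result block
// as noData, presumably so block checks and downstream consumers know the buffers were intentionally left unloaded.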
static void resetColumnReserve(SSDataBlock* pBlock, int32_t dataRequireFlag) {
if (pBlock && dataRequireFlag == FUNC_DATA_REQUIRED_NOT_LOAD) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);
if (pCol) {
pCol->info.noData = true;
}
}
}
}
int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo,
SOperatorInfo** pOptrInfo) {
@ -1511,6 +1524,7 @@ int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHa
pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader;
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
resetColumnReserve(pInfo->pResBlock, pInfo->base.dataBlockLoadFlag);
QUERY_CHECK_NULL(pInfo->pResBlock, code, lino, _error, terrno);
code = prepareDataBlockBuf(pInfo->pResBlock, &pInfo->base.matchInfo);
@ -1636,11 +1650,19 @@ static bool isCountWindow(SStreamScanInfo* pInfo) {
return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT;
}
static bool isTimeSlice(SStreamScanInfo* pInfo) {
return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC;
}
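// A NULL entry in the group-id column means the scan range is not bound to a single group; igCheckGroupId makes
// doRangeScan return the scanned block directly instead of re-partitioning it per group.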
static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
if (colDataIsNull_s(pColInfo, rowIndex)) {
pInfo->igCheckGroupId = true;
} else {
pInfo->groupId = groupCol[rowIndex];
}
}
void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint64_t ver) {
pTableScanInfo->base.cond.twindows = *pWin;
@ -1913,6 +1935,12 @@ static int32_t doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t ts
continue;
}
if (pInfo->igCheckGroupId == true) {
pResult->info.calWin = pInfo->updateWin;
(*ppRes) = pResult;
goto _end;
}
if (pInfo->partitionSup.needCalc) {
SSDataBlock* tmpBlock = NULL;
code = createOneDataBlock(pResult, true, &tmpBlock);
@ -1987,10 +2015,10 @@ int32_t appendOneRowToSpecialBlockImpl(SSDataBlock* pBlock, TSKEY* pStartTs, TSK
code = colDataSetVal(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pUidCol, pBlock->info.rows, (const char*)pUid, false);
code = colDataSetVal(pUidCol, pBlock->info.rows, (const char*)pUid, pUid == NULL);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pGpCol, pBlock->info.rows, (const char*)pGp, false);
code = colDataSetVal(pGpCol, pBlock->info.rows, (const char*)pGp, pGp == NULL);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pCalStartCol, pBlock->info.rows, (const char*)pCalStartTs, false);
@ -2068,6 +2096,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
if (pSrcBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
SSHashObj* pScanRange = tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startData = (TSKEY*)pStartTsCol->pData;
@ -2102,6 +2131,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t groupId = pSrcGp[i];
if (groupId == 0) {
@ -2128,6 +2158,14 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
qError("generate session scan range failed. rang start:%" PRIx64 ", end:%" PRIx64, startData[i], endData[i]);
continue;
}
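// pScanRange deduplicates rescan requests: a (groupId, window) pair that is already queued is emitted only once.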
SSessionKey checkKey = {.groupId = groupId, .win.skey = startWin.win.skey, .win.ekey = endWin.win.ekey};
if (tSimpleHashGet(pScanRange, &checkKey, sizeof(SSessionKey)) != NULL) {
continue;
}
code = tSimpleHashPut(pScanRange, &checkKey, sizeof(SSessionKey), NULL, 0);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pDestStartCol, i, (const char*)&startWin.win.skey, false);
QUERY_CHECK_CODE(code, lino, _end);
@ -2139,11 +2177,12 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
QUERY_CHECK_CODE(code, lino, _end);
colDataSetNULL(pDestCalStartTsCol, i);
colDataSetNULL(pDestCalEndTsCol, i);
colDataSetNULL(pDestTableNameInxCol, i);
pDestBlock->info.rows++;
}
_end:
tSimpleHashCleanup(pScanRange);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
@ -2192,6 +2231,7 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t groupId = pSrcGp[i];
if (groupId == 0) {
@ -2220,6 +2260,169 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB
code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&range.win.ekey, false);
QUERY_CHECK_CODE(code, lino, _end);
colDataSetNULL(pDestTableNameInxCol, i);
pDestBlock->info.rows++;
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
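// Helper for getTimeSliceWinRange: look up the slice point stored after pEndKey. If a buffered row lies past the
// key, extend the scan range end to that row and report done; otherwise advance pEndKey to the next stored point
// and signal the caller to probe again (*pRes stays false).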
static int32_t setDelRangeEndKey(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, SWinKey* pEndKey, STimeWindow* pScanRange, bool* pRes) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SSlicePoint nextPoint = {.key.groupId = pEndKey->groupId};
int32_t vLen = 0;
int32_t winCode = TSDB_CODE_SUCCESS;
code = pAggSup->stateStore.streamStateFillGetNext(pAggSup->pState, pEndKey, &nextPoint.key, (void**)&nextPoint.pResPos, &vLen, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
if (winCode == TSDB_CODE_SUCCESS) {
setPointBuff(&nextPoint, pFillSup);
if (HAS_ROW_DATA(nextPoint.pLeftRow) && pEndKey->ts < nextPoint.pLeftRow->key) {
pScanRange->ekey = nextPoint.pLeftRow->key;
*pRes = true;
} else if (pEndKey->ts < nextPoint.pRightRow->key) {
pScanRange->ekey = nextPoint.pRightRow->key;
*pRes = true;
} else {
*pEndKey = nextPoint.key;
pScanRange->ekey = TMAX(nextPoint.pRightRow->key, nextPoint.key.ts);
*pRes = false;
}
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
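// Compute the window that must be rescanned for a stream time-slice (interp) operator after a delete/update:
// snap start/end to interval boundaries, then widen the range to the neighbouring slice points cached in the fill
// state (the point before the start and, via setDelRangeEndKey, the point(s) after the end).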
int32_t getTimeSliceWinRange(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, SInterval* pInterval, TSKEY start, TSKEY end,
int64_t groupId, STimeWindow* pScanRange) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t winCode = TSDB_CODE_SUCCESS;
SResultRowInfo dumyInfo = {0};
dumyInfo.cur.pageId = -1;
STimeWindow sWin = getActiveTimeWindow(NULL, &dumyInfo, start, pInterval, TSDB_ORDER_ASC);
SWinKey startKey = {.groupId = groupId, .ts = sWin.skey};
sWin = getActiveTimeWindow(NULL, &dumyInfo, end, pInterval, TSDB_ORDER_ASC);
SWinKey endKey = {.groupId = groupId, .ts = sWin.ekey};
SSlicePoint prevPoint = {.key.groupId = groupId};
SSlicePoint nextPoint = {.key.groupId = groupId};
int32_t vLen = 0;
code = pAggSup->stateStore.streamStateFillGetPrev(pAggSup->pState, &startKey, &prevPoint.key, (void**)&prevPoint.pResPos, &vLen, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
if (winCode == TSDB_CODE_SUCCESS) {
setPointBuff(&prevPoint, pFillSup);
if (HAS_ROW_DATA(prevPoint.pRightRow)) {
pScanRange->skey = prevPoint.pRightRow->key;
} else {
pScanRange->skey = prevPoint.pLeftRow->key;
}
} else {
pScanRange->skey = startKey.ts;
}
bool res = false;
SWinKey curKey = endKey;
code = setDelRangeEndKey(pAggSup, pFillSup, &curKey, pScanRange, &res);
QUERY_CHECK_CODE(code, lino, _end);
if (res == false) {
code = setDelRangeEndKey(pAggSup, pFillSup, &curKey, pScanRange, &res);
QUERY_CHECK_CODE(code, lino, _end);
}
if (res == false) {
pScanRange->ekey = TMAX(endKey.ts, pScanRange->ekey);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
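// For each deleted/updated range in pSrcBlock, resolve its group id, widen it with getTimeSliceWinRange, and
// append the resulting (start ts, end ts, group id) rescan request to pDestBlock.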
static int32_t generateTimeSliceScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock,
EStreamType mode) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
blockDataCleanup(pDestBlock);
if (pSrcBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startData = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pUidCol->pData;
SColumnInfoData* pGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* pSrcGp = (uint64_t*)pGpCol->pData;
SColumnInfoData* pSrcPkCol = NULL;
if (taosArrayGetSize(pSrcBlock->pDataBlock) > PRIMARY_KEY_COLUMN_INDEX) {
pSrcPkCol = taosArrayGet(pSrcBlock->pDataBlock, PRIMARY_KEY_COLUMN_INDEX);
}
int64_t ver = pSrcBlock->info.version - 1;
if (pInfo->partitionSup.needCalc &&
(startData[0] != endData[0] || (hasPrimaryKeyCol(pInfo) && mode == STREAM_DELETE_DATA))) {
code = getPreVersionDataBlock(uidCol[0], startData[0], endData[0], ver, GET_TASKID(pTaskInfo), pInfo, pSrcBlock);
QUERY_CHECK_CODE(code, lino, _end);
startData = (TSKEY*)pStartTsCol->pData;
endData = (TSKEY*)pEndTsCol->pData;
uidCol = (uint64_t*)pUidCol->pData;
pSrcGp = (uint64_t*)pGpCol->pData;
}
code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows);
QUERY_CHECK_CODE(code, lino, _end);
SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t groupId = pSrcGp[i];
if (groupId == 0) {
void* pVal = NULL;
if (hasPrimaryKeyCol(pInfo) && pSrcPkCol) {
pVal = colDataGetData(pSrcPkCol, i);
}
groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], ver, pVal);
}
STimeWindow scanRange = {0};
code = getTimeSliceWinRange(pInfo->windowSup.pStreamAggSup, pInfo->pFillSup, &pInfo->interval, startData[i], endData[i], groupId,
&scanRange);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pDestStartCol, i, (const char*)&scanRange.skey, false);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pDestEndCol, i, (const char*)&scanRange.ekey, false);
QUERY_CHECK_CODE(code, lino, _end);
colDataSetNULL(pDestUidCol, i);
code = colDataSetVal(pDestGpCol, i, (const char*)&groupId, false);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pDestCalStartTsCol, i, (const char*)&scanRange.skey, false);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&scanRange.ekey, false);
QUERY_CHECK_CODE(code, lino, _end);
pDestBlock->info.rows++;
}
@ -2238,6 +2441,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
if (pSrcBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
SSHashObj* pScanRange = tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
@ -2274,6 +2478,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
for (int32_t i = 0; i < pSrcBlock->info.rows;) {
uint64_t srcUid = srcUidData[i];
uint64_t groupId = srcGp[i];
@ -2297,6 +2502,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
code = colDataSetVal(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
QUERY_CHECK_CODE(code, lino, _end);
SSessionKey checkKey = {.groupId = groupId, .win = win};
if (tSimpleHashGet(pScanRange, &checkKey, sizeof(SSessionKey)) != NULL) {
continue;
}
code = tSimpleHashPut(pScanRange, &checkKey, sizeof(SSessionKey), NULL, 0);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
QUERY_CHECK_CODE(code, lino, _end);
@ -2306,10 +2518,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
code = colDataSetVal(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
QUERY_CHECK_CODE(code, lino, _end);
colDataSetNULL(pDestTableNameInxCol, pDestBlock->info.rows);
pDestBlock->info.rows++;
}
_end:
tSimpleHashCleanup(pScanRange);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
@ -2484,6 +2699,9 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock,
} else if (isCountWindow(pInfo)) {
code = generateCountScanRange(pInfo, pSrcBlock, pDestBlock, type);
QUERY_CHECK_CODE(code, lino, _end);
} else if (isTimeSlice(pInfo)) {
code = generateTimeSliceScanRange(pInfo, pSrcBlock, pDestBlock, type);
QUERY_CHECK_CODE(code, lino, _end);
} else {
code = generateDeleteResultBlock(pInfo, pSrcBlock, pDestBlock);
QUERY_CHECK_CODE(code, lino, _end);
@ -3061,6 +3279,7 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S
colDataSetNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j);
colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j);
colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j);
colDataSetNULL(taosArrayGet(pDst->pDataBlock, TABLE_NAME_COLUMN_INDEX), j);
j++;
}
}
@ -3137,21 +3356,60 @@ int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff, int32_t*
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t len = 0;
code = pInfo->stateStore.updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo, &len);
QUERY_CHECK_CODE(code, lino, _end);
SEncoder* pEnCoder = NULL;
SEncoder* pScanEnCoder = NULL;
len += encodeSTimeWindowAggSupp(NULL, &pInfo->twAggSup);
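// First pass: encode into a NULL buffer only to size the payload (update info + lastScanRange); the second pass
// below writes the same fields into *pBuff right after the STimeWindowAggSupp header.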
SEncoder encoder = {0};
pEnCoder = &encoder;
tEncoderInit(pEnCoder, NULL, 0);
if (tStartEncode(pEnCoder) != 0) {
code = TSDB_CODE_STREAM_INTERNAL_ERROR;
QUERY_CHECK_CODE(code, lino, _end);
}
code = pInfo->stateStore.updateInfoSerialize(pEnCoder, pInfo->pUpdateInfo);
QUERY_CHECK_CODE(code, lino, _end);
if (tEncodeI64(pEnCoder, pInfo->lastScanRange.skey) < 0) {
code = TSDB_CODE_STREAM_INTERNAL_ERROR;
QUERY_CHECK_CODE(code, lino, _end);
}
if (tEncodeI64(pEnCoder, pInfo->lastScanRange.ekey) < 0) {
code = TSDB_CODE_STREAM_INTERNAL_ERROR;
QUERY_CHECK_CODE(code, lino, _end);
}
tEndEncode(pEnCoder);
len += encoder.pos;
tEncoderClear(pEnCoder);
pEnCoder = NULL;
*pBuff = taosMemoryCalloc(1, len);
if (!(*pBuff)) {
code = terrno;
QUERY_CHECK_CODE(code, lino, _end);
}
void* buf = *pBuff;
(void)encodeSTimeWindowAggSupp(&buf, &pInfo->twAggSup);
int32_t stwLen = encodeSTimeWindowAggSupp(&buf, &pInfo->twAggSup);
int32_t tmp = 0;
code = pInfo->stateStore.updateInfoSerialize(buf, len, pInfo->pUpdateInfo, &tmp);
SEncoder scanEncoder = {0};
pScanEnCoder = &scanEncoder;
tEncoderInit(pScanEnCoder, buf, len - stwLen);
if (tStartEncode(pScanEnCoder) != 0) {
code = TSDB_CODE_FAILED;
QUERY_CHECK_CODE(code, lino, _end);
}
code = pInfo->stateStore.updateInfoSerialize(pScanEnCoder, pInfo->pUpdateInfo);
QUERY_CHECK_CODE(code, lino, _end);
if (tEncodeI64(pScanEnCoder, pInfo->lastScanRange.skey) < 0) {
code = TSDB_CODE_STREAM_INTERNAL_ERROR;
QUERY_CHECK_CODE(code, lino, _end);
}
if (tEncodeI64(pScanEnCoder, pInfo->lastScanRange.ekey) < 0) {
code = TSDB_CODE_STREAM_INTERNAL_ERROR;
QUERY_CHECK_CODE(code, lino, _end);
}
*pLen = len;
@ -3159,6 +3417,14 @@ _end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (pEnCoder != NULL) {
tEndEncode(pEnCoder);
tEncoderClear(pEnCoder);
}
if (pScanEnCoder != NULL) {
tEndEncode(pScanEnCoder);
tEncoderClear(pScanEnCoder);
}
return code;
}
@ -3187,35 +3453,86 @@ _end:
// other properties are recovered from the execution plan
void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SDecoder* pDeCoder = NULL;
if (!pBuff || len == 0) {
return;
lino = __LINE__;
goto _end;
}
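// Buffer layout mirrors streamScanOperatorEncode: an STimeWindowAggSupp header followed by a tEncoder-framed
// section holding the update info and lastScanRange.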
void* buf = pBuff;
buf = decodeSTimeWindowAggSupp(buf, &pInfo->twAggSup);
int32_t tlen = len - encodeSTimeWindowAggSupp(NULL, &pInfo->twAggSup);
if (tlen == 0) {
return;
lino = __LINE__;
goto _end;
}
void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
if (!pUpInfo) {
return;
lino = __LINE__;
goto _end;
}
int32_t code = pInfo->stateStore.updateInfoDeserialize(buf, tlen, pUpInfo);
SDecoder decoder = {0};
pDeCoder = &decoder;
tDecoderInit(pDeCoder, buf, tlen);
if (tStartDecode(pDeCoder) < 0) {
lino = __LINE__;
goto _end;
}
code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
if (code == TSDB_CODE_SUCCESS) {
pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
pInfo->pUpdateInfo = pUpInfo;
} else {
taosMemoryFree(pUpInfo);
lino = __LINE__;
goto _end;
}
if (tDecodeIsEnd(pDeCoder)) {
lino = __LINE__;
goto _end;
}
SET_WIN_KEY_INVALID(pInfo->lastScanRange.skey);
SET_WIN_KEY_INVALID(pInfo->lastScanRange.ekey);
if (tDecodeI64(pDeCoder, &pInfo->lastScanRange.skey) < 0) {
lino = __LINE__;
goto _end;
}
if (tDecodeI64(pDeCoder, &pInfo->lastScanRange.ekey) < 0) {
lino = __LINE__;
goto _end;
}
_end:
if (pDeCoder != NULL) {
tEndDecode(pDeCoder);
tDecoderClear(pDeCoder);
}
qInfo("%s end at line %d", __func__, lino);
}
static bool hasScanRange(SStreamScanInfo* pInfo) {
SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
return pSup && pSup->pScanBlock->info.rows > 0 && (isStateWindow(pInfo) || isCountWindow(pInfo));
}
static bool isStreamWindow(SStreamScanInfo* pInfo) {
return isIntervalWindow(pInfo) || isSessionWindow(pInfo) || isStateWindow(pInfo) || isCountWindow(pInfo);
return isIntervalWindow(pInfo) || isSessionWindow(pInfo) || isStateWindow(pInfo) || isCountWindow(pInfo) ||
isTimeSlice(pInfo);
}
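// Build a one-row special block covering [start, end]; used below as the update-result range when a
// STREAM_GET_RESULT block asks for a window to be recalculated.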
static int32_t copyGetResultBlock(SSDataBlock* dest, TSKEY start, TSKEY end) {
int32_t code = blockDataEnsureCapacity(dest, 1);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return appendDataToSpecialBlock(dest, &start, &end, NULL, NULL, NULL);
}
static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
@ -3443,13 +3760,27 @@ FETCH_NEXT_BLOCK:
}
}
} break;
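// STREAM_GET_RESULT: a request to recompute an already-closed window. Set up a range scan over that window,
// using the block's own end key when useGetResultRange is set, otherwise the interval end derived from skey.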
case STREAM_GET_RESULT: {
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->updateResIndex = 0;
pInfo->lastScanRange = pBlock->info.window;
TSKEY endKey = taosTimeGetIntervalEnd(pBlock->info.window.skey, &pInfo->interval);
if (pInfo->useGetResultRange == true) {
endKey = pBlock->info.window.ekey;
}
code = copyGetResultBlock(pInfo->pUpdateRes, pBlock->info.window.skey, endKey);
QUERY_CHECK_CODE(code, lino, _end);
pInfo->pUpdateInfo->maxDataVersion = -1;
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
} break;
case STREAM_CHECKPOINT: {
qError("stream check point error. msg type: STREAM_INPUT__DATA_BLOCK");
} break;
default:
break;
}
printDataBlock(pBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "block recv", GET_TASKID(pTaskInfo));
setStreamOperatorState(&pInfo->basic, pBlock->info.type);
(*ppRes) = pBlock;
return code;
@ -3657,6 +3988,9 @@ FETCH_NEXT_BLOCK:
// printDataBlock(pInfo->pCheckpointRes, "stream scan ck", GET_TASKID(pTaskInfo));
(*ppRes) = pInfo->pCheckpointRes;
return code;
} else {
qError("stream scan error, invalid block type %d, %s", pInfo->blockType, id);
code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
}
_end:
@ -3940,6 +4274,8 @@ void streamScanReleaseState(SOperatorInfo* pOperator) {
int32_t lino = 0;
SStreamScanInfo* pInfo = pOperator->info;
void* pBuff = NULL;
SEncoder* pEnCoder = NULL;
SEncoder* pScanEnCoder = NULL;
if (!pInfo->pState) {
return;
}
@ -3948,8 +4284,20 @@ void streamScanReleaseState(SOperatorInfo* pOperator) {
return;
}
int32_t len = 0;
code = pInfo->stateStore.updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo, &len);
SEncoder encoder = {0};
pEnCoder = &encoder;
tEncoderInit(pEnCoder, NULL, 0);
if (tStartEncode(pEnCoder) != 0) {
code = TSDB_CODE_FAILED;
QUERY_CHECK_CODE(code, lino, _end);
}
code = pInfo->stateStore.updateInfoSerialize(pEnCoder, pInfo->pUpdateInfo);
QUERY_CHECK_CODE(code, lino, _end);
tEndEncode(pEnCoder);
len += encoder.pos;
tEncoderClear(pEnCoder);
pEnCoder = NULL;
pBuff = taosMemoryCalloc(1, len);
if (!pBuff) {
@ -3957,9 +4305,19 @@ void streamScanReleaseState(SOperatorInfo* pOperator) {
QUERY_CHECK_CODE(code, lino, _end);
}
int32_t tmp = 0;
code = pInfo->stateStore.updateInfoSerialize(pBuff, len, pInfo->pUpdateInfo, &tmp);
SEncoder scanEncoder = {0};
pScanEnCoder = &scanEncoder;
tEncoderInit(pScanEnCoder, pBuff, len);
if (tStartEncode(pScanEnCoder) != 0) {
code = TSDB_CODE_FAILED;
QUERY_CHECK_CODE(code, lino, _end);
}
code = pInfo->stateStore.updateInfoSerialize(pScanEnCoder, pInfo->pUpdateInfo);
QUERY_CHECK_CODE(code, lino, _end);
tEndEncode(pScanEnCoder);
tEncoderClear(pScanEnCoder);
pScanEnCoder = NULL;
pInfo->stateStore.streamStateSaveInfo(pInfo->pState, STREAM_SCAN_OP_STATE_NAME, strlen(STREAM_SCAN_OP_STATE_NAME),
pBuff, len);
@ -3967,12 +4325,21 @@ _end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (pEnCoder != NULL) {
tEndEncode(pEnCoder);
tEncoderClear(pEnCoder);
}
if (pScanEnCoder != NULL) {
tEndEncode(pScanEnCoder);
tEncoderClear(pScanEnCoder);
}
taosMemoryFree(pBuff);
}
void streamScanReloadState(SOperatorInfo* pOperator) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SDecoder* pDeCoder = NULL;
SStreamScanInfo* pInfo = pOperator->info;
if (!pInfo->pState) {
return;
@ -3993,7 +4360,14 @@ void streamScanReloadState(SOperatorInfo* pOperator) {
QUERY_CHECK_CODE(code, lino, _end);
}
int32_t winCode = pInfo->stateStore.updateInfoDeserialize(pBuff, len, pUpInfo);
SDecoder decoder = {0};
pDeCoder = &decoder;
tDecoderInit(pDeCoder, pBuff, len);
if (tStartDecode(pDeCoder) < 0) {
lino = __LINE__;
goto _end;
}
int32_t winCode = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
taosMemoryFree(pBuff);
if (winCode == TSDB_CODE_SUCCESS && pInfo->pUpdateInfo) {
if (pInfo->pUpdateInfo->minTS < 0) {
@ -4030,6 +4404,10 @@ void streamScanReloadState(SOperatorInfo* pOperator) {
}
_end:
if (pDeCoder != NULL) {
tEndDecode(pDeCoder);
tDecoderClear(pDeCoder);
}
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
@ -4220,6 +4598,7 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode*
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
pInfo->windowSup = (SWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->igCheckGroupId = false;
pInfo->pStreamScanOp = pOperator;
pInfo->deleteDataIndex = 0;
code = createSpecialDataBlock(STREAM_DELETE_DATA, &pInfo->pDeleteDataRes);
@ -4245,10 +4624,14 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode*
pInfo->pState = pTaskInfo->streamInfo.pState;
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
pInfo->readerFn = pTaskInfo->storageAPI.tqReaderFn;
pInfo->pFillSup = NULL;
pInfo->useGetResultRange = false;
code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes);
QUERY_CHECK_CODE(code, lino, _error);
SET_WIN_KEY_INVALID(pInfo->lastScanRange.skey);
SET_WIN_KEY_INVALID(pInfo->lastScanRange.ekey);
// for stream
if (pTaskInfo->streamInfo.pState) {
void* buff = NULL;

View File

@ -335,9 +335,13 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock,
int32_t loadNextDataBlock(void* param, SSDataBlock** ppBlock) {
SOperatorInfo* pOperator = (SOperatorInfo*)param;
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
blockDataCheck(*ppBlock, false);
if (code) {
qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code));
} else {
code = blockDataCheck(*ppBlock);
if (code) {
qError("failed to check block data, %s code:%s", __func__, tstrerror(code));
}
}
return code;
}
@ -630,7 +634,8 @@ int32_t fetchNextGroupSortDataBlock(void* param, SSDataBlock** ppBlock) {
QUERY_CHECK_CODE(code, lino, _end);
if (block != NULL) {
blockDataCheck(block, false);
code = blockDataCheck(block);
QUERY_CHECK_CODE(code, lino, _end);
if (block->info.id.groupId == grpSortOpInfo->currGroupId) {
grpSortOpInfo->childOpStatus = CHILD_OP_SAME_GROUP;
*ppBlock = block;

View File

@ -818,8 +818,8 @@ _end:
}
}
int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SOperatorInfo** pOptrInfo) {
int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo,
SReadHandle* pHandle, SOperatorInfo** pOptrInfo) {
QRY_PARAM_CHECK(pOptrInfo);
SCountWinodwPhysiNode* pCountNode = (SCountWinodwPhysiNode*)pPhyNode;
@ -869,7 +869,8 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
pInfo->primaryTsIndex = ((SColumnNode*)pCountNode->window.pTspk)->slotId;
code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState,
sizeof(COUNT_TYPE), 0, &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup,
GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex);
GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex,
STREAM_STATE_BUFF_SORT, 1);
QUERY_CHECK_CODE(code, lino, _error);
pInfo->streamAggSup.windowCount = pCountNode->windowCount;
@ -903,7 +904,7 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
pInfo->recvGetAll = false;
pInfo->pPkDeleted = tSimpleHashInit(64, hashFn);
QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno);
pInfo->destHasPrimaryKey = pCountNode->window.destHasPrimayKey;
pInfo->destHasPrimaryKey = pCountNode->window.destHasPrimaryKey;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT;
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, true,

View File

@ -525,8 +525,8 @@ int32_t doStreamEventDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
QUERY_CHECK_CODE(code, lino, _end);
buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
code =
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &winfo.sessionWin, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
code = tSimpleHashPut(pInfo->streamAggSup.pResultRows, &winfo.sessionWin, sizeof(SSessionKey), &winfo,
sizeof(SResultWindowInfo));
QUERY_CHECK_CODE(code, lino, _end);
}
@ -917,7 +917,8 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
pInfo->primaryTsIndex = tsSlotId;
code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState,
sizeof(bool) + sizeof(bool), 0, &pTaskInfo->storageAPI.stateStore, pHandle,
&pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex);
&pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex,
STREAM_STATE_BUFF_SORT, 1);
QUERY_CHECK_CODE(code, lino, _error);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
@ -955,7 +956,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
pInfo->recvGetAll = false;
pInfo->pPkDeleted = tSimpleHashInit(64, hashFn);
QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno);
pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimayKey;
pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey;
pInfo->pOperator = pOperator;
setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,

View File

@ -28,3 +28,8 @@ bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo) {
void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) {
pBasicInfo->updateOperatorInfo = false;
}
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
pBasicInfo->primaryPkIndex = -1;
pBasicInfo->updateOperatorInfo = false;
}

Some files were not shown because too many files have changed in this diff.