Merge branch '3.0' into enh/TS-5574-3.0
Commit: 0823c60623
@@ -261,11 +261,19 @@ if(${TD_DARWIN})
endif(${TD_DARWIN})

add_subdirectory(zlib EXCLUDE_FROM_ALL)

if(${TD_DARWIN})
target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlibstatic
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib
)

if(${TD_DARWIN})
target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlib
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib
@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
|
|||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
```
|
||||
|
||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
||||
|
||||
|
||||
If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.
|
||||
|
|
|
@@ -23,17 +23,18 @@ docker pull tdengine/tdengine:3.3.3.0
然后只需执行下面的命令:

```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```

注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043 为 taosKeeper 使用端口。6044-6049 TCP 端口为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
6044 和 6045 UDP 端口为 statsd 和 collectd 格式写入接口,可根据需要选择是否打开。6060 为 taosExplorer 使用端口。具体端口使用情况请参考[网络端口要求](../../operation/planning#网络端口要求)。

如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令:

```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
-p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```

:::note
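For illustration only (not part of this commit): once the container is running with the port mappings above, the mapped taosAdapter REST port can be checked from Python. This is a minimal sketch that assumes the default `root`/`taosdata` credentials and that 6041 is mapped to localhost.

```python
# Quick check that the mapped taosAdapter REST port (6041) is answering.
# Assumes the default root/taosdata credentials; adjust host/port to your -p mappings.
import requests

resp = requests.post(
    "http://localhost:6041/rest/sql",
    data="SHOW DATABASES;",
    auth=("root", "taosdata"),  # HTTP Basic auth accepted by taosAdapter
    timeout=5,
)
resp.raise_for_status()
print(resp.json().get("code"), resp.json().get("rows"))
```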
@@ -106,7 +106,7 @@ CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16;
```
该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下:
- `PRECISION 'ms'` :这个数据库的时序数据使用毫秒(ms)精度的时间戳
- `KEEP 365`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除
- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除
- `DURATION 10` :每 10 天的数据放在一个数据文件中
- `BUFFER 16` :写入使用大小为 16MB 的内存池。
@ -214,4 +214,4 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型
|
|||
|
||||
尽管 TDengine 推荐使用多列模型,因为这种模型在写入效率和存储效率方面通常更优,但在某些特定场景下,单列模型可能更为适用。例如,当一个数据采集点的采集量种类经常发生变化时,如果采用多列模型,就需要频繁修改超级表的结构定义,这会增加应用程序的复杂性。在这种情况下,采用单列模型可以简化应用程序的设计和管理,因为它允许独立地管理和扩展每个物理量的超级表。
|
||||
|
||||
总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。
|
||||
总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。
|
||||
|
|
|
@@ -7,12 +7,12 @@ sidebar_label: "ARIMA"

## 功能概述

ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
ARIMA模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA模型要求时序数据是**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。

>平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。

以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 的模型。
以下参数可以动态输入,控制预测过程中生成合适的 ARIMA 模型。

- p= 自回归模型阶数
- d= 差分阶数
@@ -21,30 +21,30 @@ ARIMA模型是一种自回归模型,只需要自变量即可预测后续的值

### 参数
分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数名称|说明|必填项|
|---|---|---|
|period|输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用非季节性/周期性的 ARIMA 模型预测。|选填|
|start_p| 自回归模型阶数的 起始值,0 开始的整数,不推荐大于 10 |选填|
|max_p| 自回归模型阶数的 结束值,0 开始的整数,不推荐大于 10 |选填|
|start_q| 移动平均模型阶数的起始值, 0 开始的整数,不推荐大于 10 |选填|
|max_q| 移动平均模型阶数的结束值, 0 开始的整数,不推荐大于 10 |选填|
|d| 差分阶数|选填|
|参数|说明|必填项|
|---|---|-----|
|period|输入时间序列每个周期包含的数据点个数,如果不设置该参数或该参数设置为 0,将使用非季节性/周期性的 ARIMA 模型预测|选填|
|start_p|自回归模型阶数的起始值,0 开始的整数,不推荐大于 10|选填|
|max_p|自回归模型阶数的结束值,0 开始的整数,不推荐大于 10|选填|
|start_q|移动平均模型阶数的起始值,0 开始的整数,不推荐大于 10|选填|
|max_q|移动平均模型阶数的结束值,0 开始的整数,不推荐大于 10|选填|
|d|差分阶数|选填|

`start_p`、`max_p`、`start_q`、`max_q` 四个参数约束了模型在多大的范围内去搜寻合适的最优解。相同输入数据的条件下,参数范围越大,消耗的资源越多,系统响应的时间越长。

### 示例及结果
针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q是1,最大值是5,预测结果中返回 95% 置信区间范围边界。
针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q 是 1,最大值是 5,预测结果中返回 95% 置信区间范围边界。
```
FORECAST(i32, "algo=arima,alpha=95,period=10, start_p=1, max_p=5, start_q=1, max_q=5")
FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5")
```

```json5
{
"rows": fc_rows, // 预测结果的行数
"rows": fc_rows, // 返回结果的行数
"period": period, // 返回结果的周期性,同输入
"alpha": alpha, // 返回结果的置信区间,同输入
"algo": "arima", // 返回结果使用的算法
"mse":mse, // 拟合输入时序数据时候生成模型的最小均方误差(MSE)
"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
"res": res // 列模式的结果
}
```
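For illustration only — a minimal sketch of how the options in the table above (period, start_p/max_p, start_q/max_q, alpha) map onto an automatic ARIMA fit, assuming the pmdarima package is available. It is not the analysis platform's actual implementation; the returned dictionary merely mirrors the JSON fields shown above.

```python
# Sketch: auto-fit an ARIMA within the given order bounds and forecast fc_rows points
# with a confidence interval, mirroring the parameters and result fields shown above.
import numpy as np
import pmdarima as pm

def arima_forecast(y, period=10, start_p=1, max_p=5, start_q=1, max_q=5,
                   alpha=95, fc_rows=10):
    model = pm.auto_arima(
        y,
        seasonal=period > 0, m=max(period, 1),  # period == 0 -> non-seasonal model
        start_p=start_p, max_p=max_p,
        start_q=start_q, max_q=max_q,
    )
    fc, conf = model.predict(n_periods=fc_rows, return_conf_int=True,
                             alpha=1 - alpha / 100.0)
    return {
        "rows": fc_rows, "period": period, "alpha": alpha, "algo": "arima",
        "mse": float(np.mean(model.resid() ** 2)),              # fit error on the input series
        "res": [list(fc), list(conf[:, 0]), list(conf[:, 1])],  # forecast, lower, upper
    }
```

As the surrounding text notes, widening the p/q search range makes the automatic fit try more candidate models and therefore costs more time and resources.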
@@ -6,19 +6,19 @@ sidebar_label: "HoltWinters"
本节讲述 HoltWinters 算法模型的使用方法。

## 功能概述
HoltWinters模型又称为多次指数平滑模型(EMA)。对含有线性趋势和周期波动的非平稳序列适用,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
HoltWinters有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。
该模型对于返回数据也不提供计算的置信区间范围结果。在 95% 置信区间的上下界结果与预测结果相同。
HoltWinters 模型又称为多次指数平滑模型(EMA)。适用于含有线性趋势和周期波动的非平稳序列,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
HoltWinters 有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。
该模型对于返回数据不提供计算的置信区间范围结果,在 95% 置信区间的上下界结果与预测结果相同。


### 参数

分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数名称|说明|必填项|
分析平台中使用自动化的 HoltWinters 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|参数|说明|必填项|
|---|---|---|
|period| 输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
|trend| 趋势模型使用加法模型还是乘法模型|选填|
|seasonal| 季节性采用加法模型还是乘法模型|选填|
|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
|trend|趋势模型使用加法模型还是乘法模型|选填|
|seasonal|季节性采用加法模型还是乘法模型|选填|

参数 `trend` 和 `seasonal` 均可以选择 `add` (加法模型)或 `mul`(乘法模型)。
@@ -30,11 +30,11 @@ FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")

```json5
{
"rows": rows, // 结果的行数
"period": period, // 返回结果的周期性, 该结果与输入的周期性相同,如果没有周期性,该值为 0
"algo": 'holtwinters' // 返回结果使用的计算模型
"mse":mse, // 最小均方误差(minmum square error)
"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了 两列[timestamp][fc_results]。
"rows": rows, // 返回结果的行数
"period": period, // 返回结果的周期性,该结果与输入的周期性相同,如果没有周期性,该值为 0
"algo": 'holtwinters' // 返回结果使用的计算模型
"mse": mse, // 最小均方误差(minmum square error)
"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了两列 [timestamp][fc_results]。
}
```
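For illustration only — a minimal sketch of how the period/trend/seasonal options described above correspond to a Holt-Winters (exponential smoothing) fit, assuming statsmodels. The platform's own code is not shown in this diff, and the returned fields simply mirror the JSON above.

```python
# Sketch: Holt-Winters forecast with additive/multiplicative trend and seasonality.
# period == 0 falls back to simple (single) exponential smoothing, as described above.
from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing

def holtwinters_forecast(y, period=10, trend="mul", seasonal="mul", rows=10):
    if period > 0:
        fit = ExponentialSmoothing(y, trend=trend, seasonal=seasonal,
                                   seasonal_periods=period).fit()
    else:
        fit = SimpleExpSmoothing(y).fit()
    forecast = fit.forecast(rows)
    return {
        "rows": rows,
        "period": period,
        "algo": "holtwinters",
        "mse": float(fit.sse / len(y)),   # mean of squared one-step fit errors
        "res": [list(forecast)],
    }
```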
@@ -3,7 +3,7 @@ title: "Anomaly-detection"
sidebar_label: "Anomaly-detection"
---

本节讲述 异常检测 算法模型的使用方法。
本节讲述异常检测算法模型的使用方法。

## 概述
分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。
@@ -11,26 +11,26 @@ sidebar_label: "Anomaly-detection"

### 统计学异常检测方法

- k-sigma<sup>[1]</sup>: 即 ***68–95–99.7 rule*** 。***k***值默认为3, 即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值K倍标准差,则该点被视为异常点.
- k-sigma<sup>[1]</sup>: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点.

|参数名称|说明|是否必选|默认值|
|参数|说明|是否必选|默认值|
|---|---|---|---|
|k|标准差倍数|选填|3|

- IQR<sup>[2]</sup>:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5 。无输入参数。
- IQR<sup>[2]</sup>:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5。无输入参数。

- Grubbs<sup>[3]</sup>: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。

- SHESD<sup>[4]</sup>: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多49.9%。数据集的异常比例一般不超过5%
- SHESD<sup>[4]</sup>: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5%

|参数名称|说明|是否必选|默认值|
|参数|说明|是否必选|默认值|
|---|---|---|---|
|k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5|
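For illustration only — a compact sketch of the two statistical rules just described (k-sigma and IQR), written against plain NumPy; `-1` marks a point treated as anomalous, matching the output convention this platform documents for detection algorithms.

```python
# Sketch of the k-sigma and IQR rules described above (not the platform's own code).
# A flagged (anomalous) point is marked -1, a normal point 1.
import numpy as np

def ksigma_flags(x, k=3):
    x = np.asarray(x, dtype=float)
    mu, sigma = x.mean(), x.std()
    return np.where(np.abs(x - mu) > k * sigma, -1, 1)

def iqr_flags(x):
    x = np.asarray(x, dtype=float)
    q1, q3 = np.percentile(x, [25, 75])
    spread = q3 - q1
    low, high = q1 - 1.5 * spread, q3 + 1.5 * spread
    return np.where((x < low) | (x > high), -1, 1)

print(iqr_flags([2, 2, 2, 2, 100]))  # -> [ 1  1  1  1 -1]
```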
### 基于数据密度的检测方法
LOF<sup>[5]</sup>: 局部离群因子(LOF,又叫局部异常因子)算法是Breunig于2000年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的top(n)个点。
LOF<sup>[5]</sup>: 局部离群因子(LOF,又叫局部异常因子)算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 top(n) 个点。

### 基于深度学习的检测方法
@ -3,7 +3,7 @@ title: "addins"
|
|||
sidebar_label: "addins"
|
||||
---
|
||||
|
||||
本节说明如何将自己开发的新预测算法和异常检测算法整合到 TDengine 分析平台, 并能够通过 SQL 语句进行调用。
|
||||
本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台,并能够通过 SQL 语句进行调用。
|
||||
|
||||
## 目录结构
|
||||
|
||||
|
@ -11,49 +11,51 @@ sidebar_label: "addins"
|
|||
|
||||
|目录|说明|
|
||||
|---|---|
|
||||
|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc, 单元测试和集成测试目录 test。 algo目录下 ad 放置异常检测算法代码, fc 放置预测算法代码|
|
||||
|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码,fc 放置预测算法代码|
|
||||
|script|是安装脚本和发布脚本放置目录|
|
||||
|model|放置针对数据集完成的训练模型|
|
||||
|cfg| 配置文件目录|
|
||||
|cfg|配置文件目录|
|
||||
|
||||
## 约定与限制
|
||||
|
||||
定义异常检测算法的 Python 代码文件 需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。
|
||||
定义异常检测算法的 Python 代码文件需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。
|
||||
|
||||
|
||||
### 类命名规范
|
||||
|
||||
算法类的名称需要 以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。
|
||||
算法类的名称需要以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。
|
||||
|
||||
### 类继承约定
|
||||
|
||||
异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`.
|
||||
预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`。
|
||||
- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`
|
||||
- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`
|
||||
|
||||
### 类属性初始化
|
||||
每个算法实现的类需要静态初始化两个类属性,分别是
|
||||
每个算法实现的类需要静态初始化两个类属性,分别是:
|
||||
|
||||
`name`: 的触发调用关键词,全小写英文字母。
|
||||
`desc`:该算法的描述信息。
|
||||
- `name`:触发调用的关键词,全小写英文字母
|
||||
- `desc`:算法的描述信息
|
||||
|
||||
### 核心方法输入与输出约定
|
||||
|
||||
`execute` 是算法处理的核心方法。调用该方法的时候, `self.list` 已经设置好输入数组。
|
||||
`execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 已经设置好输入数组。
|
||||
|
||||
异常检测输出结果
|
||||
|
||||
`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
|
||||
`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
|
||||
|
||||
预测输出结果
|
||||
|
||||
对于预测算法, `AbstractForecastService` 的对象属性说明如下:
|
||||
对于预测算法,`AbstractForecastService` 的对象属性说明如下:
|
||||
|
||||
|属性名称|说明|默认值|
|
||||
|---|---|---|
|
||||
|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可。| 0|
|
||||
|start_ts|预测数据的开始时间| 0|
|
||||
|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
|
||||
|start_ts|预测结果的开始时间| 0|
|
||||
|time_step|预测结果的两个数据点之间时间间隔|0 |
|
||||
|fc_rows|预测结果数量| 0 |
|
||||
|return_conf|返回结果中是否包含执行区间范围,如果算法计算结果不包含置信区间,那么上界和下界与自身相同| 1|
|
||||
|conf|执行区间分位数 0.05|
|
||||
|fc_rows|预测结果的数量| 0 |
|
||||
|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1|
|
||||
|conf|置信区间分位数 0.05|
|
||||
|
||||
|
||||
预测返回结果如下:
|
||||
|
@ -76,7 +78,7 @@ from service import AbstractAnomalyDetectionService
|
|||
|
||||
# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束,如下 _IqrService 是 IQR 异常检测算法的实现类。
|
||||
class _IqrService(AbstractAnomalyDetectionService):
|
||||
""" IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService类的抽象函数 """
|
||||
""" IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService 类的抽象函数 """
|
||||
|
||||
# 定义算法调用关键词,全小写ASCII码(必须添加)
|
||||
name = 'iqr'
|
||||
|
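For illustration only — a hypothetical plug-in class that follows the conventions described above (leading underscore, `Service` suffix, static `name`/`desc`, `execute()` reading `self.list` and returning 1/-1 flags). Any base-class detail beyond what this page states is an assumption, and `_MyKsigmaService`/`my_ksigma` are made-up names.

```python
# Hypothetical example of the plug-in contract described above; not part of the platform code.
from service import AbstractAnomalyDetectionService  # same import as the _IqrService snippet

class _MyKsigmaService(AbstractAnomalyDetectionService):
    """Toy k-sigma detector used only to illustrate the class conventions."""

    # lower-case keyword used to invoke the algorithm (required)
    name = 'my_ksigma'
    # human-readable description of the algorithm
    desc = 'example k-sigma anomaly detector'

    def execute(self):
        vals = list(self.list)            # input series prepared by the framework
        n = len(vals)
        mean = sum(vals) / n
        std = (sum((v - mean) ** 2 for v in vals) / n) ** 0.5
        k = 3
        # return value has the same length as self.list; -1 marks an anomalous point
        return [-1 if std > 0 and abs(v - mean) > k * std else 1 for v in vals]
```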
@ -111,6 +113,7 @@ class _IqrService(AbstractAnomalyDetectionService):
|
|||
## 单元测试
|
||||
|
||||
在测试文件目录中的 anomaly_test.py 中增加单元测试用例。
|
||||
|
||||
```python
|
||||
def test_iqr(self):
|
||||
""" 测试 _IqrService 类 """
|
||||
|
@ -137,13 +140,13 @@ def test_iqr(self):
|
|||
|
||||
## 需要模型的算法
|
||||
|
||||
针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立了, autoencoder的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。
|
||||
针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立 autoencoder 的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。
|
||||
|
||||
训练完成后的模型,使用 joblib 进行保存。
|
||||
训练完成后的模型,使用 joblib 进行保存。
|
||||
|
||||
并在 model 目录下建立对应的文件夹存放该模型。
|
||||
|
||||
保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 {"model": "ad_encoder_keras"} 的方式,可以调用该模型进行计算。
|
||||
保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 `{"model": "ad_encoder_keras"}` 的方式,可以调用该模型进行计算。
|
||||
|
||||
具体的调用方式如下:
|
||||
|
||||
|
|
|
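For illustration only — a hypothetical sketch of the joblib save/load step described above, using the `model/<algorithm>/` layout the text mentions. The helper names and paths are assumptions, not the doc's own invocation example.

```python
# Hypothetical sketch of persisting a trained model with joblib under model/<algorithm>/.
import os
import joblib

def save_model(model, algo_name, model_name, model_root="model"):
    target_dir = os.path.join(model_root, algo_name)   # e.g. model/autoencoder/
    os.makedirs(target_dir, exist_ok=True)
    path = os.path.join(target_dir, model_name)
    joblib.dump(model, path)                           # persist the trained model
    return path

def load_model(algo_name, model_name, model_root="model"):
    return joblib.load(os.path.join(model_root, algo_name, model_name))
```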
@ -5,18 +5,18 @@ title: 数据分析功能
|
|||
|
||||
## 概述
|
||||
|
||||
TDengine 通过 ANode(AnalysisNode) 是提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,从而拓展 TDengine 的功能,支持时间序列高级分析功能。
|
||||
ANode 是无状态的数据分析节点,集群中可以存在多个 ANode节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。
|
||||
ANode(Analysis Node)是 TDengine 提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,拓展 TDengine 的功能,支持时间序列高级分析。
|
||||
ANode 是无状态的数据分析节点,集群中可以存在多个 ANode 节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。
|
||||
下图是数据分析的技术架构示意图。
|
||||
|
||||

|
||||
|
||||
## 安装部署
|
||||
### 环境准备
|
||||
ANode 的要求节点上准备有 Python 3.10 及以上版本以及相应的Python包自动安装组件 Pip ,同时请确保能够正常连接互联网。
|
||||
ANode 要求节点上准备有 Python 3.10 及以上版本,以及相应的 Python 包自动安装组件 Pip,同时请确保能够正常连接互联网。
|
||||
|
||||
### 安装及卸载
|
||||
使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengien 的安装流程一致。
|
||||
使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengine 的安装流程一致。
|
||||
|
||||
```bash
|
||||
tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
|
||||
|
@ -30,7 +30,7 @@ sudo ./install.sh
|
|||
为了避免 ANode 安装后影响目标节点现有的 Python 库。 ANode 使用 Python 虚拟环境运行,安装后的默认 Python 目录处于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 ANode 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,可以手动删除。
|
||||
|
||||
## 启动及停止服务
|
||||
安装 ANode 以后,可以使用`systemctl`来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。
|
||||
安装 ANode 以后,可以使用 `systemctl` 来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。
|
||||
|
||||
```bash
|
||||
systemctl start taosanoded
|
||||
|
@ -50,27 +50,27 @@ systemctl status taosanoded
|
|||
|
||||
### 配置说明
|
||||
|
||||
Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 的配置和 uWSGI 的配置在同一个配置文件中,具体如下:
|
||||
Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 和 uWSGI 的配置信息存放在同一个配置文件中,具体如下:
|
||||
|
||||
```ini
|
||||
[uwsgi]
|
||||
# charset
|
||||
env=LC_ALL=en_US.UTF-8
|
||||
env = LC_ALL = en_US.UTF-8
|
||||
|
||||
# ip:port
|
||||
http = 127.0.0.1:6050
|
||||
|
||||
# the local unix socket file than communicate to Nginx
|
||||
#socket = 127.0.0.1:8001
|
||||
#socket-timeout=10
|
||||
#socket-timeout = 10
|
||||
|
||||
# base directory
|
||||
chdir = /usr/local/taos/taosanode/lib
|
||||
|
||||
# initialize python file
|
||||
wsgi-file = /usr/local/taos/taosanode/lib/app.py
|
||||
wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
|
||||
|
||||
# invoke app model
|
||||
# call module of uWSGI
|
||||
callable = app
|
||||
|
||||
# auto remove unix Socket and pid file when stopping
|
||||
|
@ -80,10 +80,10 @@ vacuum = true
|
|||
#chmod-socket = 664
|
||||
|
||||
# uWSGI pid
|
||||
uid=root
|
||||
uid = root
|
||||
|
||||
# uWSGI gid
|
||||
gid=root
|
||||
gid = root
|
||||
|
||||
# main process
|
||||
master = true
|
||||
|
@ -92,25 +92,27 @@ master = true
|
|||
processes = 2
|
||||
|
||||
# pid file
|
||||
pidfile = /usr/local/taos/taosanode/uwsgi.pid
|
||||
pidfile = /usr/local/taos/taosanode/taosanode.pid
|
||||
|
||||
# enable threads
|
||||
enable-threads=true
|
||||
enable-threads = true
|
||||
|
||||
# the number of threads for each process
|
||||
threads=2
|
||||
threads = 4
|
||||
|
||||
# memory useage report
|
||||
memory-report = true
|
||||
|
||||
# smooth restart
|
||||
reload-mercy = 10
|
||||
|
||||
# conflict with systemctl, so do NOT uncomment this
|
||||
# daemonize = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# set log
|
||||
# log directory
|
||||
logto = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# monitor server
|
||||
# wWSGI monitor port
|
||||
stats = 127.0.0.1:8387
|
||||
|
||||
# python virtual environment directory
|
||||
|
@ -121,10 +123,13 @@ virtualenv = /usr/local/taos/taosanode/venv/
|
|||
app-log = /var/log/taos/taosanode/taosanode.app.log
|
||||
|
||||
# model storage directory
|
||||
model-dir=/usr/local/taos/taosanode/model/
|
||||
model-dir = /usr/local/taos/taosanode/model/
|
||||
|
||||
# default log level
|
||||
log-level = DEBUG
|
||||
|
||||
# draw the query results
|
||||
draw-result = 0
|
||||
```
|
||||
|
||||
**提示**
|
||||
|
@ -137,7 +142,7 @@ log-level = DEBUG
|
|||
```sql
|
||||
CREATE ANODE {node_url}
|
||||
```
|
||||
node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
|
||||
node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,则无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
|
||||
|
||||
#### 查看 ANode
|
||||
列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。
|
||||
|
@ -151,7 +156,7 @@ SHOW ANODES;
|
|||
SHOW ANODES FULL;
|
||||
```
|
||||
|
||||
#### 强制刷新 TDengine 集群中分析算法缓存
|
||||
#### 强制刷新集群中的分析算法缓存
|
||||
```SQL
|
||||
UPDATE ANODE {node_id}
|
||||
UPDATE ALL ANODES
|
||||
|
@@ -167,24 +172,23 @@ DROP ANODE {anode_id}

#### 白噪声检查

平台提供 Restful的服务检测输入时间序列是否是白噪声时间序列(White Noise Data, WND),白噪声时间序列及随机数序列。
此外,分析平台要求输入的数据不能是 , 因此针对的所有数据均默认进行 白噪声检查。当前白噪声检查采用通行的 `Ljung-Box`检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
分析平台提供的 Restful 服务要求输入的时间序列不能是白噪声时间序列(White Noise Data, WND)和随机数序列 , 因此针对所有数据均默认进行白噪声检查。当前白噪声检查采用通行的 `Ljung-Box` 检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。
同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。
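For illustration only — a minimal Ljung-Box white-noise check like the one described above, assuming statsmodels; the ANode's actual implementation and its decision threshold are not shown in this diff.

```python
# Sketch: Ljung-Box test for "is this series white noise?" (illustration only).
# A large p-value means the series is consistent with white noise, so forecasting
# or anomaly detection on it is not meaningful.
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

def looks_like_white_noise(x, lags=10, alpha=0.05):
    result = acorr_ljungbox(np.asarray(x, dtype=float), lags=[lags], return_df=True)
    p_value = float(result["lb_pvalue"].iloc[0])
    return p_value > alpha   # True -> cannot reject the white-noise hypothesis
```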
#### 数据重采样和时间戳对齐

数据分析平台支持将输入的数据进行重采样的预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:

- 数据时间戳对齐。由于真实数据时间可能并非严格按照查询指定的时间戳输入。此时数据平台将自动将数据的时间间隔按照指定的时间间隔进行对齐。例如有输入时间序列:[11, 22, 29, 41],用户指定时间间隔为 10,那么该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
- 数据时间重采样。用户输入的时间序列其采样频率超过了指定的查询需要获得结果的时间间隔,例如输入原始数据是 5, 但是输出结果的频率是 10. [0, 5, 10, 15, 20, 25, 30],那么该输入数据列将重采用为间隔 为 10 的输入序列,其结果如下 [0, 10, 20,30]。[5, 15, 25] 处的数据将被丢弃。
- 数据时间戳对齐。由于真实数据可能并非严格按照查询指定的时间戳输入。此时分析平台会自动将数据的时间间隔按照指定的时间间隔进行对齐。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0, 5, 10, 15, 20, 25, 30] 将被重采用为间隔 为 10 的序列 [0, 10, 20,30],[5, 15, 25] 处的数据将被丢弃。

需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据[11, 22, 29, 49],并且用户要求的时间间隔为 10, 重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。
需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。
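For illustration only — a tiny sketch of the two preprocessing steps just described (timestamp alignment and down-sampling), applied to the same example timestamps. The platform's real logic is not shown here, and the nearest-multiple rounding rule is an assumption consistent with the example.

```python
# Sketch of the alignment and resampling behaviour described above (timestamps only).

def align_timestamps(ts, interval):
    # Snap each timestamp to the nearest multiple of `interval`.
    return [round(t / interval) * interval for t in ts]

def resample_timestamps(ts, interval):
    # Keep only timestamps on the requested interval; points in between are dropped.
    return [t for t in ts if t % interval == 0]

print(align_timestamps([11, 22, 29, 41], 10))                # [10, 20, 30, 40]
print(resample_timestamps([0, 5, 10, 15, 20, 25, 30], 10))   # [0, 10, 20, 30]
```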
#### 时序数据异常检测
异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常的时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。
对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。
@@ -201,18 +205,18 @@ algo=expr1
"}
```

1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列输入,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
2. `options`:字符串。其中使用 K/V 调用异常检测的算法,及与算法相关的参数。采用 逗号分隔的K/V字符串表示,其中的字符串不需要使用单引号、双引号、或转意号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma, k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
4. 输入数据默认进行白噪声检查,如果检查结果是输入数据是白噪声,将不会有任何(异常)窗口信息返回。
4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。

**参数说明**
|参数|含义|默认值|
|---|---|---|
|algo|异常检测调用的算法|iqr|
|wncheck|对输入数据列是否进行白噪声检查|取值为0或者1,默认值为 1,表示进行白噪声检查|
|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1,默认值为 1,表示进行白噪声检查|

异常检测的返回结果以窗口的形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
1. `_WSTART`: 异常窗口开始时间戳
2. `_WEND`:异常窗口结束时间戳
3. `_WDURATION`:异常窗口持续时间
@ -232,9 +236,9 @@ ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
|
|||
|
||||
```
|
||||
taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
|
||||
_wstart | _wend | count(*) |
|
||||
============================================================================
|
||||
2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
|
||||
_wstart | _wend | count(*) |
|
||||
====================================================================
|
||||
2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
|
||||
Query OK, 1 row(s) in set (0.028946s)
|
||||
```
|
||||
|
||||
|
@ -267,7 +271,7 @@ algo=expr1
|
|||
|
||||
```
|
||||
1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型输入。
|
||||
2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持`conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下:
|
||||
2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持 `conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下:
|
||||
|
||||
**参数说明**
|
||||
|
||||
|
@ -275,13 +279,13 @@ algo=expr1
|
|||
|---|---|---|
|
||||
|algo|预测分析使用的算法|holtwinters|
|
||||
|wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查|
|
||||
|conf|预测数据的置信区间范围 ,取值范围[0, 100]|95|
|
||||
|conf|预测数据的置信区间范围 ,取值范围 [0, 100]|95|
|
||||
|every|预测数据的采样间隔|输入数据的采样间隔|
|
||||
|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样时间段|
|
||||
|rows|预测结果的记录数|10|
|
||||
|
||||
1. 预测查询结果新增了三个伪列,具体如下: `_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
|
||||
2. 更改参数 `START`:返回预测结果的起始时间,改变这个起始时间不会影响返回的预测数值,只影响起始时间。
|
||||
1. 预测查询结果新增了三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
|
||||
2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。
|
||||
3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。
|
||||
4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。
|
||||
|
||||
|
@ -292,12 +296,12 @@ algo=expr1
|
|||
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
|
||||
FROM ai.ftb;
|
||||
|
||||
--- 使用 arima 算法进行预测,输入数据的是周期数据,每10个采样点是一个周期。返回置信区间是 95%.
|
||||
--- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期。返回置信区间是 95%.
|
||||
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
|
||||
FROM ai.ftb;
|
||||
```
|
||||
```
|
||||
taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
|
||||
taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
|
||||
_flow | _fhigh | _frowts | forecast(i32) |
|
||||
========================================================================================
|
||||
10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
|
||||
|
|
|
@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。
|
|||
|
||||
下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。
|
||||
|
||||
| 接口或组件 | 端口 |
|
||||
| :---------------: | :--------: |
|
||||
| 原生接口(taosc) | 6030 |
|
||||
| RESTful 接口 | 6041 |
|
||||
| WebSocket 接口 | 6041 |
|
||||
| taosKeeper | 6043 |
|
||||
| taosX | 6050, 6055 |
|
||||
| taosExplorer | 6060 |
|
||||
| 接口或组件名称 | 端口 | 协议 |
|
||||
|:-----------------------------------------:|:----------:|:--------:|
|
||||
| 原生接口(taosc) | 6030 | TCP |
|
||||
| RESTful 接口 | 6041 | TCP |
|
||||
| WebSocket 接口 | 6041 | TCP |
|
||||
| taosKeeper | 6043 | TCP |
|
||||
| statsd 格式写入接口 | 6044 | TCP/UDP |
|
||||
| collectd 格式写入接口 | 6045 | TCP/UDP |
|
||||
| openTSDB Telnet 格式写入接口 | 6046 | TCP |
|
||||
| collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP |
|
||||
| icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP |
|
||||
| tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP |
|
||||
| taosX | 6050, 6055 | TCP |
|
||||
| taosExplorer | 6060 | TCP |
|
||||
|
|
|
@ -163,3 +163,15 @@ s3BucketName td-test
|
|||
- 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价
|
||||
- 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码
|
||||
- 最大支持的 S3 服务配置数为 10
|
||||
|
||||
### 不依赖 Flexify 服务
|
||||
|
||||
用户界面同 S3,不同的地方在于下面三个参数的配置:
|
||||
|
||||
| # | 参数 | 示例值 | 描述 |
|
||||
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
|
||||
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
|
||||
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey |
|
||||
| 3 | s3BucketName | test-container | Container name |
|
||||
|
||||
其中 fd2d01c73 是账户 ID;微软 Blob 存储服务只支持 Https 协议,不支持 Http。
|
||||
|
|
|
@ -156,8 +156,8 @@ charset 的有效值是 UTF-8。
|
|||
### 内存相关
|
||||
| 参数名称 | 参数说明 |
|
||||
| :----------------: | :---------------------------------------------: |
|
||||
| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10 |
|
||||
| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
||||
| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10 |
|
||||
| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
||||
|
||||
### 性能调优
|
||||
|
||||
|
|
|
@ -42,12 +42,12 @@ table_option: {
|
|||
**使用说明**
|
||||
|
||||
1. 表(列)名命名规则参见[名称命名规则](./19-limit.md#名称命名规则)。
|
||||
1. 表名最大长度为 192。
|
||||
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。
|
||||
1. 除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(varchar)。
|
||||
1. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
1. 使用数据类型 BINARY/NCHAR/GEOMETRY,需指定其最长的字节数,如 BINARY(20),表示 20 字节。
|
||||
1. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress)
|
||||
2. 表名最大长度为 192。
|
||||
3. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。
|
||||
4. 除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(VARCHAR)。
|
||||
5. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 VARCHAR/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
6. 使用数据类型 VARCHAR/NCHAR/GEOMETRY,需指定其最长的字节数,如 VARCHAR(20),表示 20 字节。
|
||||
7. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress)
|
||||
|
||||
**参数说明**
|
||||
|
||||
|
|
|
@ -81,6 +81,13 @@ typedef enum {
|
|||
TSDB_SML_TIMESTAMP_NANO_SECONDS,
|
||||
} TSDB_SML_TIMESTAMP_TYPE;
|
||||
|
||||
typedef enum TAOS_FIELD_T {
|
||||
TAOS_FIELD_COL = 1,
|
||||
TAOS_FIELD_TAG,
|
||||
TAOS_FIELD_QUERY,
|
||||
TAOS_FIELD_TBNAME,
|
||||
} TAOS_FIELD_T;
|
||||
|
||||
typedef struct taosField {
|
||||
char name[65];
|
||||
int8_t type;
|
||||
|
@ -95,6 +102,15 @@ typedef struct TAOS_FIELD_E {
|
|||
int32_t bytes;
|
||||
} TAOS_FIELD_E;
|
||||
|
||||
typedef struct TAOS_FIELD_STB {
|
||||
char name[65];
|
||||
int8_t type;
|
||||
uint8_t precision;
|
||||
uint8_t scale;
|
||||
int32_t bytes;
|
||||
TAOS_FIELD_T field_type;
|
||||
} TAOS_FIELD_STB;
|
||||
|
||||
#ifdef WINDOWS
|
||||
#define DLL_EXPORT __declspec(dllexport)
|
||||
#else
|
||||
|
@ -195,13 +211,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt);
|
|||
|
||||
typedef void TAOS_STMT2;
|
||||
|
||||
typedef enum TAOS_FIELD_T {
|
||||
TAOS_FIELD_COL = 1,
|
||||
TAOS_FIELD_TAG,
|
||||
TAOS_FIELD_QUERY,
|
||||
TAOS_FIELD_TBNAME,
|
||||
} TAOS_FIELD_T;
|
||||
|
||||
typedef struct TAOS_STMT2_OPTION {
|
||||
int64_t reqid;
|
||||
bool singleStbInsert;
|
||||
|
@ -232,7 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows);
|
|||
DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt);
|
||||
DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert);
|
||||
DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields);
|
||||
DLL_EXPORT int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields);
|
||||
DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields);
|
||||
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields);
|
||||
DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt);
|
||||
DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt);
|
||||
|
||||
|
@ -251,17 +262,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res);
|
|||
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
|
||||
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
|
||||
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
|
||||
DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows);
|
||||
DLL_EXPORT bool taos_is_update_query(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
|
||||
DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows);
|
||||
DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData);
|
||||
DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex);
|
||||
DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
|
||||
DLL_EXPORT void taos_reset_current_db(TAOS *taos);
|
||||
DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
|
||||
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
|
||||
DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows);
|
||||
DLL_EXPORT bool taos_is_update_query(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
|
||||
DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows);
|
||||
DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData);
|
||||
DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex);
|
||||
DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
|
||||
DLL_EXPORT void taos_reset_current_db(TAOS *taos);
|
||||
|
||||
DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res);
|
||||
DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);
|
||||
|
|
|
@ -189,7 +189,12 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint
|
|||
|
||||
int32_t getJsonValueLen(const char* data);
|
||||
|
||||
// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length.
|
||||
// If the same row is inserted repeatedly, data holes will result.
|
||||
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1),
|
||||
// it will be inserted at the original position and the old data will be overwritten.
|
||||
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows,
|
||||
bool trimValue);
|
||||
|
@ -233,7 +238,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
|
|||
* @brief find how many rows already in order start from first row
|
||||
*/
|
||||
int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo);
|
||||
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk);
|
||||
int32_t blockDataCheck(const SSDataBlock* pDataBlock);
|
||||
|
||||
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
|
||||
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
|
@ -266,7 +271,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId)
|
|||
int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData);
|
||||
|
||||
int32_t blockGetEncodeSize(const SSDataBlock* pBlock);
|
||||
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
|
||||
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols);
|
||||
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos);
|
||||
|
||||
// for debug
|
||||
|
|
|
@ -142,6 +142,7 @@ extern bool tsMonitorForceV2;
|
|||
// audit
|
||||
extern bool tsEnableAudit;
|
||||
extern bool tsEnableAuditCreateTable;
|
||||
extern bool tsEnableAuditDelete;
|
||||
extern int32_t tsAuditInterval;
|
||||
|
||||
// telem
|
||||
|
@ -153,6 +154,12 @@ extern bool tsEnableCrashReport;
|
|||
extern char *tsTelemUri;
|
||||
extern char *tsClientCrashReportUri;
|
||||
extern char *tsSvrCrashReportUri;
|
||||
extern int8_t tsSafetyCheckLevel;
|
||||
enum {
|
||||
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
|
||||
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
|
||||
TSDB_SAFETY_CHECK_LEVELL_BYROW = 2,
|
||||
};
|
||||
|
||||
// query buffer management
|
||||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
|
||||
|
@ -194,10 +201,10 @@ extern int32_t tsMinIntervalTime;
|
|||
extern int32_t tsMaxInsertBatchRows;
|
||||
|
||||
// build info
|
||||
extern char version[];
|
||||
extern char compatible_version[];
|
||||
extern char gitinfo[];
|
||||
extern char buildinfo[];
|
||||
extern char td_version[];
|
||||
extern char td_compatible_version[];
|
||||
extern char td_gitinfo[];
|
||||
extern char td_buildinfo[];
|
||||
|
||||
// lossy
|
||||
extern char tsLossyColumns[];
|
||||
|
|
|
@ -1022,6 +1022,7 @@ typedef struct {
|
|||
char sDetailVer[128];
|
||||
int64_t whiteListVer;
|
||||
SMonitorParas monitorParas;
|
||||
int8_t enableAuditDelete;
|
||||
} SConnectRsp;
|
||||
|
||||
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
|
||||
|
@ -1215,6 +1216,7 @@ typedef struct {
|
|||
int32_t bytes;
|
||||
int8_t type;
|
||||
uint8_t pk;
|
||||
bool noData;
|
||||
} SColumnInfo;
|
||||
|
||||
typedef struct STimeWindow {
|
||||
|
@ -1826,6 +1828,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
|
|||
int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
|
||||
void tFreeSStatisReq(SStatisReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
char db[TSDB_DB_FNAME_LEN];
|
||||
char table[TSDB_TABLE_NAME_LEN];
|
||||
char operation[AUDIT_OPERATION_LEN];
|
||||
int32_t sqlLen;
|
||||
char* pSql;
|
||||
} SAuditReq;
|
||||
int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
|
||||
int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
|
||||
void tFreeSAuditReq(SAuditReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t dnodeId;
|
||||
int64_t clusterId;
|
||||
|
@ -3414,6 +3427,7 @@ typedef struct {
|
|||
int32_t svrTimestamp;
|
||||
SArray* rsps; // SArray<SClientHbRsp>
|
||||
SMonitorParas monitorParas;
|
||||
int8_t enableAuditDelete;
|
||||
} SClientHbBatchRsp;
|
||||
|
||||
static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); }
|
||||
|
|
|
@ -259,6 +259,7 @@
|
|||
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
|
||||
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
|
||||
|
||||
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
|
||||
|
|
|
@ -298,6 +298,7 @@ typedef struct {
|
|||
#define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
|
||||
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
|
||||
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
|
||||
#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX)
|
||||
|
||||
#define IS_CONVERT_AS_SIGNED(_t) \
|
||||
(IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
||||
|
|
|
@ -29,7 +29,6 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#define AUDIT_DETAIL_MAX 65472
|
||||
#define AUDIT_OPERATION_LEN 20
|
||||
|
||||
typedef struct {
|
||||
const char *server;
|
||||
|
|
|
@ -292,6 +292,7 @@ bool fmIsElapsedFunc(int32_t funcId);
|
|||
|
||||
void getLastCacheDataType(SDataType* pType, int32_t pkBytes);
|
||||
int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc);
|
||||
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc);
|
||||
|
||||
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc);
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ extern "C" {
|
|||
#include "thash.h"
|
||||
#include "query.h"
|
||||
#include "tqueue.h"
|
||||
#include "clientInt.h"
|
||||
|
||||
typedef enum {
|
||||
SQL_RESULT_SUCCESS = 0,
|
||||
|
@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name,
|
|||
void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values);
|
||||
const char* monitorResultStr(SQL_RESULT_CODE code);
|
||||
int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data);
|
||||
|
||||
void clientOperateReport(SRequestObj* pRequest);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -65,7 +65,7 @@ typedef struct SParseCsvCxt {
|
|||
const char* pLastSqlPos; // the location of the last parsed sql
|
||||
} SParseCsvCxt;
|
||||
|
||||
typedef void(*setQueryFn)(int64_t);
|
||||
typedef void (*setQueryFn)(int64_t);
|
||||
|
||||
typedef struct SParseContext {
|
||||
uint64_t requestId;
|
||||
|
@ -147,6 +147,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
|
|||
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
|
||||
int32_t colIdx, int32_t rowNum);
|
||||
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
|
||||
int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields);
|
||||
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
|
||||
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
|
||||
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
|
||||
|
|
|
@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara
|
|||
int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t timeZoneStrLen();
|
||||
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
|
|
|
@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
|
|||
#define SSTREAM_TASK_NEED_CONVERT_VER 2
|
||||
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
|
||||
|
||||
extern int32_t streamMetaId;
|
||||
extern int32_t streamMetaRefPool;
|
||||
extern int32_t streamTaskRefPool;
|
||||
|
||||
enum {
|
||||
STREAM_STATUS__NORMAL = 0,
|
||||
|
@ -258,6 +259,7 @@ typedef struct STaskId {
|
|||
typedef struct SStreamTaskId {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
int64_t refId;
|
||||
const char* idStr;
|
||||
} SStreamTaskId;
|
||||
|
||||
|
@ -291,7 +293,6 @@ typedef struct SStreamStatus {
|
|||
int8_t schedStatus;
|
||||
int8_t statusBackup;
|
||||
int32_t schedIdleTime; // idle time before invoke again
|
||||
int32_t timerActive; // timer is active
|
||||
int64_t lastExecTs; // last exec time stamp
|
||||
int32_t inScanHistorySentinel;
|
||||
bool appendTranstateBlock; // has append the transfer state data block already
|
||||
|
@ -454,7 +455,6 @@ struct SStreamTask {
|
|||
|
||||
// the followings attributes don't be serialized
|
||||
SScanhistorySchedInfo schedHistoryInfo;
|
||||
int32_t refCnt;
|
||||
int32_t transferStateAlignCnt;
|
||||
struct SStreamMeta* pMeta;
|
||||
SSHashObj* pNameMap;
|
||||
|
@ -546,7 +546,7 @@ typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
|
|||
|
||||
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
|
||||
SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask);
|
||||
void tFreeStreamTask(SStreamTask* pTask);
|
||||
void tFreeStreamTask(void* pTask);
|
||||
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
|
||||
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
|
||||
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
|
||||
|
@ -664,6 +664,8 @@ void streamTaskResetStatus(SStreamTask* pTask);
|
|||
void streamTaskSetStatusReady(SStreamTask* pTask);
|
||||
ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
|
||||
const char* streamTaskGetExecType(int32_t type);
|
||||
int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId);
|
||||
void streamTaskFreeRefId(int64_t* pRefId);
|
||||
|
||||
bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
|
||||
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
|
||||
|
@ -752,16 +754,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
|
|||
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
|
||||
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
|
||||
int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
|
||||
int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask);
|
||||
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
|
||||
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||
int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
|
||||
void streamMetaClear(SStreamMeta* pMeta);
|
||||
void streamMetaInitBackend(SStreamMeta* pMeta);
|
||||
int32_t streamMetaCommit(SStreamMeta* pMeta);
|
||||
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
|
||||
void streamMetaNotifyClose(SStreamMeta* pMeta);
|
||||
void streamMetaStartHb(SStreamMeta* pMeta);
|
||||
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
|
||||
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
|
||||
int64_t endTs, bool ready);
|
||||
int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo);
|
||||
|
|
|
@ -653,6 +653,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
|
|||
#define MONITOR_TAG_VALUE_LEN 300
|
||||
#define MONITOR_METRIC_NAME_LEN 100
|
||||
|
||||
#define AUDIT_OPERATION_LEN 20
|
||||
|
||||
typedef enum {
|
||||
ANAL_ALGO_TYPE_ANOMALY_DETECT = 0,
|
||||
ANAL_ALGO_TYPE_FORECAST = 1,
|
||||
|
|
|
@ -20,11 +20,11 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
extern char version[];
|
||||
extern char compatible_version[];
|
||||
extern char gitinfo[];
|
||||
extern char gitinfoOfInternal[];
|
||||
extern char buildinfo[];
|
||||
extern char td_version[];
|
||||
extern char td_compatible_version[];
|
||||
extern char td_gitinfo[];
|
||||
extern char td_gitinfoOfInternal[];
|
||||
extern char td_buildinfo[];
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -185,7 +185,14 @@ function kill_process() {
|
|||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}rm -rf ${install_main_dir}/cfg || :
|
||||
${csudo}rm -rf ${install_main_dir}/bin || :
|
||||
${csudo}rm -rf ${install_main_dir}/driver || :
|
||||
${csudo}rm -rf ${install_main_dir}/examples || :
|
||||
${csudo}rm -rf ${install_main_dir}/include || :
|
||||
${csudo}rm -rf ${install_main_dir}/share || :
|
||||
${csudo}rm -rf ${install_main_dir}/log || :
|
||||
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
TDengine client is installed successfully. Please open a terminal and execute the commands below:
|
||||
|
||||
To configure TDengine client, sudo vi /etc/taos/taos.cfg
|
||||
To access TDengine command line interface, taos -h YourServerName
|
|
@ -10,6 +10,10 @@ else()
|
|||
add_library(taos SHARED ${CLIENT_SRC})
|
||||
endif()
|
||||
|
||||
if(${TD_DARWIN})
|
||||
target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype)
|
||||
endif()
|
||||
|
||||
INCLUDE_DIRECTORIES(jni)
|
||||
|
||||
target_include_directories(
|
||||
|
@ -46,6 +50,11 @@ set_target_properties(
|
|||
)
|
||||
|
||||
add_library(taos_static STATIC ${CLIENT_SRC})
|
||||
|
||||
if(${TD_DARWIN})
|
||||
target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype)
|
||||
endif()
|
||||
|
||||
target_include_directories(
|
||||
taos_static
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/client"
|
||||
|
|
|
@ -108,6 +108,10 @@ typedef struct SQueryExecMetric {
|
|||
int64_t execCostUs;
|
||||
} SQueryExecMetric;
|
||||
|
||||
typedef struct {
|
||||
SMonitorParas monitorParas;
|
||||
int8_t enableAuditDelete;
|
||||
} SAppInstServerCFG;
|
||||
struct SAppInstInfo {
|
||||
int64_t numOfConns;
|
||||
SCorEpSet mgmtEp;
|
||||
|
@ -121,7 +125,7 @@ struct SAppInstInfo {
|
|||
void* pTransporter;
|
||||
SAppHbMgr* pAppHbMgr;
|
||||
char* instKey;
|
||||
SMonitorParas monitorParas;
|
||||
SAppInstServerCFG serverCfg;
|
||||
};
|
||||
|
||||
typedef struct SAppInfo {
|
||||
|
@ -297,8 +301,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
|
|||
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
|
||||
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
|
||||
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
|
||||
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
|
||||
bool convertUcs4);
|
||||
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4);
|
||||
int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
|
||||
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
|
||||
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);
|
||||
|
|
|
@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags);
|
|||
int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx);
|
||||
int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
|
||||
int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
|
||||
int stmtGetStbColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_STB **fields);
|
||||
int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums);
|
||||
int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums);
|
||||
int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert);
|
||||
|
|
|
@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int
|
|||
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType)));
|
||||
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(
|
||||
json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows)));
|
||||
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) {
|
||||
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen];
|
||||
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0';
|
||||
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) {
|
||||
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen];
|
||||
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0';
|
||||
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
|
||||
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp;
|
||||
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp;
|
||||
} else {
|
||||
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
|
|||
}
|
||||
}
|
||||
|
||||
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
|
||||
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
|
||||
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) {
|
||||
sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT);
|
||||
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
|
||||
|
@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) {
|
|||
}
|
||||
}
|
||||
|
||||
if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL ||
|
||||
duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
|
||||
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) {
|
||||
if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
|
||||
duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
|
||||
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
|
||||
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
|
||||
if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) {
|
||||
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
|
||||
taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s",
|
||||
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration,
|
||||
pRequest->sqlstr);
|
||||
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
|
||||
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
|
||||
slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration);
|
||||
if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) {
|
||||
tscError("failed to generate write slow log");
|
||||
|
@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread,
|
|||
rpcInit.startReadTimer = 1;
|
||||
rpcInit.readTimeout = tsReadTimeout;
|
||||
|
||||
int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
|
||||
int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscError("invalid version string.");
|
||||
return code;
|
||||
|
@ -689,7 +689,7 @@ void doDestroyRequest(void *p) {
|
|||
|
||||
int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscError("failed to remove request from hash, code:%s", tstrerror(code));
|
||||
tscWarn("failed to remove request from hash, code:%s", tstrerror(code));
|
||||
}
|
||||
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
||||
|
||||
|
|
|
@ -605,7 +605,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
|
|||
return code;
|
||||
}
|
||||
|
||||
pInst->monitorParas = pRsp.monitorParas;
|
||||
pInst->serverCfg.monitorParas = pRsp.monitorParas;
|
||||
pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete;
|
||||
tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId,
|
||||
pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope);
|
||||
|
||||
|
|
|
@@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app));
tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user));
tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd));
tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer));
tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer));
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = taosMemoryMalloc(contLen);
@@ -1770,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
}
}
int32_t doProcessMsgFromServer(void* param) {
AsyncArg* arg = (AsyncArg*)param;
SRpcMsg* pMsg = &arg->msg;
SEpSet* pEpSet = arg->pEpset;
int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
if (pMsg->info.ahandle == NULL) {
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg);
taosMemoryFree(pEpSet);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
STscObj* pTscObj = NULL;
STraceId* trace = &pMsg->info.traceId;
@@ -1802,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) {
if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) {
tscError("doProcessMsgFromServer taosReleaseRef failed");
}
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont);
taosMemoryFree(pEpSet);
destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
pTscObj = pRequest->pTscObj;
@@ -1844,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) {
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_SUCCESS;
}
int32_t doProcessMsgFromServer(void* param) {
AsyncArg* arg = (AsyncArg*)param;
int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset);
taosMemoryFree(arg);
return code;
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
int32_t code = 0;
SEpSet* tEpSet = NULL;
if (pEpSet != NULL) {
tEpSet = taosMemoryCalloc(1, sizeof(SEpSet));
if (NULL == tEpSet) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pMsg->info.ahandle);
return;
code = terrno;
pMsg->code = terrno;
goto _exit;
}
(void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet));
}
@@ -1879,21 +1878,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg));
if (NULL == arg) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(tEpSet);
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pMsg->info.ahandle);
return;
code = terrno;
pMsg->code = code;
goto _exit;
}
arg->msg = *pMsg;
arg->pEpset = tEpSet;
if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
tscError("failed to sched msg to tsc, tsc ready to quit");
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg->pEpset);
destroySendMsgInfo(pMsg->info.ahandle);
if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) {
pMsg->code = code;
taosMemoryFree(arg);
goto _exit;
}
return;
_exit:
tscError("failed to sched msg to tsc since %s", tstrerror(code));
code = doProcessMsgFromServerImpl(pMsg, tEpSet);
if (code != 0) {
tscError("failed to sched msg to tsc, tsc ready quit");
}
}
@@ -2081,12 +2084,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
int32_t idx = -1;
iconv_t conv = taosAcquireConv(&idx, C2M);
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int32_t type = pResultInfo->fields[i].type;
int32_t bytes = pResultInfo->fields[i].bytes;
@@ -2100,7 +2103,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->convertBuf[i] = p;
SResultColumn* pCol = &pResultInfo->pCol[i];
for (int32_t j = 0; j < numOfRows; ++j) {
for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) {
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;
@@ -2133,10 +2136,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
@@ -2195,10 +2201,16 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
}
pStart += colLen;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
len += sizeof(bool);
return len;
}
static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
bool needConvert = false;
for (int32_t i = 0; i < numOfCols; ++i) {
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
@@ -2215,7 +2227,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
int32_t dataLen = estimateJsonLen(pResultInfo);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@@ -2338,27 +2350,36 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart1 += colLen1;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
// (void)memcpy(pStart1, pStart, sizeof(bool));
totalLen += sizeof(bool);
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
pResultInfo->pData = pResultInfo->convertJson;
return TSDB_CODE_SUCCESS;
}
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4) {
if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) {
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) {
if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) {
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (numOfRows == 0) {
if (pResultInfo->numOfRows == 0) {
return TSDB_CODE_SUCCESS;
}
if (pResultInfo->pData == NULL) {
tscError("setResultDataPtr error: pData is NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
int32_t code = doPrepareResPtr(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
code = doConvertJson(pResultInfo, numOfCols, numOfRows);
code = doConvertJson(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2378,9 +2399,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);
if (rows != numOfRows || cols != numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols,
numOfCols);
if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols,
pResultInfo->numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@@ -2391,7 +2412,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
p += sizeof(uint64_t);
// check fields
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int8_t type = *(int8_t*)p;
p += sizeof(int8_t);
@@ -2400,10 +2421,14 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
}
int32_t* colLength = (int32_t*)p;
p += sizeof(int32_t) * numOfCols;
p += sizeof(int32_t) * pResultInfo->numOfCols;
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
if ((pStart - pResultInfo->pData) >= dataLen) {
tscError("setResultDataPtr invalid offset over dataLen %d", dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (blockVersion == BLOCK_VERSION_1) {
colLength[i] = htonl(colLength[i]);
}
@@ -2411,10 +2436,13 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) {
tscError("invalid type %d", pResultInfo->fields[i].type);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
pResultInfo->pCol[i].offset = (int32_t*)pStart;
pStart += numOfRows * sizeof(int32_t);
pStart += pResultInfo->numOfRows * sizeof(int32_t);
} else {
pResultInfo->pCol[i].nullbitmap = pStart;
pStart += BitmapLen(pResultInfo->numOfRows);
@@ -2427,11 +2455,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
p = pStart;
// bool blankFill = *(bool*)p;
p += sizeof(bool);
int32_t offset = p - pResultInfo->pData;
if (offset > dataLen) {
tscError("invalid offset %d, dataLen %d", offset, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
code = doConvertUCS4(pResultInfo, colLength);
}
return code;
@@ -2544,7 +2578,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
pResultInfo->totalRows += pResultInfo->numOfRows;
int32_t code =
setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4);
setResultDataPtr(pResultInfo, convertUcs4);
return code;
}
@@ -2573,7 +2607,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
rpcInit.readTimeout = tsReadTimeout;
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) {
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) {
tscError("faild to convert taos version from str to int, errcode:%s", terrstr());
goto _OVER;
}
@@ -2839,6 +2873,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
if (pParam->pRequest) {
pParam->pRequest->code = code;
clientOperateReport(pParam->pRequest);
}
if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) {
@@ -84,7 +84,7 @@ void taos_cleanup(void) {
taosCloseRef(id);
nodesDestroyAllocatorSet();
// cleanupAppInfo();
// cleanupAppInfo();
rpcCleanup();
tscDebug("rpc cleanup");
@@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) {
tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp);
}
taosMemoryFree(pRsp);
}
void taos_kill_query(TAOS *taos) {
@@ -484,7 +483,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields);
}
int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){
int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int32_t len = 0;
for (int i = 0; i < num_fields; ++i) {
if (i > 0 && len < size - 1) {
@@ -589,7 +588,7 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD
break;
}
}
if (len < size){
if (len < size) {
str[len] = 0;
}
@@ -670,7 +669,7 @@ const char *taos_data_type(int type) {
}
}
const char *taos_get_client_info() { return version; }
const char *taos_get_client_info() { return td_version; }
// return int32_t
int taos_affected_rows(TAOS_RES *res) {
@@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) {
}
int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) {
if (stmt == NULL || NULL == count) {
if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
@@ -2103,12 +2102,28 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count,
}
}
int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields) {
if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
return stmtGetStbColFields2(stmt, count, fields);
}
void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);
}
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);
}
TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) {
if (stmt == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
@@ -2144,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) {
return 0;
}
char *getBuildInfo() { return buildinfo; }
char *getBuildInfo() { return td_buildinfo; }
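The hunk above adds a public pair of entry points for fetching and releasing super-table column descriptions from a prepared stmt2 statement. The following is a minimal, hypothetical caller sketch: only `taos_stmt2_get_stb_fields` and `taos_stmt2_free_stb_fields` come from this change; the surrounding stmt2 calls, the example SQL, and the `TAOS_FIELD_STB` member name are assumptions based on the existing stmt2/TAOS_FIELD_E API, not part of this diff.

```c
#include <stdio.h>
#include "taos.h"

/* Hypothetical sketch: list the column fields of a super-table insert statement. */
static void list_stb_fields(TAOS *conn) {
  TAOS_STMT2_OPTION option = {0};
  TAOS_STMT2 *stmt = taos_stmt2_init(conn, &option);   /* assumed init call */
  if (stmt == NULL) return;

  /* assumed insert statement bound to a super table named "meters" */
  if (taos_stmt2_prepare(stmt, "insert into ? using meters tags(?,?) values(?,?)", 0) == 0) {
    int count = 0;
    TAOS_FIELD_STB *fields = NULL;
    if (taos_stmt2_get_stb_fields(stmt, &count, &fields) == 0) {   /* new API */
      for (int i = 0; i < count; ++i) {
        printf("field %d: %s\n", i, fields[i].name);               /* member name assumed */
      }
      taos_stmt2_free_stb_fields(stmt, fields);                    /* new free helper */
    }
  }
  taos_stmt2_close(stmt);
}
```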
@@ -2,8 +2,6 @@
#include "cJSON.h"
#include "clientInt.h"
#include "clientLog.h"
#include "os.h"
#include "tglobal.h"
#include "tmisce.h"
#include "tqueue.h"
#include "ttime.h"
@@ -19,6 +17,7 @@ STaosQueue* monitorQueue;
SHashObj* monitorSlowLogHash;
char tmpSlowLogPath[PATH_MAX] = {0};
TdThread monitorThread;
extern bool tsEnableAuditDelete;
static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) {
int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir);
@@ -216,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) {
SEpSet ep = getEpSet_s(&pInst->mgmtEp);
generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep);
bool reset =
taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset);
taosRUnLockLatch(&monitorLock);
}
@@ -289,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) {
goto fail;
}
pMonitor->timer =
taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
if (pMonitor->timer == NULL) {
tscError("failed to start timer");
goto fail;
@@ -660,7 +659,7 @@ static void monitorSendAllSlowLog() {
taosHashCancelIterate(monitorSlowLogHash, pIter);
return;
}
if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) {
if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) {
pClient->lastCheckTime = t;
} else {
continue;
@@ -686,7 +685,7 @@ static void monitorSendAllSlowLog() {
static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId);
if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) {
if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) {
tscInfo("[monitor] monitor is disabled, skip send slow log");
return;
}
@@ -932,4 +931,101 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) {
taosFreeQitem(slowLogData);
}
return 0;
}
}
int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
tscDebug("[del report]delete reportCB code:%d", code);
return 0;
}
int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) {
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
tscError("[del report]failed to allocate memory for sendInfo");
return terrno;
}
sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL};
sendInfo->requestId = requestId;
sendInfo->requestObjRefId = 0;
sendInfo->param = NULL;
sendInfo->fp = reportCB;
sendInfo->msgType = TDMT_MND_AUDIT;
SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
if (code != 0) {
tscError("[del report]failed to send msg to server, code:%d", code);
taosMemoryFree(sendInfo);
return code;
}
return TSDB_CODE_SUCCESS;
}
static void reportDeleteSql(SRequestObj* pRequest) {
SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot;
STscObj* pTscObj = pRequest->pTscObj;
if (pTscObj == NULL || pTscObj->pAppInfo == NULL) {
tscError("[del report]invalid tsc obj");
return;
}
if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) {
tscDebug("[del report]audit delete is disabled");
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
tscDebug("[del report]delete request result code:%d", pRequest->code);
return;
}
if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) {
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable));
return;
}
SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable;
SAuditReq req;
req.pSql = pRequest->sqlstr;
req.sqlLen = pRequest->sqlLen;
TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName));
TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName));
TAOS_UNUSED(tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"));
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("[del report]failed to allocate memory for req");
return;
}
if (tSerializeSAuditReq(pReq, tlen, &req) < 0) {
tscError("[del report]failed to serialize req");
taosMemoryFree(pReq);
return;
}
int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId);
if (code != 0) {
tscError("[del report]failed to send audit info, code:%d", code);
taosMemoryFree(pReq);
return;
}
tscDebug("[del report]delete data, sql:%s", req.pSql);
}
void clientOperateReport(SRequestObj* pRequest) {
if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) {
tscError("[del report]invalid request");
return;
}
if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) {
reportDeleteSql(pRequest);
}
}
@@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
goto End;
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
goto End;
}
@@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
// update the appInstInfo
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete;
tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d",
connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope);
lastClusterId = connectRsp.clusterId;
@@ -588,7 +589,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@@ -603,7 +605,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS);
if(len < 0) {
uError("buildShowVariablesRsp error, len:%d", len);
code = terrno;
@@ -741,7 +743,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@@ -757,7 +760,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS);
if(len < 0) {
uError("buildRetriveTableRspForCompactDb error, len:%d", len);
code = terrno;
@@ -1570,7 +1570,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
rows, pData, tbname, fields, numFields);
@@ -1631,7 +1631,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname);
@@ -1835,7 +1835,7 @@ end:
static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) {
int32_t code = 0;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0));
(*pRequest)->syncQuery = true;
if (!(*pRequest)->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
@@ -1855,19 +1855,19 @@ end:
typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp);
static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func,
SMqRspObj* rspObj) {
int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t);
int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t);
}
rspObj->resIter = -1;
tDecoderInit(decoder, data, dataLen);
int32_t code = func(decoder, &rspObj->dataRsp);
if (code != 0) {
SET_ERROR_MSG("decode mq taosx data rsp failed");
rspObj->resIter = -1;
tDecoderInit(decoder, data, dataLen);
int32_t code = func(decoder, &rspObj->dataRsp);
if (code != 0) {
SET_ERROR_MSG("decode mq taosx data rsp failed");
}
return code;
return code;
}
static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash,
@@ -2195,44 +2195,44 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) {
typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp);
static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) {
int32_t len = 0;
int32_t code = 0;
SEncoder encoder = {0};
void* buf = NULL;
tEncodeSize(encodeFunc, rspObj, len, code);
if (code < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
int32_t len = 0;
int32_t code = 0;
SEncoder encoder = {0};
void* buf = NULL;
tEncodeSize(encodeFunc, rspObj, len, code);
if (code < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
}
len += sizeof(int8_t) + sizeof(int32_t);
buf = taosMemoryCalloc(1, len);
if (buf == NULL) {
code = terrno;
goto FAILED;
len += sizeof(int8_t) + sizeof(int32_t);
buf = taosMemoryCalloc(1, len);
if (buf == NULL) {
code = terrno;
goto FAILED;
}
tEncoderInit(&encoder, buf, len);
if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
tEncoderInit(&encoder, buf, len);
if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
}
int32_t offsetLen = getOffSetLen(rspObj);
if (offsetLen <= 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
int32_t offsetLen = getOffSetLen(rspObj);
if (offsetLen <= 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
}
if (tEncodeI32(&encoder, offsetLen) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
if (tEncodeI32(&encoder, offsetLen) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
}
if (encodeFunc(&encoder, rspObj) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
if (encodeFunc(&encoder, rspObj) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto FAILED;
}
tEncoderClear(&encoder);
tEncoderClear(&encoder);
raw->raw = buf;
raw->raw_len = len;
return code;
raw->raw = buf;
raw->raw_len = len;
return code;
FAILED:
tEncoderClear(&encoder);
taosMemoryFree(buf);
@@ -2380,4 +2380,4 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen
end:
tDeleteMqBatchMetaRsp(&rsp);
return code;
}
}
@@ -1695,7 +1695,7 @@ END:
}
void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) {
if (request->pTscObj->pAppInfo->monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
if (request->pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
int32_t len = 0;
int32_t rlen = 0;
char *p = NULL;
@@ -1740,7 +1740,7 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
SSmlHandle *info = NULL;
int cnt = 0;
while (1) {
SML_CHECK_CODE(createRequest(*(int64_t *)taos, TSDB_SQL_INSERT, reqid, &request));
SML_CHECK_CODE(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &request, reqid));
SSmlMsgBuf msg = {request->msgBufLen, request->msgBuf};
request->code = smlBuildSmlInfo(taos, &info);
SML_CHECK_CODE(request->code);
@@ -1068,6 +1068,34 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E
return TSDB_CODE_SUCCESS;
}
static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_STB** fields) {
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
return pStmt->errCode;
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
tscError("invalid operation to get query column fileds");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STableDataCxt** pDataBlock = NULL;
if (pStmt->sql.stbInterlaceMode) {
pDataBlock = &pStmt->sql.siInfo.pDataCtx;
} else {
pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
}
}
STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, fieldNum, fields));
return TSDB_CODE_SUCCESS;
}
/*
SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) {
while (true) {
@@ -1808,7 +1836,7 @@ _return:
return code;
}
int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) {
int stmtParseColFields2(TAOS_STMT2* stmt) {
int32_t code = 0;
STscStmt2* pStmt = (STscStmt2*)stmt;
int32_t preCode = pStmt->errCode;
@@ -1842,8 +1870,6 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) {
STMT_ERRI_JRET(stmtParseSql(pStmt));
}
STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, fields));
_return:
pStmt->errCode = preCode;
@@ -1851,6 +1877,24 @@ _return:
return code;
}
int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) {
int32_t code = stmtParseColFields2(stmt);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return stmtFetchColFields2(stmt, nums, fields);
}
int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_STB** fields) {
int32_t code = stmtParseColFields2(stmt);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return stmtFetchStbColFields2(stmt, nums, fields);
}
int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
STscStmt2* pStmt = (STscStmt2*)stmt;
@@ -2869,8 +2869,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes
pRspObj->resInfo.precision = precision;
pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows;
int32_t code = setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols,
pRspObj->resInfo.numOfRows, convertUcs4);
int32_t code = setResultDataPtr(&pRspObj->resInfo, convertUcs4);
if (code != 0) {
return code;
}
@@ -165,8 +165,8 @@ static const SSysDbTableSchema userStbsSchema[] = {
static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "stream_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "stream_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@@ -190,9 +190,9 @@ static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_total", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@@ -18,6 +18,7 @@
#include "tcompare.h"
#include "tlog.h"
#include "tname.h"
#include "tglobal.h"
#define MALLOC_ALIGN_BYTES 32
@@ -86,8 +87,18 @@ int32_t getJsonValueLen(const char* data) {
return dataLen;
}
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (isNull || pData == NULL) {
static int32_t getDataLen(int32_t type, const char* pData) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
}
return dataLen;
}
static int32_t colDataSetValHelp(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (isNull || pData == NULL) {
// There is a placehold for each NULL value of binary or nchar type.
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
pColumnInfoData->varmeta.offset[rowIndex] = -1; // it is a null value of VAR type.
@@ -101,11 +112,9 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
int32_t dataLen = getDataLen(type, pData);
if (pColumnInfoData->varmeta.offset[rowIndex] > 0) {
pColumnInfoData->varmeta.length = pColumnInfoData->varmeta.offset[rowIndex];
}
SVarColAttr* pAttr = &pColumnInfoData->varmeta;
@@ -134,7 +143,7 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
uint32_t len = pColumnInfoData->varmeta.length;
pColumnInfoData->varmeta.offset[rowIndex] = len;
(void) memmove(pColumnInfoData->pData + len, pData, dataLen);
(void)memmove(pColumnInfoData->pData + len, pData, dataLen);
pColumnInfoData->varmeta.length += dataLen;
} else {
memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * rowIndex, pData, pColumnInfoData->info.bytes);
@@ -144,6 +153,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0;
}
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
pColumnInfoData->varmeta.offset[rowIndex] = -1;
}
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx,
const char* pData) {
int32_t type = pColumnInfoData->info.type;
@@ -3041,8 +3062,12 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
}
// return length of encoded data, return -1 if failed
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
blockDataCheck(pBlock, false);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) {
int32_t code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
int32_t dataLen = 0;
@@ -3106,9 +3131,11 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
size_t metaSize = 0;
if (IS_VAR_DATA_TYPE(pColRes->info.type)) {
metaSize = numOfRows * sizeof(int32_t);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->varmeta.offset, metaSize);
} else {
metaSize = BitmapLen(numOfRows);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->nullbitmap, metaSize);
}
@@ -3127,12 +3154,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
}
colSizes[col] += colSize;
dataLen += colSize;
if(dataLen > dataBuflen) goto _exit;
(void) memmove(data, pColData, colSize);
data += colSize;
}
} else {
colSizes[col] = colDataGetLength(pColRes, numOfRows);
dataLen += colSizes[col];
if(dataLen > dataBuflen) goto _exit;
if (pColRes->pData != NULL) {
(void) memmove(data, pColRes->pData, colSizes[col]);
}
@@ -3156,7 +3185,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
*actualLen = dataLen;
*groupId = pBlock->info.id.groupId;
if (dataLen > dataBuflen) goto _exit;
return dataLen;
_exit:
uError("blockEncode dataLen:%d, dataBuflen:%zu", dataLen, dataBuflen);
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
return -1;
}
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos) {
@@ -3286,9 +3322,13 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos
*pEndPos = pStart;
blockDataCheck(pBlock, false);
code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return code;
}
return code;
return TSDB_CODE_SUCCESS;
}
int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) {
@@ -3498,20 +3538,19 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
return nextRowIdx;
}
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
return;
if (NULL == pDataBlock || pDataBlock->info.rows == 0) {
return;
#define BLOCK_DATA_CHECK_TRESSA(o) \
if (!(o)) { \
uError("blockDataCheck failed! line:%d", __LINE__); \
return TSDB_CODE_INTERNAL_ERROR; \
}
int32_t blockDataCheck(const SSDataBlock* pDataBlock) {
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER || NULL == pDataBlock || pDataBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
#define BLOCK_DATA_CHECK_TRESSA(o) ;
//#define BLOCK_DATA_CHECK_TRESSA(o) A S S E R T(o)
BLOCK_DATA_CHECK_TRESSA(pDataBlock->info.rows > 0);
if (!pDataBlock->info.dataLoad && !forceChk) {
return;
if (!pDataBlock->info.dataLoad) {
return TSDB_CODE_SUCCESS;
}
bool isVarType = false;
@@ -3522,8 +3561,10 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < colNum; ++i) {
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i);
BLOCK_DATA_CHECK_TRESSA(pCol != NULL);
isVarType = IS_VAR_DATA_TYPE(pCol->info.type);
checkRows = pDataBlock->info.rows;
if (pCol->info.noData == true) continue;
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset);
@@ -3531,27 +3572,39 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
BLOCK_DATA_CHECK_TRESSA(pCol->nullbitmap);
}
nextPos = 0;
nextPos = -1;
for (int64_t r = 0; r < checkRows; ++r) {
if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break;
if (!colDataIsNull_s(pCol, r)) {
BLOCK_DATA_CHECK_TRESSA(pCol->pData);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen);
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.allocLen > 0);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] < pCol->varmeta.length);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length);
if (pCol->reassigned) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0);
} else if (0 == r) {
} else if (0 == r || nextPos == -1) {
nextPos = pCol->varmeta.offset[r];
} else {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos);
}
colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]);
BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE);
char* pColData = pCol->pData + pCol->varmeta.offset[r];
int32_t colSize = 0;
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
colLen = getJsonValueLen(pColData);
} else {
colLen = varDataTLen(pColData);
}
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
BLOCK_DATA_CHECK_TRESSA(colLen >= CHAR_BYTES);
} else {
BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE);
}
BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes);
if (pCol->reassigned) {
BLOCK_DATA_CHECK_TRESSA((pCol->varmeta.offset[r] + colLen) <= pCol->varmeta.length);
} else {
@@ -3561,13 +3614,21 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
typeValue = *(char*)(pCol->pData + pCol->varmeta.offset[r] + colLen - 1);
} else {
GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r));
if (TSDB_DATA_TYPE_FLOAT == pCol->info.type) {
float v = 0;
GET_TYPED_DATA(v, float, pCol->info.type, colDataGetNumData(pCol, r));
} else if (TSDB_DATA_TYPE_DOUBLE == pCol->info.type) {
double v = 0;
GET_TYPED_DATA(v, double, pCol->info.type, colDataGetNumData(pCol, r));
} else {
GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r));
}
}
}
}
}
return;
return TSDB_CODE_SUCCESS;
}
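With the hunks above, `blockEncode` now takes the size of the destination buffer and bails out (returning -1 and setting `terrno`) when the encoded block would overflow it. The following is a minimal sketch of the updated caller pattern, mirroring the `buildShowVariablesRsp` and `dmProcessRetrieve` call sites changed in this merge; the helper name and the surrounding buffer handling are illustrative assumptions, error handling is trimmed.

```c
/* Sketch: allocate an upper-bound buffer with blockGetEncodeSize() and pass its
 * size to the new blockEncode() signature so the encoder can bound-check writes. */
static int32_t encodeBlockChecked(const SSDataBlock* pBlock, int32_t numOfCols, void** ppBuf, int32_t* pLen) {
  size_t bufSize = blockGetEncodeSize(pBlock);   // upper bound for the encoded block
  char*  buf = taosMemoryCalloc(1, bufSize);
  if (buf == NULL) return terrno;

  int32_t len = blockEncode(pBlock, buf, bufSize, numOfCols);  // new parameter: dataBuflen
  if (len < 0) {                                               // overflow or block check failure
    taosMemoryFree(buf);
    return terrno;
  }
  *ppBuf = buf;
  *pLen = len;
  return TSDB_CODE_SUCCESS;
}
```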
@@ -119,6 +119,7 @@ bool tsMonitorForceV2 = true;
// audit
bool tsEnableAudit = true;
bool tsEnableAuditCreateTable = true;
bool tsEnableAuditDelete = true;
int32_t tsAuditInterval = 5000;
// telem
@@ -137,8 +138,9 @@ bool tsEnableCrashReport = false;
#else
bool tsEnableCrashReport = true;
#endif
char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport";
char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport";
int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NORMAL;
// schemaless
bool tsSmlDot2Underline = true;
@@ -610,6 +612,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
@@ -675,10 +678,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@@ -777,6 +780,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableAuditDelete", tsEnableAuditDelete, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
@@ -1305,6 +1309,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark");
tsmaDataDeleteMark = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel");
tsSafetyCheckLevel = pItem->i32;
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@@ -1490,6 +1497,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable");
tsEnableAuditCreateTable = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableAuditDelete");
tsEnableAuditDelete = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval");
tsAuditInterval = pItem->i32;
@@ -1646,6 +1656,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval");
tsStreamCheckpointInterval = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint");
tsMaxConcurrentCheckpoint = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate");
tsSinkDataRate = pItem->fval;
@@ -2046,7 +2059,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"s3UploadDelaySec", &tsS3UploadDelaySec},
{"supportVnodes", &tsNumOfSupportVnodes},
{"experimental", &tsExperimental},
{"maxTsmaNum", &tsMaxTsmaNum}};
{"maxTsmaNum", &tsMaxTsmaNum},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);
@@ -2302,7 +2316,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"experimental", &tsExperimental},
{"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags},
{"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay},
{"tsmaDataDeleteMark", &tsmaDataDeleteMark}};
{"tsmaDataDeleteMark", &tsmaDataDeleteMark},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);
@@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t
}
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit);
|
|||
TAOS_CHECK_EXIT(tSerializeSClientHbRsp(&encoder, pRsp));
|
||||
}
|
||||
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pBatchRsp->monitorParas));
|
||||
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pBatchRsp->enableAuditDelete));
|
||||
tEndEncode(&encoder);
|
||||
|
||||
_exit:
|
||||
|
@ -609,6 +610,12 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR
|
|||
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pBatchRsp->monitorParas));
|
||||
}
|
||||
|
||||
if (!tDecodeIsEnd(&decoder)) {
|
||||
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pBatchRsp->enableAuditDelete));
|
||||
} else {
|
||||
pBatchRsp->enableAuditDelete = 0;
|
||||
}
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
_exit:
|
||||
|
@ -1813,6 +1820,60 @@ _exit:
|
|||
|
||||
void tFreeSDropUserReq(SDropUserReq *pReq) { FREESQL(); }
|
||||
|
||||
int32_t tSerializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
int32_t code = 0;
|
||||
int32_t lino;
|
||||
int32_t tlen;
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
|
||||
TAOS_CHECK_EXIT(tStartEncode(&encoder));
|
||||
|
||||
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->operation));
|
||||
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->db));
|
||||
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->table));
|
||||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->sqlLen));
|
||||
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->pSql));
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
_exit:
|
||||
if (code) {
|
||||
tlen = code;
|
||||
} else {
|
||||
tlen = encoder.pos;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
int32_t code = 0;
|
||||
int32_t lino;
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
|
||||
TAOS_CHECK_EXIT(tStartDecode(&decoder));
|
||||
|
||||
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->operation));
|
||||
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->db));
|
||||
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->table));
|
||||
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->sqlLen));
|
||||
if (pReq->sqlLen > 0) {
|
||||
pReq->pSql = taosMemoryMalloc(pReq->sqlLen + 1);
|
||||
if (pReq->pSql == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->pSql));
|
||||
}
|
||||
tEndDecode(&decoder);
|
||||
_exit:
|
||||
tDecoderClear(&decoder);
|
||||
return code;
|
||||
}
|
||||
|
||||
void tFreeSAuditReq(SAuditReq *pReq) { taosMemoryFreeClear(pReq->pSql); }
|
||||
|
||||
SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) {
|
||||
if (pIpWhiteList == NULL) return NULL;
|
||||
|
||||
|
@ -6294,6 +6355,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
|
|||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->authVer));
|
||||
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer));
|
||||
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas));
|
||||
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pRsp->enableAuditDelete));
|
||||
tEndEncode(&encoder);
|
||||
|
||||
_exit:
|
||||
|
@ -6345,6 +6407,11 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
|
|||
if (!tDecodeIsEnd(&decoder)) {
|
||||
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas));
|
||||
}
|
||||
if (!tDecodeIsEnd(&decoder)) {
|
||||
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pRsp->enableAuditDelete));
|
||||
} else {
|
||||
pRsp->enableAuditDelete = 0;
|
||||
}
|
||||
tEndDecode(&decoder);
|
||||
|
||||
_exit:
|
||||
|
|
|
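The new `SAuditReq` codec above follows the usual two-pass pattern: call the serializer with a NULL buffer to size the payload, allocate, then encode. The sketch below shows a round trip under that assumption; the literal field values are placeholders and the struct member sizes are taken from the constants used in `reportDeleteSql` earlier in this merge, not verified here.

```c
/* Hypothetical round-trip for the audit-delete request message. */
static int32_t auditReqRoundTrip(void) {
  SAuditReq req = {0};
  tstrncpy(req.operation, "delete", sizeof(req.operation));
  tstrncpy(req.db, "test_db", sizeof(req.db));          /* placeholder db name */
  tstrncpy(req.table, "t1", sizeof(req.table));         /* placeholder table name */
  req.pSql = "delete from t1 where ts < now - 30d";
  req.sqlLen = (int32_t)strlen(req.pSql);

  int32_t len = tSerializeSAuditReq(NULL, 0, &req);     /* pass 1: size only */
  if (len < 0) return len;
  void* buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return terrno;
  if (tSerializeSAuditReq(buf, len, &req) < 0) {        /* pass 2: encode */
    taosMemoryFree(buf);
    return TSDB_CODE_INVALID_MSG;
  }

  SAuditReq out = {0};
  int32_t code = tDeserializeSAuditReq(buf, len, &out); /* decode allocates out.pSql */
  tFreeSAuditReq(&out);
  taosMemoryFree(buf);
  return code;
}
```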
@@ -297,12 +297,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) {
static void dmGenerateGrant() { mndGenerateMachineCode(); }
static void dmPrintVersion() {
printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version);
printf("git: %s\n", gitinfo);
printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version,
td_compatible_version);
printf("git: %s\n", td_gitinfo);
#ifdef TD_ENTERPRISE
printf("gitOfInternal: %s\n", gitinfoOfInternal);
printf("gitOfInternal: %s\n", td_gitinfoOfInternal);
#endif
printf("build: %s\n", buildinfo);
printf("build: %s\n", td_buildinfo);
}
static void dmPrintHelp() {
@@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  }

  size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
  size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols +
         blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(numOfCols);
  size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
  size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeBufSize;

  SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
  if (pRsp == NULL) {

@@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    pStart += sizeof(SSysTableSchema);
  }

  int32_t len = blockEncode(pBlock, pStart, numOfCols);
  int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, numOfCols);
  if (len < 0) {
    dError("failed to retrieve data since %s", tstrerror(code));
    blockDataDestroy(pBlock);
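The retrieve path now sizes the response with blockGetEncodeSize() and passes that same value into blockEncode(), which has gained a buffer-size parameter, presumably so the encoder can bound its writes and report failure instead of overrunning the destination. A hedged, stand-alone sketch of the convention (the wrapper function itself is illustrative, not part of the patch):

```c
static int32_t encodeBlockToBuffer(SSDataBlock *pBlock, int32_t numOfCols,
                                   void **ppBuf, int32_t *pLen) {
  size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);  // upper bound for the encoded block

  char *buf = taosMemoryCalloc(1, dataEncodeBufSize);
  if (buf == NULL) {
    return terrno;
  }

  int32_t len = blockEncode(pBlock, buf, dataEncodeBufSize, numOfCols);
  if (len < 0) {                                          // encoding failed
    taosMemoryFree(buf);
    return terrno;
  }

  *ppBuf = buf;
  *pLen = len;
  return 0;
}
```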
|
@ -212,6 +212,7 @@ SArray *mmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_AUDIT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@@ -1,5 +1,10 @@
aux_source_directory(src MGMT_SNODE)
add_library(mgmt_snode STATIC ${MGMT_SNODE})

if(${TD_DARWIN})
  target_compile_options(mgmt_snode PRIVATE -Wno-error=deprecated-non-prototype)
endif()

target_include_directories(
  mgmt_snode
  PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
|
@@ -1,5 +1,10 @@
aux_source_directory(src MGMT_VNODE)
add_library(mgmt_vnode STATIC ${MGMT_VNODE})

if(${TD_DARWIN})
  target_compile_options(mgmt_vnode PRIVATE -Wno-error=deprecated-non-prototype)
endif()

target_include_directories(
  mgmt_vnode
  PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
|
@ -138,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
|
|||
pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId);
|
||||
|
||||
int32_t svrVer = 0;
|
||||
code = taosVersionStrToInt(version, &svrVer);
|
||||
code = taosVersionStrToInt(td_version, &svrVer);
|
||||
if (code != 0) {
|
||||
dError("failed to convert version string:%s to int, code:%d", version, code);
|
||||
dError("failed to convert version string:%s to int, code:%d", td_version, code);
|
||||
goto _OVER;
|
||||
}
|
||||
if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) {
|
||||
|
@ -434,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) {
|
|||
rpcInit.startReadTimer = 1;
|
||||
rpcInit.readTimeout = tsReadTimeout;
|
||||
|
||||
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
|
||||
dError("failed to convert version string:%s to int", version);
|
||||
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
|
||||
dError("failed to convert version string:%s to int", td_version);
|
||||
}
|
||||
|
||||
pTrans->clientRpc = rpcOpen(&rpcInit);
|
||||
|
@ -483,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) {
|
|||
rpcInit.startReadTimer = 0;
|
||||
rpcInit.readTimeout = 0;
|
||||
|
||||
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
|
||||
dError("failed to convert version string:%s to int", version);
|
||||
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
|
||||
dError("failed to convert version string:%s to int", td_version);
|
||||
}
|
||||
|
||||
pTrans->statusRpc = rpcOpen(&rpcInit);
|
||||
|
@ -533,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) {
|
|||
rpcInit.startReadTimer = 1;
|
||||
rpcInit.readTimeout = tsReadTimeout;
|
||||
|
||||
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
|
||||
dError("failed to convert version string:%s to int", version);
|
||||
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
|
||||
dError("failed to convert version string:%s to int", td_version);
|
||||
}
|
||||
|
||||
pTrans->syncRpc = rpcOpen(&rpcInit);
|
||||
|
@ -588,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) {
|
|||
rpcInit.compressSize = tsCompressMsgSize;
|
||||
rpcInit.shareConnLimit = tsShareConnLimit * 16;
|
||||
|
||||
if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
|
||||
dError("failed to convert version string:%s to int", version);
|
||||
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
|
||||
dError("failed to convert version string:%s to int", td_version);
|
||||
}
|
||||
|
||||
pTrans->serverRpc = rpcOpen(&rpcInit);
|
||||
|
|
|
@ -54,7 +54,7 @@ void TestClient::DoInit() {
|
|||
rpcInit.parent = this;
|
||||
// rpcInit.secret = (char*)secretEncrypt;
|
||||
// rpcInit.spi = 1;
|
||||
taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
|
||||
taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
|
||||
|
||||
clientRpc = rpcOpen(&rpcInit);
|
||||
ASSERT(clientRpc);
|
||||
|
|
|
@@ -47,6 +47,15 @@ bool mndUpdateArbGroupBySetAssignedLeader(SArbGroup *pGroup, int32_t vgId, char

int32_t mndGetArbGroupSize(SMnode *pMnode);

typedef enum {
  CHECK_SYNC_NONE = 0,
  CHECK_SYNC_SET_ASSIGNED_LEADER = 1,
  CHECK_SYNC_CHECK_SYNC = 2,
  CHECK_SYNC_UPDATE = 3
} ECheckSyncOp;

void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup);

#ifdef __cplusplus
}
#endif
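Factoring the decision logic into mndArbCheckSync() reduces the timer handler to a mechanical switch and lets the logic be exercised without a running SMnode, as the new ArbgroupTest case later in this commit does. A hedged sketch of how a caller might drive it (the snapshot handling and the persist comments are illustrative):

```c
SArbGroup    snapshot = {0};  // filled from a locked copy of the live group
SArbGroup    newGroup = {0};
ECheckSyncOp op       = CHECK_SYNC_NONE;

mndArbCheckSync(&snapshot, taosGetTimestampMs(), &op, &newGroup);

switch (op) {
  case CHECK_SYNC_SET_ASSIGNED_LEADER:
    // re-send the set-assigned-leader request to the current assignee
    break;
  case CHECK_SYNC_CHECK_SYNC:
    // ask both members to verify their sync state
    break;
  case CHECK_SYNC_UPDATE:
    // persist newGroup, as the timer does via the arb-group update batch
    break;
  case CHECK_SYNC_NONE:
  default:
    break;
}
```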
|
|
@ -250,6 +250,12 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
|
|||
pOld->assignedLeader.acked = pNew->assignedLeader.acked;
|
||||
pOld->version++;
|
||||
|
||||
mInfo(
|
||||
"arbgroup:%d, perform update action. members[0].token:%s, members[1].token:%s, isSync:%d, as-dnodeid:%d, "
|
||||
"as-token:%s, as-acked:%d, version:%" PRId64,
|
||||
pOld->vgId, pOld->members[0].state.token, pOld->members[1].state.token, pOld->isSync,
|
||||
pOld->assignedLeader.dnodeId, pOld->assignedLeader.token, pOld->assignedLeader.acked, pOld->version);
|
||||
|
||||
_OVER:
|
||||
(void)taosThreadMutexUnlock(&pOld->mutex);
|
||||
|
||||
|
@ -577,19 +583,77 @@ static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, i
|
|||
return code;
|
||||
}
|
||||
|
||||
void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup) {
|
||||
*pOp = CHECK_SYNC_NONE;
|
||||
int32_t code = 0;
|
||||
|
||||
int32_t vgId = pArbGroup->vgId;
|
||||
|
||||
bool member0IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 0, nowMs);
|
||||
bool member1IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 1, nowMs);
|
||||
SArbAssignedLeader *pAssignedLeader = &pArbGroup->assignedLeader;
|
||||
int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId;
|
||||
|
||||
// 1. has assigned && no response => send req
|
||||
if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) {
|
||||
*pOp = CHECK_SYNC_SET_ASSIGNED_LEADER;
|
||||
return;
|
||||
}
|
||||
|
||||
// 2. both of the two members are timeout => skip
|
||||
if (member0IsTimeout && member1IsTimeout) {
|
||||
return;
|
||||
}
|
||||
|
||||
// 3. no member is timeout => check sync
|
||||
if (member0IsTimeout == false && member1IsTimeout == false) {
|
||||
// no assigned leader and not sync
|
||||
if (currentAssignedDnodeId == 0 && !pArbGroup->isSync) {
|
||||
*pOp = CHECK_SYNC_CHECK_SYNC;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// 4. one of the members is timeout => set assigned leader
|
||||
int32_t candidateIndex = member0IsTimeout ? 1 : 0;
|
||||
SArbGroupMember *pMember = &pArbGroup->members[candidateIndex];
|
||||
|
||||
// has assigned leader and dnodeId not match => skip
|
||||
if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) {
|
||||
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId,
|
||||
pMember->info.dnodeId, currentAssignedDnodeId);
|
||||
return;
|
||||
}
|
||||
|
||||
// not sync => skip
|
||||
if (pArbGroup->isSync == false) {
|
||||
if (currentAssignedDnodeId == pMember->info.dnodeId) {
|
||||
mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
|
||||
pMember->info.dnodeId);
|
||||
} else {
|
||||
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
|
||||
pMember->info.dnodeId);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// is sync && no assigned leader => write to sdb
|
||||
mndArbGroupDupObj(pArbGroup, pNewGroup);
|
||||
mndArbGroupSetAssignedLeader(pNewGroup, candidateIndex);
|
||||
*pOp = CHECK_SYNC_UPDATE;
|
||||
}
|
||||
|
||||
static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
|
||||
int32_t code = 0;
|
||||
int32_t code = 0, lino = 0;
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
SArbGroup *pArbGroup = NULL;
|
||||
SArbGroup arbGroupDup = {0};
|
||||
void *pIter = NULL;
|
||||
SArray *pUpdateArray = NULL;
|
||||
|
||||
char arbToken[TSDB_ARB_TOKEN_SIZE];
|
||||
if ((code = mndGetArbToken(pMnode, arbToken)) != 0) {
|
||||
mError("failed to get arb token for arb-check-sync timer");
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
TAOS_CHECK_EXIT(mndGetArbToken(pMnode, arbToken));
|
||||
|
||||
int64_t term = mndGetTerm(pMnode);
|
||||
if (term < 0) {
|
||||
mError("arb failed to get term since %s", terrstr());
|
||||
|
@ -606,87 +670,63 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
SArray *pUpdateArray = taosArrayInit(16, sizeof(SArbGroup));
|
||||
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_ARBGROUP, pIter, (void **)&pArbGroup);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
SArbGroup arbGroupDup = {0};
|
||||
|
||||
(void)taosThreadMutexLock(&pArbGroup->mutex);
|
||||
mndArbGroupDupObj(pArbGroup, &arbGroupDup);
|
||||
(void)taosThreadMutexUnlock(&pArbGroup->mutex);
|
||||
|
||||
int32_t vgId = arbGroupDup.vgId;
|
||||
|
||||
bool member0IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 0, nowMs);
|
||||
bool member1IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 1, nowMs);
|
||||
SArbAssignedLeader *pAssignedLeader = &arbGroupDup.assignedLeader;
|
||||
int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId;
|
||||
|
||||
// 1. has assigned && is sync && no response => send req
|
||||
if (currentAssignedDnodeId != 0 && arbGroupDup.isSync == true && pAssignedLeader->acked == false) {
|
||||
(void)mndSendArbSetAssignedLeaderReq(pMnode, currentAssignedDnodeId, vgId, arbToken, term,
|
||||
pAssignedLeader->token);
|
||||
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, currentAssignedDnodeId);
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
// 2. both of the two members are timeout => skip
|
||||
if (member0IsTimeout && member1IsTimeout) {
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
// 3. no member is timeout => check sync
|
||||
if (member0IsTimeout == false && member1IsTimeout == false) {
|
||||
// no assigned leader and not sync
|
||||
if (currentAssignedDnodeId == 0 && !arbGroupDup.isSync) {
|
||||
(void)mndSendArbCheckSyncReq(pMnode, arbGroupDup.vgId, arbToken, term, arbGroupDup.members[0].state.token,
|
||||
arbGroupDup.members[1].state.token);
|
||||
}
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
// 4. one of the members is timeout => set assigned leader
|
||||
int32_t candidateIndex = member0IsTimeout ? 1 : 0;
|
||||
SArbGroupMember *pMember = &arbGroupDup.members[candidateIndex];
|
||||
|
||||
// has assigned leader and dnodeId not match => skip
|
||||
if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) {
|
||||
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId,
|
||||
pMember->info.dnodeId, currentAssignedDnodeId);
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
// not sync => skip
|
||||
if (arbGroupDup.isSync == false) {
|
||||
if (currentAssignedDnodeId == pMember->info.dnodeId) {
|
||||
mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
|
||||
pMember->info.dnodeId);
|
||||
} else {
|
||||
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
|
||||
pMember->info.dnodeId);
|
||||
}
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
// is sync && no assigned leader => write to sdb
|
||||
SArbGroup newGroup = {0};
|
||||
mndArbGroupDupObj(&arbGroupDup, &newGroup);
|
||||
mndArbGroupSetAssignedLeader(&newGroup, candidateIndex);
|
||||
if (taosArrayPush(pUpdateArray, &newGroup) == NULL) {
|
||||
taosArrayDestroy(pUpdateArray);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
sdbRelease(pSdb, pArbGroup);
|
||||
|
||||
ECheckSyncOp op = CHECK_SYNC_NONE;
|
||||
SArbGroup newGroup = {0};
|
||||
mndArbCheckSync(&arbGroupDup, nowMs, &op, &newGroup);
|
||||
|
||||
int32_t vgId = arbGroupDup.vgId;
|
||||
SArbAssignedLeader *pAssgndLeader = &arbGroupDup.assignedLeader;
|
||||
int32_t assgndDnodeId = pAssgndLeader->dnodeId;
|
||||
|
||||
switch (op) {
|
||||
case CHECK_SYNC_NONE:
|
||||
mTrace("vgId:%d, arb skip to send msg by check sync", vgId);
|
||||
break;
|
||||
case CHECK_SYNC_SET_ASSIGNED_LEADER:
|
||||
(void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token);
|
||||
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, assgndDnodeId);
|
||||
break;
|
||||
case CHECK_SYNC_CHECK_SYNC:
|
||||
(void)mndSendArbCheckSyncReq(pMnode, vgId, arbToken, term, arbGroupDup.members[0].state.token,
|
||||
arbGroupDup.members[1].state.token);
|
||||
mInfo("vgId:%d, arb send check sync request", vgId);
|
||||
break;
|
||||
case CHECK_SYNC_UPDATE:
|
||||
if (!pUpdateArray) {
|
||||
pUpdateArray = taosArrayInit(16, sizeof(SArbGroup));
|
||||
if (!pUpdateArray) {
|
||||
TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
if (taosArrayPush(pUpdateArray, &newGroup) == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
mError("vgId:%d, arb unknown check sync op:%d", vgId, op);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
TAOS_CHECK_RETURN(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray));
|
||||
TAOS_CHECK_EXIT(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray));
|
||||
|
||||
_exit:
|
||||
if (code != 0) {
|
||||
mError("failed to check sync at line %d since %s", lino, terrstr());
|
||||
}
|
||||
|
||||
taosArrayDestroy(pUpdateArray);
|
||||
return 0;
|
||||
|
|
|
@ -86,6 +86,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq);
|
|||
static int32_t mndProcessNotifyReq(SRpcMsg *pReq);
|
||||
static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq);
|
||||
static int32_t mndProcessStatisReq(SRpcMsg *pReq);
|
||||
static int32_t mndProcessAuditReq(SRpcMsg *pReq);
|
||||
static int32_t mndProcessUpdateDnodeInfoReq(SRpcMsg *pReq);
|
||||
static int32_t mndProcessCreateEncryptKeyReq(SRpcMsg *pRsp);
|
||||
static int32_t mndProcessCreateEncryptKeyRsp(SRpcMsg *pRsp);
|
||||
|
@ -125,6 +126,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
|
|||
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_STATIS, mndProcessStatisReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_AUDIT, mndProcessAuditReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ENCRYPT_KEY, mndProcessCreateEncryptKeyReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_ENCRYPT_KEY_RSP, mndProcessCreateEncryptKeyRsp);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_UPDATE_DNODE_INFO, mndProcessUpdateDnodeInfoReq);
|
||||
|
@@ -604,6 +606,24 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
  return 0;
}

static int32_t mndProcessAuditReq(SRpcMsg *pReq) {
  mTrace("process audit req:%p", pReq);
  if (tsEnableAudit && tsEnableAuditDelete) {
    SMnode   *pMnode = pReq->info.node;
    SAuditReq auditReq = {0};

    TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq));

    mDebug("received audit req:%s, %s, %s, %s", auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql);

    auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql,
                   auditReq.sqlLen);

    tFreeSAuditReq(&auditReq);
  }
  return 0;
}
static int32_t mndUpdateDnodeObj(SMnode *pMnode, SDnodeObj *pDnode) {
|
||||
int32_t code = 0, lino = 0;
|
||||
SDnodeInfoReq infoReq = {0};
|
||||
|
|
|
@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
|
|||
}
|
||||
|
||||
// cluster info
|
||||
tstrncpy(pClusterInfo->version, version, sizeof(pClusterInfo->version));
|
||||
tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version));
|
||||
pClusterInfo->monitor_interval = tsMonitorInterval;
|
||||
pClusterInfo->connections_total = mndGetNumOfConnections(pMnode);
|
||||
pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB);
|
||||
|
|
|
@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) {
|
||||
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version);
|
||||
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) {
|
||||
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version);
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
|
@ -305,12 +305,13 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
|
|||
connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
|
||||
connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
|
||||
connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
|
||||
connectRsp.enableAuditDelete = tsEnableAuditDelete;
|
||||
tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
|
||||
connectRsp.whiteListVer = pUser->ipWhiteListVer;
|
||||
|
||||
(void)strcpy(connectRsp.sVer, version);
|
||||
(void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version,
|
||||
buildinfo, gitinfo);
|
||||
tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer));
|
||||
(void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version,
|
||||
td_buildinfo, td_gitinfo);
|
||||
mndGetMnodeEpSet(pMnode, &connectRsp.epSet);
|
||||
|
||||
int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp);
|
||||
|
@ -709,6 +710,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
|
|||
tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
|
||||
batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
|
||||
batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
|
||||
batchRsp.enableAuditDelete = tsEnableAuditDelete;
|
||||
|
||||
int32_t sz = taosArrayGetSize(batchReq.reqs);
|
||||
for (int i = 0; i < sz; i++) {
|
||||
|
@ -813,7 +815,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) {
|
|||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
SServerVerRsp rsp = {0};
|
||||
tstrncpy(rsp.ver, version, sizeof(rsp.ver));
|
||||
tstrncpy(rsp.ver, td_version, sizeof(rsp.ver));
|
||||
|
||||
int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);
|
||||
if (contLen < 0) {
|
||||
|
|
|
@ -248,7 +248,7 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou
|
|||
return code;
|
||||
}
|
||||
|
||||
mDebug("doAddSinkTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
|
||||
mDebug("doAddSinkTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, isFillhistory);
|
||||
|
||||
pTask->info.nodeId = pVgroup->vgId;
|
||||
pTask->info.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
|
@ -364,12 +364,13 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh
|
|||
static void addNewTaskList(SStreamObj* pStream) {
|
||||
SArray* pTaskList = taosArrayInit(0, POINTER_BYTES);
|
||||
if (taosArrayPush(pStream->tasks, &pTaskList) == NULL) {
|
||||
mError("failed to put array");
|
||||
mError("failed to put into array");
|
||||
}
|
||||
|
||||
if (pStream->conf.fillHistory) {
|
||||
pTaskList = taosArrayInit(0, POINTER_BYTES);
|
||||
if (taosArrayPush(pStream->pHTasksList, &pTaskList) == NULL) {
|
||||
mError("failed to put array");
|
||||
mError("failed to put into array");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -402,7 +403,8 @@ static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStre
|
|||
return code;
|
||||
}
|
||||
|
||||
mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
|
||||
mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId,
|
||||
isFillhistory);
|
||||
|
||||
if (pStream->conf.fillHistory) {
|
||||
haltInitialTaskStatus(pTask, plan, isFillhistory);
|
||||
|
@ -512,19 +514,20 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan,
|
|||
SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) {
|
||||
int32_t code = 0;
|
||||
SStreamTask* pTask = NULL;
|
||||
const char* id = NULL;
|
||||
|
||||
code = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
id = pTask->id.idStr;
|
||||
if (pSnode != NULL) {
|
||||
code = mndAssignStreamTaskToSnode(pMnode, pTask, plan, pSnode);
|
||||
mDebug("doAddAggTask taskId:%s, snode id:%d, isFillHistory:%d", pTask->id.idStr, pSnode->id, isFillhistory);
|
||||
|
||||
mDebug("doAddAggTask taskId:%s, %p snode id:%d, isFillHistory:%d", id, pTask, pSnode->id, isFillhistory);
|
||||
} else {
|
||||
code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup);
|
||||
mDebug("doAddAggTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
|
||||
mDebug("doAddAggTask taskId:%s, %p vgId:%d, isFillHistory:%d", id, pTask, pVgroup->vgId, isFillhistory);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -678,7 +681,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
|
|||
if (numOfPlanLevel > 1 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) {
|
||||
// add extra sink
|
||||
hasExtraSink = true;
|
||||
int32_t code = addSinkTask(pMnode, pStream, pEpset);
|
||||
code = addSinkTask(pMnode, pStream, pEpset);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -333,8 +333,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
|
|||
mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows);
|
||||
}
|
||||
|
||||
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns +
|
||||
blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock));
|
||||
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
|
||||
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeBufSize;
|
||||
|
||||
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
|
||||
if (pRsp == NULL) {
|
||||
|
@ -361,7 +361,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
|
|||
pStart += sizeof(SSysTableSchema);
|
||||
}
|
||||
|
||||
int32_t len = blockEncode(pBlock, pStart, pShow->pMeta->numOfColumns);
|
||||
int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, pShow->pMeta->numOfColumns);
|
||||
if(len < 0){
|
||||
mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, tstrerror(code));
|
||||
code = terrno;
|
||||
|
|
|
@ -1294,9 +1294,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
|
|||
void* p = taosArrayPush(pList, &in);
|
||||
if (p) {
|
||||
int32_t currentSize = taosArrayGetSize(pList);
|
||||
mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64
|
||||
"s) beyond concurrently launch threshold:%d",
|
||||
pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize);
|
||||
mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64
|
||||
"s), concurrently launch threshold:%d",
|
||||
pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000,
|
||||
tsMaxConcurrentCheckpoint);
|
||||
} else {
|
||||
mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid);
|
||||
}
|
||||
|
@ -1348,7 +1349,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
|
|||
code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true);
|
||||
sdbRelease(pSdb, p);
|
||||
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
started += 1;
|
||||
|
||||
if (started >= capacity) {
|
||||
|
@ -1356,6 +1357,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
|
|||
(started + numOfCheckpointTrans));
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
mError("failed to start checkpoint trans, code:%s", tstrerror(code));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -115,9 +115,9 @@ static char* mndBuildTelemetryReport(SMnode* pMnode) {
|
|||
snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER);
|
||||
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), &lino, _OVER);
|
||||
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER);
|
||||
|
||||
mndBuildRuntimeInfo(pMnode, pJson);
|
||||
|
|
|
@ -80,17 +80,17 @@ TEST_F(ArbgroupTest, 01_encode_decode_sdb) {
|
|||
|
||||
SArbGroup* pNewGroup = (SArbGroup*)sdbGetRowObj(pRow);
|
||||
|
||||
EXPECT_EQ(group.vgId, pNewGroup->vgId);
|
||||
EXPECT_EQ(group.dbUid, pNewGroup->dbUid);
|
||||
EXPECT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId);
|
||||
EXPECT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId);
|
||||
EXPECT_EQ(group.isSync, pNewGroup->isSync);
|
||||
EXPECT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId);
|
||||
ASSERT_EQ(group.vgId, pNewGroup->vgId);
|
||||
ASSERT_EQ(group.dbUid, pNewGroup->dbUid);
|
||||
ASSERT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId);
|
||||
ASSERT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId);
|
||||
ASSERT_EQ(group.isSync, pNewGroup->isSync);
|
||||
ASSERT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId);
|
||||
|
||||
EXPECT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token));
|
||||
EXPECT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token));
|
||||
EXPECT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token));
|
||||
EXPECT_EQ(group.version, pNewGroup->version);
|
||||
ASSERT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token));
|
||||
ASSERT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token));
|
||||
ASSERT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token));
|
||||
ASSERT_EQ(group.version, pNewGroup->version);
|
||||
|
||||
taosMemoryFree(pRow);
|
||||
taosMemoryFree(pRaw);
|
||||
|
@ -129,9 +129,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) {
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup);
|
||||
|
||||
EXPECT_FALSE(updateToken);
|
||||
EXPECT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
EXPECT_NE(group.members[0].state.lastHbMs, nowMs);
|
||||
ASSERT_EQ(updateToken, false);
|
||||
ASSERT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
ASSERT_NE(group.members[0].state.lastHbMs, nowMs);
|
||||
}
|
||||
|
||||
{ // old token
|
||||
|
@ -144,9 +144,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) {
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup);
|
||||
|
||||
EXPECT_FALSE(updateToken);
|
||||
EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
EXPECT_EQ(group.members[0].state.lastHbMs, nowMs);
|
||||
ASSERT_EQ(updateToken, false);
|
||||
ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
ASSERT_EQ(group.members[0].state.lastHbMs, nowMs);
|
||||
}
|
||||
|
||||
{ // new token
|
||||
|
@ -159,14 +159,14 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) {
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup);
|
||||
|
||||
EXPECT_TRUE(updateToken);
|
||||
EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
EXPECT_EQ(group.members[0].state.lastHbMs, nowMs);
|
||||
ASSERT_EQ(updateToken, true);
|
||||
ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq);
|
||||
ASSERT_EQ(group.members[0].state.lastHbMs, nowMs);
|
||||
|
||||
EXPECT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken));
|
||||
EXPECT_FALSE(newGroup.isSync);
|
||||
EXPECT_EQ(newGroup.assignedLeader.dnodeId, 0);
|
||||
EXPECT_EQ(std::string(newGroup.assignedLeader.token).size(), 0);
|
||||
ASSERT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken));
|
||||
ASSERT_EQ(newGroup.isSync, false);
|
||||
ASSERT_EQ(newGroup.assignedLeader.dnodeId, 0);
|
||||
ASSERT_EQ(std::string(newGroup.assignedLeader.token).size(), 0);
|
||||
}
|
||||
|
||||
taosThreadMutexDestroy(&group.mutex);
|
||||
|
@ -203,7 +203,7 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) {
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup);
|
||||
|
||||
EXPECT_FALSE(updateIsSync);
|
||||
ASSERT_EQ(updateIsSync, false);
|
||||
}
|
||||
|
||||
{ // newIsSync
|
||||
|
@ -216,8 +216,8 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) {
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup);
|
||||
|
||||
EXPECT_TRUE(updateIsSync);
|
||||
EXPECT_TRUE(newGroup.isSync);
|
||||
ASSERT_EQ(updateIsSync, true);
|
||||
ASSERT_EQ(newGroup.isSync, true);
|
||||
}
|
||||
|
||||
taosThreadMutexDestroy(&group.mutex);
|
||||
|
@ -254,7 +254,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup);
|
||||
|
||||
EXPECT_FALSE(updateAssigned);
|
||||
ASSERT_EQ(updateAssigned, false);
|
||||
}
|
||||
|
||||
{ // errcode != TSDB_CODE_SUCCESS
|
||||
|
@ -265,7 +265,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup);
|
||||
|
||||
EXPECT_FALSE(updateAssigned);
|
||||
ASSERT_EQ(updateAssigned, false);
|
||||
}
|
||||
|
||||
{ // errcode == TSDB_CODE_SUCCESS
|
||||
|
@ -276,11 +276,81 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){
|
|||
SArbGroup newGroup = {0};
|
||||
bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup);
|
||||
|
||||
EXPECT_TRUE(updateAssigned);
|
||||
EXPECT_FALSE(newGroup.isSync);
|
||||
ASSERT_EQ(updateAssigned, true);
|
||||
ASSERT_EQ(newGroup.isSync, false);
|
||||
}
|
||||
|
||||
taosThreadMutexDestroy(&group.mutex);
|
||||
}
|
||||
|
||||
TEST_F(ArbgroupTest, 05_check_sync_timer) {
|
||||
const int32_t assgndDnodeId = 1;
|
||||
const int32_t vgId = 5;
|
||||
const int64_t nowMs = 173044838300;
|
||||
|
||||
SArbGroup group = {0};
|
||||
group.vgId = vgId;
|
||||
group.dbUid = 1234;
|
||||
group.members[0].info.dnodeId = assgndDnodeId;
|
||||
group.members[0].state.lastHbMs = nowMs - 10;
|
||||
|
||||
group.members[1].info.dnodeId = 2;
|
||||
group.members[1].state.lastHbMs = nowMs - 10;
|
||||
|
||||
group.isSync = 1;
|
||||
taosThreadMutexInit(&group.mutex, NULL);
|
||||
|
||||
SArbAssignedLeader assgnedLeader = {0};
|
||||
assgnedLeader.dnodeId = assgndDnodeId;
|
||||
assgnedLeader.acked = false;
|
||||
strncpy(assgnedLeader.token, group.members[0].state.token, TSDB_ARB_TOKEN_SIZE);
|
||||
|
||||
SArbAssignedLeader noneAsgndLeader = {0};
|
||||
noneAsgndLeader.dnodeId = 0;
|
||||
noneAsgndLeader.acked = false;
|
||||
|
||||
ECheckSyncOp op = CHECK_SYNC_NONE;
|
||||
SArbGroup newGroup = {0};
|
||||
|
||||
// 1. asgnd,sync,noAck --> send set assigned
|
||||
group.assignedLeader = assgnedLeader;
|
||||
group.assignedLeader.acked = false;
|
||||
group.isSync = true;
|
||||
mndArbCheckSync(&group, nowMs, &op, &newGroup);
|
||||
|
||||
ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER);
|
||||
|
||||
// 2. asgnd,notSync,noAck --> send set assgnd
|
||||
newGroup = {0};
|
||||
group.assignedLeader = assgnedLeader;
|
||||
group.isSync = false;
|
||||
group.assignedLeader.acked = false;
|
||||
mndArbCheckSync(&group, nowMs, &op, &newGroup);
|
||||
|
||||
ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER);
|
||||
|
||||
// 3. noAsgnd,notSync,noAck(init) --> check sync
|
||||
newGroup = {0};
|
||||
group.assignedLeader = noneAsgndLeader;
|
||||
group.isSync = false;
|
||||
group.assignedLeader.acked = false;
|
||||
mndArbCheckSync(&group, nowMs, &op, &newGroup);
|
||||
|
||||
ASSERT_EQ(op, CHECK_SYNC_CHECK_SYNC);
|
||||
|
||||
// 4. noAsgnd,sync,noAck,one timeout--> update arbgroup (asgnd,sync,noAck)
|
||||
newGroup = {0};
|
||||
group.assignedLeader = noneAsgndLeader;
|
||||
group.isSync = true;
|
||||
group.assignedLeader.acked = false;
|
||||
group.members[1].state.lastHbMs = nowMs - 2 * tsArbSetAssignedTimeoutSec * 1000; // member1 timeout
|
||||
mndArbCheckSync(&group, nowMs, &op, &newGroup);
|
||||
|
||||
ASSERT_EQ(op, CHECK_SYNC_UPDATE);
|
||||
ASSERT_EQ(newGroup.assignedLeader.dnodeId, assgndDnodeId);
|
||||
ASSERT_EQ(std::string(newGroup.assignedLeader.token), std::string(group.members[0].state.token));
|
||||
ASSERT_EQ(newGroup.isSync, true);
|
||||
ASSERT_EQ(newGroup.assignedLeader.acked, false);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
|
|
@ -39,7 +39,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) {
|
|||
strcpy(connectReq.db, "");
|
||||
strcpy(connectReq.user, "root");
|
||||
strcpy(connectReq.passwd, secretEncrypt);
|
||||
strcpy(connectReq.sVer, version);
|
||||
strcpy(connectReq.sVer, td_version);
|
||||
|
||||
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
|
||||
void* pReq = rpcMallocCont(contLen);
|
||||
|
@ -76,7 +76,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) {
|
|||
strcpy(connectReq.db, "not_exist_db");
|
||||
strcpy(connectReq.user, "root");
|
||||
strcpy(connectReq.passwd, secretEncrypt);
|
||||
strcpy(connectReq.sVer, version);
|
||||
strcpy(connectReq.sVer, td_version);
|
||||
|
||||
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
|
||||
void* pReq = rpcMallocCont(contLen);
|
||||
|
|
|
@ -64,7 +64,7 @@ TEST_F(MndTestShow, 03_ShowMsg_Conn) {
|
|||
strcpy(connectReq.db, "");
|
||||
strcpy(connectReq.user, "root");
|
||||
strcpy(connectReq.passwd, secretEncrypt);
|
||||
strcpy(connectReq.sVer, version);
|
||||
strcpy(connectReq.sVer, td_version);
|
||||
|
||||
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
|
||||
void* pReq = rpcMallocCont(contLen);
|
||||
|
|
|
@ -38,24 +38,23 @@ int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProce
|
|||
streamTaskOpenAllUpstreamInput(pTask);
|
||||
|
||||
streamTaskResetUpstreamStageInfo(pTask);
|
||||
streamSetupScheduleTrigger(pTask);
|
||||
|
||||
SCheckpointInfo *pChkInfo = &pTask->chkInfo;
|
||||
tqSetRestoreVersionInfo(pTask);
|
||||
|
||||
char *p = streamTaskGetStatus(pTask).name;
|
||||
if (pTask->info.fillHistory) {
|
||||
sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms",
|
||||
SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
|
||||
(int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam);
|
||||
} else {
|
||||
sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms",
|
||||
SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
|
||||
(int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam);
|
||||
}
|
||||
|
|
|
@@ -238,13 +238,18 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
}

static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTaskId *pId) {
  STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
  STaskId      id = {.streamId = pId->streamId, .taskId = pId->taskId};
  SStreamTask *pTask = NULL;

  streamMetaRLock(pMeta);
  SStreamTask **ppTask = (SStreamTask **)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
  if (ppTask && *ppTask) {
    pItem->submitReqVer = (*ppTask)->chkInfo.checkpointVer;
    pItem->fetchResultVer = (*ppTask)->info.delaySchedParam;

  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
  if (code == 0) {
    pItem->submitReqVer = pTask->chkInfo.checkpointVer;
    pItem->fetchResultVer = pTask->info.delaySchedParam;
    streamMetaReleaseTask(pMeta, pTask);
  }

  streamMetaRUnLock(pMeta);
}
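This hunk is one instance of a pattern applied throughout the commit: direct SStreamTask** lookups in pMeta->pTasksMap are replaced by streamMetaAcquireTaskUnsafe() paired with streamMetaReleaseTask(), so the task is reference-counted while it is being used. A hedged sketch of the shape callers now follow (the wrapper function and the example field read are illustrative):

```c
static void withStreamTask(SStreamMeta *pMeta, int64_t streamId, int32_t taskId) {
  STaskId      id = {.streamId = streamId, .taskId = taskId};
  SStreamTask *pTask = NULL;

  streamMetaRLock(pMeta);                                          // lookup still happens under the meta lock
  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);  // takes a reference on success
  if (code == 0) {
    int64_t ver = pTask->chkInfo.checkpointVer;                    // any use of pTask is safe here
    (void)ver;
    streamMetaReleaseTask(pMeta, pTask);                           // drop the reference
  }
  streamMetaRUnLock(pMeta);
}
```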
|
|
|
@ -765,7 +765,6 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
|
|||
}
|
||||
|
||||
streamTaskResetUpstreamStageInfo(pTask);
|
||||
streamSetupScheduleTrigger(pTask);
|
||||
|
||||
SCheckpointInfo* pChkInfo = &pTask->chkInfo;
|
||||
tqSetRestoreVersionInfo(pTask);
|
||||
|
@ -774,19 +773,19 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
|
|||
const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus);
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, cur-status:%s, next-status:%s fill-history:%d, related stream task:0x%x "
|
||||
"delaySched:%" PRId64 " ms, inputVer:%" PRId64,
|
||||
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
|
||||
(int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer);
|
||||
} else {
|
||||
tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x "
|
||||
"delaySched:%" PRId64 " ms, inputVer:%" PRId64,
|
||||
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
|
||||
(int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer);
|
||||
|
||||
|
|
|
@ -1113,12 +1113,20 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
|
|||
break;
|
||||
}
|
||||
|
||||
SStreamTask* pTask = *(SStreamTask**)pIter;
|
||||
if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) {
|
||||
int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr);
|
||||
continue;
|
||||
int64_t refId = *(int64_t*)pIter;
|
||||
SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, refId);
|
||||
if (pTask != NULL) {
|
||||
int32_t taskId = pTask->id.taskId;
|
||||
|
||||
if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) {
|
||||
int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d, s-task:0x%x update qualified table error for stream task", vgId, taskId);
|
||||
}
|
||||
}
|
||||
int32_t ret = taosReleaseRef(streamTaskRefPool, refId);
|
||||
if (ret) {
|
||||
tqError("vgId:%d release task refId failed, refId:%" PRId64, vgId, refId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,7 +16,8 @@
|
|||
#include "tq.h"
|
||||
|
||||
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) {
|
||||
int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock);
|
||||
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
|
||||
int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize;
|
||||
void* buf = taosMemoryCalloc(1, dataStrLen);
|
||||
if (buf == NULL) {
|
||||
return terrno;
|
||||
|
@ -28,7 +29,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t
|
|||
pRetrieve->compressed = 0;
|
||||
pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);
|
||||
|
||||
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols);
|
||||
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeBufSize, numOfCols);
|
||||
if(actualLen < 0){
|
||||
taosMemoryFree(buf);
|
||||
return terrno;
|
||||
|
|
|
@ -79,7 +79,7 @@ static void doStartScanWal(void* param, void* tmrId) {
|
|||
|
||||
SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param;
|
||||
|
||||
SStreamMeta* pMeta = taosAcquireRef(streamMetaId, pParam->metaId);
|
||||
SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, pParam->metaId);
|
||||
if (pMeta == NULL) {
|
||||
tqError("metaRid:%" PRId64 " not valid now, stream meta has been freed", pParam->metaId);
|
||||
taosMemoryFree(pParam);
|
||||
|
@ -97,7 +97,7 @@ static void doStartScanWal(void* param, void* tmrId) {
|
|||
tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code));
|
||||
}
|
||||
|
||||
code = taosReleaseRef(streamMetaId, pParam->metaId);
|
||||
code = taosReleaseRef(streamMetaRefPool, pParam->metaId);
|
||||
if (code) {
|
||||
tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId,
|
||||
tstrerror(code));
|
||||
|
|
|
@ -628,6 +628,9 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void*
|
|||
tmp = taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
TSDB_CHECK_NULL(tmp, code, line, END, terrno)
|
||||
colDataSetNULL(tmp, i);
|
||||
tmp = taosArrayGet(pDelBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
|
||||
TSDB_CHECK_NULL(tmp, code, line, END, terrno)
|
||||
colDataSetNULL(tmp, i);
|
||||
}
|
||||
|
||||
if (type == 0) {
|
||||
|
@ -685,19 +688,21 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
|
|||
continue;
|
||||
}
|
||||
|
||||
STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
|
||||
SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if (ppTask == NULL) {
|
||||
STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
|
||||
SStreamTask* pTask = NULL;
|
||||
|
||||
code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d failed to acquire task:0x%x in retrieving progress", pMeta->vgId, pId->taskId);
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((*ppTask)->info.taskLevel != TASK_LEVEL__SOURCE) {
|
||||
if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
// here we get the required stream source task
|
||||
SStreamTask* pTask = *ppTask;
|
||||
*fhFinished = !HAS_RELATED_FILLHISTORY_TASK(pTask);
|
||||
|
||||
int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader);
|
||||
|
@ -713,6 +718,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
|
|||
SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0);
|
||||
if (pReader == NULL) {
|
||||
tqError("failed to open wal reader to extract exec progress, vgId:%d", pMeta->vgId);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -738,6 +744,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
|
|||
}
|
||||
|
||||
walCloseReader(pReader);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
}
|
||||
|
||||
streamMetaRUnLock(pMeta);
|
||||
|
|
|
@ -138,13 +138,15 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream
|
|||
|
||||
// this is to process request from transaction, always return true.
|
||||
int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) {
|
||||
int32_t vgId = pMeta->vgId;
|
||||
char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t len = pMsg->contLen - sizeof(SMsgHead);
|
||||
SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};
|
||||
int64_t st = taosGetTimestampMs();
|
||||
bool updated = false;
|
||||
int32_t code = 0;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t len = pMsg->contLen - sizeof(SMsgHead);
|
||||
SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};
|
||||
int64_t st = taosGetTimestampMs();
|
||||
bool updated = false;
|
||||
int32_t code = 0;
|
||||
SStreamTask* pTask = NULL;
|
||||
SStreamTask* pHTask = NULL;
|
||||
|
||||
SStreamTaskNodeUpdateMsg req = {0};
|
||||
|
||||
|
@ -170,9 +172,9 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
streamMetaWLock(pMeta);
|
||||
|
||||
// the task epset may be updated again and again, when replaying the WAL, the task may be in stop status.
|
||||
STaskId id = {.streamId = req.streamId, .taskId = req.taskId};
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if (ppTask == NULL || *ppTask == NULL) {
|
||||
STaskId id = {.streamId = req.streamId, .taskId = req.taskId};
|
||||
code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d failed to acquire task:0x%x when handling update task epset, it may have been dropped", vgId,
|
||||
req.taskId);
|
||||
rsp.code = TSDB_CODE_SUCCESS;
|
||||
|
@ -181,12 +183,13 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
return rsp.code;
|
||||
}
|
||||
|
||||
SStreamTask* pTask = *ppTask;
|
||||
const char* idstr = pTask->id.idStr;
|
||||
const char* idstr = pTask->id.idStr;
|
||||
|
||||
if (req.transId <= 0) {
|
||||
tqError("vgId:%d invalid update nodeEp task, transId:%d, discard", vgId, req.taskId);
|
||||
rsp.code = TSDB_CODE_SUCCESS;
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
taosArrayDestroy(req.pNodeList);
|
||||
|
@ -197,6 +200,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
bool update = streamMetaInitUpdateTaskList(pMeta, req.transId);
|
||||
if (!update) {
|
||||
rsp.code = TSDB_CODE_SUCCESS;
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
taosArrayDestroy(req.pNodeList);
|
||||
|
@ -211,7 +216,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
tqDebug("s-task:%s (vgId:%d) already update in transId:%d, discard the nodeEp update msg", idstr, vgId,
|
||||
req.transId);
|
||||
rsp.code = TSDB_CODE_SUCCESS;
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
taosArrayDestroy(req.pNodeList);
|
||||
return rsp.code;
|
||||
}
|
||||
|
@ -227,24 +235,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
|
||||
streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, pTask->id.idStr);
|
||||
|
||||
SStreamTask** ppHTask = NULL;
|
||||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
|
||||
if (ppHTask == NULL || *ppHTask == NULL) {
|
||||
code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHTask);
|
||||
if (code != 0) {
|
||||
tqError(
|
||||
"vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel "
|
||||
"stream task:0x%x",
|
||||
vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId);
|
||||
CLEAR_RELATED_FILLHISTORY_TASK(pTask);
|
||||
} else {
|
||||
tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
|
||||
bool updateEpSet = streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
|
||||
tqDebug("s-task:%s fill-history task update nodeEp along with stream task", pHTask->id.idStr);
|
||||
bool updateEpSet = streamTaskUpdateEpsetInfo(pHTask, req.pNodeList);
|
||||
if (updateEpSet) {
|
||||
updated = updateEpSet;
|
||||
}
|
||||
|
||||
streamTaskResetStatus(*ppHTask);
|
||||
streamTaskStopMonitorCheckRsp(&(*ppHTask)->taskCheckInfo, (*ppHTask)->id.idStr);
|
||||
streamTaskResetStatus(pHTask);
|
||||
streamTaskStopMonitorCheckRsp(&pHTask->taskCheckInfo, pHTask->id.idStr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -256,8 +263,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code));
|
||||
}
|
||||
|
||||
if (ppHTask != NULL) {
|
||||
code = streamMetaSaveTask(pMeta, *ppHTask);
|
||||
if (pHTask != NULL) {
|
||||
code = streamMetaSaveTask(pMeta, pHTask);
|
||||
if (code) {
|
||||
tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code));
|
||||
}
|
||||
|
@ -271,15 +278,17 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
tqError("s-task:%s vgId:%d failed to stop task, code:%s", idstr, vgId, tstrerror(code));
|
||||
}
|
||||
|
||||
if (ppHTask != NULL) {
|
||||
code = streamTaskStop(*ppHTask);
|
||||
if (pHTask != NULL) {
|
||||
code = streamTaskStop(pHTask);
|
||||
if (code) {
|
||||
tqError("s-task:%s vgId:%d failed to stop related history task, code:%s", idstr, vgId, tstrerror(code));
|
||||
}
|
||||
}
|
||||
|
||||
// keep info
|
||||
streamMetaAddIntoUpdateTaskList(pMeta, pTask, (ppHTask != NULL) ? (*ppHTask) : NULL, req.transId, st);
|
||||
streamMetaAddIntoUpdateTaskList(pMeta, pTask, (pHTask != NULL) ? (pHTask) : NULL, req.transId, st);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
streamMetaReleaseTask(pMeta, pHTask);
|
||||
|
||||
rsp.code = TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -643,7 +652,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
|
|||
if (code < 0) {
|
||||
tqError("failed to add s-task:0x%x into vgId:%d meta, existed:%d, code:%s", vgId, taskId, numOfTasks,
|
||||
tstrerror(code));
|
||||
tFreeStreamTask(pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -673,7 +681,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
|
|||
}
|
||||
} else {
|
||||
tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store, total:%d", vgId, taskId, numOfTasks);
|
||||
tFreeStreamTask(pTask);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -681,25 +688,25 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
|
|||
|
||||
int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
|
||||
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
|
||||
int32_t code = 0;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
STaskId hTaskId = {0};
|
||||
SStreamTask* pTask = NULL;
|
||||
|
||||
int32_t code = 0;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
STaskId hTaskId = {0};
|
||||
tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);
|
||||
|
||||
streamMetaWLock(pMeta);
|
||||
|
||||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if ((ppTask != NULL) && ((*ppTask) != NULL)) {
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask);
|
||||
SStreamTask* pTask = *ppTask;
|
||||
|
||||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
|
||||
if (code == 0) {
|
||||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
hTaskId.streamId = pTask->hTaskInfo.id.streamId;
|
||||
hTaskId.taskId = pTask->hTaskInfo.id.taskId;
|
||||
}
|
||||
|
||||
// clear the relationship, and then release the stream tasks, to avoid invalid accessing of already freed
|
||||
// related stream(history) task
|
||||
streamTaskSetRemoveBackendFiles(pTask);
|
||||
code = streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
|
@ -742,18 +749,19 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
|
|||
|
||||
int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored, char* msg) {
|
||||
SVUpdateCheckpointInfoReq* pReq = (SVUpdateCheckpointInfoReq*)msg;
|
||||
int32_t code = 0;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
SStreamTask* pTask = NULL;
|
||||
|
||||
int32_t code = 0;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
tqDebug("vgId:%d receive msg to update-checkpoint-info for s-task:0x%x", vgId, pReq->taskId);
|
||||
|
||||
streamMetaWLock(pMeta);
|
||||
|
||||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
|
||||
if (ppTask != NULL && (*ppTask) != NULL) {
|
||||
code = streamTaskUpdateTaskCheckpointInfo(*ppTask, restored, pReq);
|
||||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
|
||||
if (code == 0) {
|
||||
code = streamTaskUpdateTaskCheckpointInfo(pTask, restored, pReq);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
} else { // failed to get the task.
|
||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
|
||||
tqError(
|
||||
|
@ -763,7 +771,6 @@ int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored
|
|||
}
|
||||
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
// always return success when handling the requirement issued by mnode during transaction.
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -789,11 +796,6 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
|
|||
tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d, ts:%" PRId64, vgId,
|
||||
pMeta->updateInfo.completeTransId, pMeta->updateInfo.completeTs);
|
||||
|
||||
while (streamMetaTaskInTimer(pMeta)) {
|
||||
tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
|
||||
taosMsleep(100);
|
||||
}
|
||||
|
||||
streamMetaWLock(pMeta);
|
||||
streamMetaClear(pMeta);
|
||||
|
||||
|
|
|
@ -22,13 +22,16 @@
|
|||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
|
||||
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
|
||||
|
||||
void azEnd() {}
|
||||
|
||||
#if defined(USE_S3)
|
||||
|
||||
#include <azure/core.hpp>
|
||||
#include <azure/storage/blobs.hpp>
|
||||
#include "td_block_blob_client.hpp"
|
||||
|
||||
// Add appropriate using namespace directives
|
||||
using namespace Azure::Storage;
|
||||
using namespace Azure::Storage::Blobs;
|
||||
|
||||
|
@ -40,10 +43,6 @@ extern char tsS3BucketName[TSDB_FQDN_LEN];
|
|||
extern int8_t tsS3Enabled;
|
||||
extern int8_t tsS3EpNum;
|
||||
|
||||
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
|
||||
|
||||
void azEnd() {}
|
||||
|
||||
static void checkPrint(const char *fmt, ...) {
|
||||
va_list arg_ptr;
|
||||
va_start(arg_ptr, fmt);
|
||||
|
@ -223,7 +222,6 @@ static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *objec
|
|||
uint8_t blobContent[] = "Hello Azure!";
|
||||
// Create the block blob client
|
||||
// BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
|
||||
// TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName));
|
||||
TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
|
||||
|
||||
blobClient.UploadFrom(file, offset, size);
|
||||
|
@ -467,7 +465,7 @@ int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
|
|||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
|
||||
static int32_t azGetObjectsByPrefixImpl(const char *prefix, const char *path) {
|
||||
const std::string delimiter = "/";
|
||||
std::string accountName = tsS3AccessKeyId[0];
|
||||
std::string accountKey = tsS3AccessKeySecret[0];
|
||||
|
@ -514,6 +512,23 @@ int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
  return 0;
}

int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
  int32_t code = 0;

  try {
    code = azGetObjectsByPrefixImpl(prefix, path);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}
|
||||
|
||||
int32_t azDeleteObjects(const char *object_name[], int nobject) {
|
||||
for (int i = 0; i < nobject; ++i) {
|
||||
azDeleteObjectsByPrefix(object_name[i]);
|
||||
|
@ -524,10 +539,6 @@ int32_t azDeleteObjects(const char *object_name[], int nobject) {
|
|||
|
||||
#else
|
||||
|
||||
int32_t azBegin() { return TSDB_CODE_SUCCESS; }
|
||||
|
||||
void azEnd() {}
|
||||
|
||||
int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }
|
||||
|
||||
int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
|
||||
@ -35,7 +35,8 @@
|
|||
extern SConfig* tsCfg;
|
||||
|
||||
static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) {
|
||||
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
|
||||
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
|
||||
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
|
||||
*pRsp = taosMemoryCalloc(1, rspSize);
|
||||
if (NULL == *pRsp) {
|
||||
return terrno;
|
||||
|
@ -49,8 +50,8 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe
|
|||
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
|
||||
(*pRsp)->numOfCols = htonl(numOfCols);
|
||||
|
||||
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, numOfCols);
|
||||
if (len < 0) {
|
||||
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols);
|
||||
if(len < 0) {
|
||||
taosMemoryFree(*pRsp);
|
||||
return terrno;
|
||||
}
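For reference, the change above switches `blockEncode` to take the destination buffer's capacity, so an oversized encode can fail cleanly instead of writing past the allocation. A minimal sketch of the resulting call pattern follows; it assumes the TDengine internal headers and the helper signatures exactly as they appear in this diff, and the wrapper name `encodeBlockIntoRsp` is illustrative.

```c
// Sketch only: size first, then encode into a buffer of that exact capacity.
static int32_t encodeBlockIntoRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** ppRsp) {
  size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);  // upper bound for the encoded payload
  size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;

  SRetrieveTableRsp* pRsp = taosMemoryCalloc(1, rspSize);
  if (pRsp == NULL) {
    return terrno;
  }

  // passing the capacity lets blockEncode report failure instead of overrunning the buffer
  int32_t len = blockEncode(pBlock, pRsp->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols);
  if (len < 0) {
    taosMemoryFree(pRsp);
    return terrno;
  }

  *ppRsp = pRsp;
  return TSDB_CODE_SUCCESS;
}
```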
|
||||
@ -1966,7 +1966,8 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
|
|||
|
||||
pBlock->info.rows = rowNum;
|
||||
|
||||
int32_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
|
||||
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
|
||||
int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
|
||||
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize);
|
||||
if (NULL == rsp) {
|
||||
|
@ -1977,7 +1978,7 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
|
|||
rsp->completed = 1;
|
||||
rsp->numOfRows = htobe64((int64_t)rowNum);
|
||||
|
||||
int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, taosArrayGetSize(pBlock->pDataBlock));
|
||||
int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, taosArrayGetSize(pBlock->pDataBlock));
|
||||
if(len < 0) {
|
||||
qError("qExplainGetRspFromCtx: blockEncode failed");
|
||||
QRY_ERR_JRET(terrno);
|
||||
@ -553,6 +553,7 @@ typedef struct SIntervalAggOperatorInfo {
|
|||
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
|
||||
STimeWindowAggSupp twAggSup;
|
||||
SArray* pPrevValues; // SArray<SGroupKeys> used to keep the previous not null value for interpolation.
|
||||
bool cleanGroupResInfo;
|
||||
struct SOperatorInfo* pOperator;
|
||||
// for limit optimization
|
||||
bool limited;
|
||||
|
@ -831,8 +832,10 @@ void cleanupExprSupp(SExprSupp* pSup);
|
|||
|
||||
void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp* pSup,
|
||||
SGroupResInfo* pGroupResInfo);
|
||||
void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap);
|
||||
void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap);
|
||||
void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo,
|
||||
SAggSupporter *pAggSup, bool cleanHashmap);
|
||||
void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo);
|
||||
|
||||
|
|
|
@ -49,6 +49,7 @@ typedef struct SAggOperatorInfo {
|
|||
SSDataBlock* pNewGroupBlock;
|
||||
bool hasCountFunc;
|
||||
SOperatorInfo* pOperator;
|
||||
bool cleanGroupResInfo;
|
||||
} SAggOperatorInfo;
|
||||
|
||||
static void destroyAggOperatorInfo(void* param);
|
||||
|
@ -121,6 +122,7 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA
|
|||
pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder;
|
||||
pInfo->hasCountFunc = pAggNode->hasCountLikeFunc;
|
||||
pInfo->pOperator = pOperator;
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
|
||||
setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
|
||||
!pAggNode->node.forceCreateNonBlockingOptr, OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
|
@ -159,8 +161,8 @@ void destroyAggOperatorInfo(void* param) {
|
|||
cleanupBasicInfo(&pInfo->binfo);
|
||||
|
||||
if (pInfo->pOperator) {
|
||||
cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
pInfo->cleanGroupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
|
@ -191,6 +193,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) {
|
|||
int32_t order = pAggInfo->binfo.inputTsOrder;
|
||||
SSDataBlock* pBlock = pAggInfo->pNewGroupBlock;
|
||||
|
||||
pAggInfo->cleanGroupResInfo = false;
|
||||
if (pBlock) {
|
||||
pAggInfo->pNewGroupBlock = NULL;
|
||||
tSimpleHashClear(pAggInfo->aggSup.pResultRowHashTable);
|
||||
|
@ -263,6 +266,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) {
|
|||
|
||||
code = initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pAggInfo->cleanGroupResInfo = true;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -627,7 +631,7 @@ void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp
|
|||
}
|
||||
}
|
||||
|
||||
void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
void cleanupResultInfoInGroupResInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo) {
|
||||
int32_t numOfExprs = pSup->numOfExprs;
|
||||
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
|
||||
|
@ -663,7 +667,7 @@ void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDi
|
|||
}
|
||||
}
|
||||
|
||||
void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap) {
|
||||
int32_t numOfExprs = pSup->numOfExprs;
|
||||
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
|
||||
|
@ -701,6 +705,14 @@ void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf*
|
|||
}
|
||||
}
|
||||
|
||||
void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo,
|
||||
SAggSupporter *pAggSup, bool cleanGroupResInfo) {
|
||||
if (cleanGroupResInfo) {
|
||||
cleanupResultInfoInGroupResInfo(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo);
|
||||
} else {
|
||||
cleanupResultInfoInHashMap(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo, pAggSup->pResultRowHashTable);
|
||||
}
|
||||
}
|
||||
void cleanupAggSup(SAggSupporter* pAggSup) {
|
||||
taosMemoryFreeClear(pAggSup->keyBuf);
|
||||
tSimpleHashCleanup(pAggSup->pResultRowHashTable);
|
||||
|
|
|
@ -45,6 +45,7 @@ typedef struct SDataDispatchHandle {
|
|||
SDataBlockDescNode* pSchema;
|
||||
STaosQueue* pDataBlocks;
|
||||
SDataDispatchBuf nextOutput;
|
||||
int32_t outPutColCounts;
|
||||
int32_t status;
|
||||
bool queryEnd;
|
||||
uint64_t useconds;
|
||||
|
@ -54,6 +55,65 @@ typedef struct SDataDispatchHandle {
|
|||
TdThreadMutex mutex;
|
||||
} SDataDispatchHandle;
|
||||
|
||||
static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) {
  if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
    return TSDB_CODE_SUCCESS;
  }
  if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) {
    qError("invalid input data");
    return TSDB_CODE_QRY_INVALID_INPUT;
  }
  SDataBlockDescNode* pSchema = pHandle->pSchema;
  if (pSchema == NULL || pSchema->totalRowSize != pInput->pData->info.rowSize) {
    qError("invalid schema");
    return TSDB_CODE_QRY_INVALID_INPUT;
  }

  if (pHandle->outPutColCounts > taosArrayGetSize(pInput->pData->pDataBlock)) {
    qError("invalid column number, schema:%d, input:%zu", pHandle->outPutColCounts, taosArrayGetSize(pInput->pData->pDataBlock));
    return TSDB_CODE_QRY_INVALID_INPUT;
  }

  SNode*  pNode;
  int32_t colNum = 0;
  FOREACH(pNode, pHandle->pSchema->pSlots) {
    SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
    if (pSlotDesc->output) {
      SColumnInfoData* pColInfoData = taosArrayGet(pInput->pData->pDataBlock, colNum);
      if (pColInfoData == NULL) {
        return -1;
      }
      if (pColInfoData->info.bytes < 0) {
        qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes);
        return TSDB_CODE_TSC_INTERNAL_ERROR;
      }
      if (!IS_VAR_DATA_TYPE(pColInfoData->info.type) &&
          TYPE_BYTES[pColInfoData->info.type] != pColInfoData->info.bytes) {
        qError("invalid column bytes, schema:%d, input:%d", TYPE_BYTES[pColInfoData->info.type],
               pColInfoData->info.bytes);
        return TSDB_CODE_TSC_INTERNAL_ERROR;
      }
      if (pColInfoData->info.type != pSlotDesc->dataType.type) {
        qError("invalid column type, schema:%d, input:%d", pSlotDesc->dataType.type, pColInfoData->info.type);
        return TSDB_CODE_QRY_INVALID_INPUT;
      }
      if (pColInfoData->info.bytes != pSlotDesc->dataType.bytes) {
        qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes);
        return TSDB_CODE_QRY_INVALID_INPUT;
      }

      if (IS_INVALID_TYPE(pColInfoData->info.type)) {
        qError("invalid column type, type:%d", pColInfoData->info.type);
        return TSDB_CODE_TSC_INTERNAL_ERROR;
      }
      ++colNum;
    }
  }

  return TSDB_CODE_SUCCESS;
}
|
||||
|
||||
// clang-format off
|
||||
// data format:
|
||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
|
@ -67,6 +127,12 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
|
|||
int32_t numOfCols = 0;
|
||||
SNode* pNode;
|
||||
|
||||
int32_t code = inputSafetyCheck(pHandle, pInput);
|
||||
if (code) {
|
||||
qError("failed to check input data, code:%d", code);
|
||||
return code;
|
||||
}
|
||||
|
||||
FOREACH(pNode, pHandle->pSchema->pSlots) {
|
||||
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
|
||||
if (pSlotDesc->output) {
|
||||
|
@ -84,17 +150,18 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
|
|||
pBuf->useSize = sizeof(SDataCacheEntry);
|
||||
|
||||
{
|
||||
// allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size
|
||||
size_t dataEncodeBufSize = pBuf->allocSize + 8;
|
||||
if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) {
|
||||
if (pHandle->pCompressBuf == NULL) {
|
||||
// allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size
|
||||
pHandle->pCompressBuf = taosMemoryMalloc(pBuf->allocSize + 8);
|
||||
pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeBufSize);
|
||||
if (NULL == pHandle->pCompressBuf) {
|
||||
QRY_RET(terrno);
|
||||
}
|
||||
pHandle->bufSize = pBuf->allocSize + 8;
|
||||
pHandle->bufSize = dataEncodeBufSize;
|
||||
} else {
|
||||
if (pHandle->bufSize < pBuf->allocSize + 8) {
|
||||
pHandle->bufSize = pBuf->allocSize + 8;
|
||||
if (pHandle->bufSize < dataEncodeBufSize) {
|
||||
pHandle->bufSize = dataEncodeBufSize;
|
||||
void* p = taosMemoryRealloc(pHandle->pCompressBuf, pHandle->bufSize);
|
||||
if (p != NULL) {
|
||||
pHandle->pCompressBuf = p;
|
||||
|
@ -105,7 +172,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
|
|||
}
|
||||
}
|
||||
|
||||
int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, numOfCols);
|
||||
int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeBufSize, numOfCols);
|
||||
if(dataLen < 0) {
|
||||
qError("failed to encode data block, code: %d", dataLen);
|
||||
return terrno;
|
||||
|
@ -123,7 +190,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData*
|
|||
TAOS_MEMCPY(pEntry->data, pHandle->pCompressBuf, dataLen);
|
||||
}
|
||||
} else {
|
||||
pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, numOfCols);
|
||||
pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, pBuf->allocSize, numOfCols);
|
||||
if(pEntry->dataLen < 0) {
|
||||
qError("failed to encode data block, code: %d", pEntry->dataLen);
|
||||
return terrno;
|
||||
|
@ -314,8 +381,60 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t blockDescNodeCheck(SDataBlockDescNode* pInputDataBlockDesc) {
  if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
    return TSDB_CODE_SUCCESS;
  }

  if (pInputDataBlockDesc == NULL) {
    qError("invalid schema");
    return TSDB_CODE_QRY_INVALID_INPUT;
  }

  SNode*  pNode;
  int32_t realOutputRowSize = 0;
  FOREACH(pNode, pInputDataBlockDesc->pSlots) {
    SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
    if (pSlotDesc->output) {
      realOutputRowSize += pSlotDesc->dataType.bytes;
    } else {
      // Slots must be sorted, and slots with 'output' set to true must come first
      break;
    }
  }
  if (realOutputRowSize != pInputDataBlockDesc->outputRowSize) {
    qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pInputDataBlockDesc->outputRowSize);
    return TSDB_CODE_QRY_INVALID_INPUT;
  }
  return TSDB_CODE_SUCCESS;
}

int32_t getOutputColCounts(SDataBlockDescNode* pInputDataBlockDesc) {
  if (pInputDataBlockDesc == NULL) {
    qError("invalid schema");
    return 0;
  }
  SNode*  pNode;
  int32_t numOfCols = 0;
  FOREACH(pNode, pInputDataBlockDesc->pSlots) {
    SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
    if (pSlotDesc->output) {
      ++numOfCols;
    } else {
      // Slots must be sorted, and slots with 'output' set to true must come first
      break;
    }
  }
  return numOfCols;
}
|
||||
|
||||
int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) {
|
||||
int32_t code;
|
||||
code = blockDescNodeCheck(pDataSink->pInputDataBlockDesc);
|
||||
if (code) {
|
||||
qError("failed to check input data block desc, code:%d", code);
|
||||
return code;
|
||||
}
|
||||
|
||||
SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle));
|
||||
if (NULL == dispatcher) {
|
||||
|
@ -333,6 +452,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD
|
|||
dispatcher->pManager = pManager;
|
||||
pManager = NULL;
|
||||
dispatcher->pSchema = pDataSink->pInputDataBlockDesc;
|
||||
dispatcher->outPutColCounts = getOutputColCounts(dispatcher->pSchema);
|
||||
dispatcher->status = DS_BUF_EMPTY;
|
||||
dispatcher->queryEnd = false;
|
||||
code = taosOpenQueue(&dispatcher->pDataBlocks);
|
||||
|
|
|
@ -528,7 +528,12 @@ static void seqJoinLaunchNewRetrieveImpl(SOperatorInfo* pOperator, SSDataBlock**
|
|||
qDebug("%s dynamic post task begin", GET_TASKID(pOperator->pTaskInfo));
|
||||
code = pOperator->pDownstream[1]->fpSet.getNextExtFn(pOperator->pDownstream[1], pParam, ppRes);
|
||||
if (*ppRes && (code == 0)) {
|
||||
blockDataCheck(*ppRes, false);
|
||||
code = blockDataCheck(*ppRes);
|
||||
if (code) {
|
||||
qError("Invalid block data, blockDataCheck failed, error:%s", tstrerror(code));
|
||||
pOperator->pTaskInfo->code = code;
|
||||
T_LONG_JMP(pOperator->pTaskInfo->env, pOperator->pTaskInfo->code);
|
||||
}
|
||||
pPost->isStarted = true;
|
||||
pStbJoin->execInfo.postBlkNum++;
|
||||
pStbJoin->execInfo.postBlkRows += (*ppRes)->info.rows;
|
||||
|
|
|
@ -390,6 +390,7 @@ SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode) {
|
|||
createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId);
|
||||
idata.info.scale = pDescNode->dataType.scale;
|
||||
idata.info.precision = pDescNode->dataType.precision;
|
||||
idata.info.noData = pDescNode->reserve;
|
||||
|
||||
code = blockDataAppendColInfo(pBlock, &idata);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
|
|
@ -700,12 +700,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
|
|||
if (pTaskInfo->pOpParam && !pTaskInfo->paramSet) {
|
||||
pTaskInfo->paramSet = true;
|
||||
code = pTaskInfo->pRoot->fpSet.getNextExtFn(pTaskInfo->pRoot, pTaskInfo->pOpParam, &pRes);
|
||||
blockDataCheck(pRes, false);
|
||||
} else {
|
||||
code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes);
|
||||
blockDataCheck(pRes, false);
|
||||
}
|
||||
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
code = blockDataCheck(pRes);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
if (pRes == NULL) {
|
||||
|
@ -750,7 +750,8 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
|
|||
}
|
||||
|
||||
code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes);
|
||||
blockDataCheck(pRes, false);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
code = blockDataCheck(pRes);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
|
@ -849,7 +850,11 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
|
|||
qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo));
|
||||
}
|
||||
|
||||
blockDataCheck(*pRes, false);
|
||||
code = blockDataCheck(*pRes);
|
||||
if (code) {
|
||||
pTaskInfo->code = code;
|
||||
qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo));
|
||||
}
|
||||
|
||||
uint64_t el = (taosGetTimestampUs() - st);
|
||||
|
||||
|
|
|
@ -616,11 +616,12 @@ int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* p
|
|||
}
|
||||
}
|
||||
}
|
||||
code = TSDB_CODE_SUCCESS;
|
||||
|
||||
code = blockDataCheck(pBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _err);
|
||||
_err:
|
||||
blockDataCheck(pBlock, true);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
colDataDestroy(p);
|
||||
taosMemoryFree(p);
|
||||
return code;
|
||||
|
@ -701,7 +702,7 @@ int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu
|
|||
QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno);
|
||||
char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
|
||||
for (int32_t k = 0; k < pRow->numOfRows; ++k) {
|
||||
code = colDataSetVal(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
|
||||
code = colDataSetValOrCover(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -765,7 +765,7 @@ static FORCE_INLINE int32_t getBlkFromDownstreamOperator(struct SOperatorInfo* p
|
|||
}
|
||||
}
|
||||
|
||||
blockDataCheck(pBlock, false);
|
||||
code = blockDataCheck(pBlock);
|
||||
|
||||
*ppRes = pBlock;
|
||||
return code;
|
||||
|
|
|
@ -88,8 +88,8 @@ static void destroyGroupOperatorInfo(void* param) {
|
|||
cleanupExprSupp(&pInfo->scalarSup);
|
||||
|
||||
if (pInfo->pOperator != NULL) {
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
false);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -65,11 +65,14 @@ static int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock);
|
|||
|
||||
int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*)param;
|
||||
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
|
||||
blockDataCheck(*ppBlock, false);
|
||||
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
|
||||
if (code) {
|
||||
qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code));
|
||||
}
|
||||
code = blockDataCheck(*ppBlock);
|
||||
if (code) {
|
||||
qError("failed to check data block got from upstream, %s code:%s", __func__, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
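The hunks in this file change `blockDataCheck` from a fire-and-forget call into one that returns an error code which every caller logs and propagates. A small self-contained sketch of that caller-side contract is below; the `Demo*` types, names, and code values are illustrative stand-ins, not TDengine definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define DEMO_CODE_SUCCESS        0
#define DEMO_CODE_INVALID_INPUT  1  /* illustrative error code */

/* Illustrative stand-in for a data block. */
typedef struct {
  int64_t rows;
} DemoBlock;

/* A validator that reports problems through a return code, mirroring the new
 * blockDataCheck() contract used in the hunks above. */
static int32_t demoBlockCheck(const DemoBlock* pBlock) {
  if (pBlock == NULL) return DEMO_CODE_SUCCESS;  /* nothing to validate */
  if (pBlock->rows < 0) return DEMO_CODE_INVALID_INPUT;
  return DEMO_CODE_SUCCESS;
}

/* Caller-side pattern: fetch, validate, log, and propagate the code unchanged. */
static int32_t demoLoadNextBlock(DemoBlock* pBlock) {
  int32_t code = demoBlockCheck(pBlock);
  if (code != DEMO_CODE_SUCCESS) {
    fprintf(stderr, "block validation failed, code:%d\n", code);
  }
  return code;
}

int main(void) {
  DemoBlock bad = {.rows = -1};
  return demoLoadNextBlock(&bad) == DEMO_CODE_SUCCESS ? 0 : 1;
}
```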
|
||||
|
||||
|
@ -526,7 +529,8 @@ int32_t doMultiwayMerge(SOperatorInfo* pOperator, SSDataBlock** pResBlock) {
|
|||
|
||||
if ((*pResBlock) != NULL) {
|
||||
pOperator->resultInfo.totalRows += (*pResBlock)->info.rows;
|
||||
blockDataCheck(*pResBlock, false);
|
||||
code = blockDataCheck(*pResBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
} else {
|
||||
setOperatorCompleted(pOperator);
|
||||
}
|
||||
|
|
|
@ -870,15 +870,25 @@ int32_t setOperatorParams(struct SOperatorInfo* pOperator, SOperatorParam* pInpu
|
|||
|
||||
SSDataBlock* getNextBlockFromDownstream(struct SOperatorInfo* pOperator, int32_t idx) {
|
||||
SSDataBlock* p = NULL;
|
||||
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p);
|
||||
blockDataCheck(p, false);
|
||||
return (code == 0)? p:NULL;
|
||||
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = blockDataCheck(p);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("blockDataCheck failed, code:%s", tstrerror(code));
|
||||
}
|
||||
}
|
||||
return (code == 0) ? p : NULL;
|
||||
}
|
||||
|
||||
SSDataBlock* getNextBlockFromDownstreamRemain(struct SOperatorInfo* pOperator, int32_t idx) {
|
||||
SSDataBlock* p = NULL;
|
||||
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p);
|
||||
blockDataCheck(p, false);
|
||||
int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = blockDataCheck(p);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("blockDataCheck failed, code:%s", tstrerror(code));
|
||||
}
|
||||
}
|
||||
return (code == 0)? p:NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -1461,6 +1461,18 @@ static void destroyTableScanOperatorInfo(void* param) {
|
|||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
static void resetClolumnReserve(SSDataBlock* pBlock, int32_t dataRequireFlag) {
|
||||
if (pBlock && dataRequireFlag == FUNC_DATA_REQUIRED_NOT_LOAD) {
|
||||
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);
|
||||
if (pCol) {
|
||||
pCol->info.noData = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
|
||||
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo,
|
||||
SOperatorInfo** pOptrInfo) {
|
||||
|
@ -1511,6 +1523,7 @@ int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHa
|
|||
pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader;
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
|
||||
resetClolumnReserve(pInfo->pResBlock, pInfo->base.dataBlockLoadFlag);
|
||||
QUERY_CHECK_NULL(pInfo->pResBlock, code, lino, _error, terrno);
|
||||
|
||||
code = prepareDataBlockBuf(pInfo->pResBlock, &pInfo->base.matchInfo);
|
||||
|
@ -2102,6 +2115,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
|
|||
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
|
||||
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
|
||||
uint64_t groupId = pSrcGp[i];
|
||||
if (groupId == 0) {
|
||||
|
@ -2139,7 +2153,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
|
|||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
colDataSetNULL(pDestCalStartTsCol, i);
|
||||
colDataSetNULL(pDestCalEndTsCol, i);
|
||||
colDataSetNULL(pDestTableNameInxCol, i);
|
||||
pDestBlock->info.rows++;
|
||||
}
|
||||
|
||||
|
@ -2192,6 +2206,7 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB
|
|||
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
|
||||
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
|
||||
uint64_t groupId = pSrcGp[i];
|
||||
if (groupId == 0) {
|
||||
|
@ -2220,6 +2235,8 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB
|
|||
code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&range.win.ekey, false);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
colDataSetNULL(pDestTableNameInxCol, i);
|
||||
|
||||
pDestBlock->info.rows++;
|
||||
}
|
||||
|
||||
|
@ -2274,6 +2291,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
|
|||
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
|
||||
for (int32_t i = 0; i < pSrcBlock->info.rows;) {
|
||||
uint64_t srcUid = srcUidData[i];
|
||||
uint64_t groupId = srcGp[i];
|
||||
|
@ -2306,6 +2324,8 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
|
|||
code = colDataSetVal(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
colDataSetNULL(pDestTableNameInxCol, pDestBlock->info.rows);
|
||||
|
||||
pDestBlock->info.rows++;
|
||||
}
|
||||
|
||||
|
@ -3061,6 +3081,7 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S
|
|||
colDataSetNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j);
|
||||
colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j);
|
||||
colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j);
|
||||
colDataSetNULL(taosArrayGet(pDst->pDataBlock, TABLE_NAME_COLUMN_INDEX), j);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
@ -3657,6 +3678,9 @@ FETCH_NEXT_BLOCK:
|
|||
// printDataBlock(pInfo->pCheckpointRes, "stream scan ck", GET_TASKID(pTaskInfo));
|
||||
(*ppRes) = pInfo->pCheckpointRes;
|
||||
return code;
|
||||
} else {
|
||||
qError("stream scan error, invalid block type %d, %s", pInfo->blockType, id);
|
||||
code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
_end:
|
||||
|
|
|
@ -334,10 +334,14 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock,
|
|||
|
||||
int32_t loadNextDataBlock(void* param, SSDataBlock** ppBlock) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*)param;
|
||||
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
|
||||
blockDataCheck(*ppBlock, false);
|
||||
int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock);
|
||||
if (code) {
|
||||
qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code));
|
||||
} else {
|
||||
code = blockDataCheck(*ppBlock);
|
||||
if (code) {
|
||||
qError("failed to check block data, %s code:%s", __func__, tstrerror(code));
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -630,7 +634,8 @@ int32_t fetchNextGroupSortDataBlock(void* param, SSDataBlock** ppBlock) {
|
|||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
if (block != NULL) {
|
||||
blockDataCheck(block, false);
|
||||
code = blockDataCheck(block);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (block->info.id.groupId == grpSortOpInfo->currGroupId) {
|
||||
grpSortOpInfo->childOpStatus = CHILD_OP_SAME_GROUP;
|
||||
*ppBlock = block;
|
||||
|
|
|
@ -474,8 +474,8 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
|
|||
SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param;
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
if (pInfo->pOperator) {
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
false);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
|
|
|
@ -38,6 +38,7 @@ typedef struct SSessionAggOperatorInfo {
|
|||
int32_t tsSlotId; // primary timestamp slot id
|
||||
STimeWindowAggSupp twAggSup;
|
||||
SOperatorInfo* pOperator;
|
||||
bool cleanGroupResInfo;
|
||||
} SSessionAggOperatorInfo;
|
||||
|
||||
typedef struct SStateWindowOperatorInfo {
|
||||
|
@ -52,6 +53,7 @@ typedef struct SStateWindowOperatorInfo {
|
|||
int32_t tsSlotId; // primary timestamp column slot id
|
||||
STimeWindowAggSupp twAggSup;
|
||||
SOperatorInfo* pOperator;
|
||||
bool cleanGroupResInfo;
|
||||
} SStateWindowOperatorInfo;
|
||||
|
||||
typedef enum SResultTsInterpType {
|
||||
|
@ -943,6 +945,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
|
|||
int32_t scanFlag = MAIN_SCAN;
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0);
|
||||
if (pBlock == NULL) {
|
||||
|
@ -965,6 +968,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
|
|||
|
||||
code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pInfo->cleanGroupResInfo = true;
|
||||
|
||||
OPTR_SET_OPENED(pOperator);
|
||||
|
||||
|
@ -1092,6 +1096,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) {
|
|||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0);
|
||||
if (pBlock == NULL) {
|
||||
|
@ -1120,7 +1125,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) {
|
|||
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
|
||||
code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
pInfo->cleanGroupResInfo = true;
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
|
||||
_end:
|
||||
|
@ -1230,8 +1235,8 @@ static void destroyStateWindowOperatorInfo(void* param) {
|
|||
cleanupBasicInfo(&pInfo->binfo);
|
||||
taosMemoryFreeClear(pInfo->stateKey.pData);
|
||||
if (pInfo->pOperator) {
|
||||
cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
pInfo->cleanGroupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
|
@ -1257,8 +1262,8 @@ void destroyIntervalOperatorInfo(void* param) {
|
|||
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
if (pInfo->pOperator) {
|
||||
cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
pInfo->cleanGroupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
|
@ -1452,6 +1457,7 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode
|
|||
}
|
||||
|
||||
pInfo->pOperator = pOperator;
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
initResultRowInfo(&pInfo->binfo.resultRowInfo);
|
||||
setOperatorInfo(pOperator, "TimeIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
|
@ -1573,6 +1579,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp
|
|||
SOptrBasicInfo* pBInfo = &pInfo->binfo;
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
while (1) {
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
|
@ -1628,6 +1635,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp
|
|||
|
||||
code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pInfo->cleanGroupResInfo = true;
|
||||
|
||||
code = blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
@ -1731,6 +1739,7 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy
|
|||
|
||||
pInfo->tsSlotId = tsSlotId;
|
||||
pInfo->pOperator = pOperator;
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
setOperatorInfo(pOperator, "StateWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo,
|
||||
pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAggNext, NULL, destroyStateWindowOperatorInfo,
|
||||
|
@ -1763,8 +1772,8 @@ void destroySWindowOperatorInfo(void* param) {
|
|||
cleanupBasicInfo(&pInfo->binfo);
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
if (pInfo->pOperator) {
|
||||
cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf,
|
||||
&pInfo->groupResInfo);
|
||||
cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup,
|
||||
pInfo->cleanGroupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
|
@ -1835,6 +1844,7 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh
|
|||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
|
||||
pInfo->pOperator = pOperator;
|
||||
pInfo->cleanGroupResInfo = false;
|
||||
setOperatorInfo(pOperator, "SessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSessionWindowAggNext, NULL, destroySWindowOperatorInfo,
|
||||
|
|
|
@ -144,6 +144,11 @@ target_link_libraries(
|
|||
|
||||
# SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin)
|
||||
add_executable(udfd src/udfd.c)
|
||||
|
||||
if(${TD_DARWIN})
|
||||
target_compile_options(udfd PRIVATE -Wno-error=deprecated-non-prototype)
|
||||
endif()
|
||||
|
||||
target_include_directories(
|
||||
udfd
|
||||
PUBLIC
|
||||
|
|
|
@ -1385,9 +1385,12 @@ static int32_t translateRepeat(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
|||
|
||||
uint8_t type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type;
|
||||
int32_t orgLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes;
|
||||
int32_t count = TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1);
|
||||
|
||||
int32_t resLen = orgLen * count;
|
||||
int32_t resLen;
|
||||
if (nodeType(nodesListGetNode(pFunc->pParameterList, 1)) == QUERY_NODE_VALUE) {
|
||||
resLen = orgLen * TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1);
|
||||
} else {
|
||||
resLen = TSDB_MAX_BINARY_LEN;
|
||||
}
|
||||
pFunc->node.resType = (SDataType){.bytes = resLen, .type = type};
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
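The adjustment above only multiplies the input width by the repeat count when the count argument is a literal value node; for a non-constant count the result is sized to the maximum binary length. A self-contained sketch of that sizing rule follows; `DEMO_MAX_BINARY_LEN` and the helper name are illustrative, not the real TDengine constants.

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative constant; the real value comes from the TDengine headers. */
#define DEMO_MAX_BINARY_LEN 65517

/* Mirrors the sizing rule added to translateRepeat(): when the repeat count is a
 * literal we can size the result exactly, otherwise we must assume the maximum. */
static int32_t repeatResultBytes(int32_t orgLen, bool countIsLiteral, int64_t literalCount) {
  if (countIsLiteral) {
    int32_t count = (int32_t)(literalCount > 1 ? literalCount : 1);  /* at least one copy */
    return orgLen * count;
  }
  return DEMO_MAX_BINARY_LEN;
}

int main(void) {
  /* repeat('ab', 3) with a literal count -> 6 bytes; repeat(c1, n) -> maximum length */
  return (repeatResultBytes(2, true, 3) == 6 &&
          repeatResultBytes(2, false, 0) == DEMO_MAX_BINARY_LEN) ? 0 : 1;
}
```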
@ -1535,14 +1538,16 @@ static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
|||
|
||||
static int32_t translateOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||
FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len));
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_GEOMETRY].bytes, .type = TSDB_DATA_TYPE_GEOMETRY};
|
||||
SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0));
|
||||
pFunc->node.resType = (SDataType){.bytes = dt.bytes, .type = TSDB_DATA_TYPE_GEOMETRY};
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateInGeomOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||
FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len));
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_VARCHAR].bytes, .type = TSDB_DATA_TYPE_VARCHAR};
|
||||
SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0));
|
||||
pFunc->node.resType = (SDataType){.bytes = dt.bytes, .type = TSDB_DATA_TYPE_VARCHAR};
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1613,7 +1618,7 @@ static int32_t translateOutVarchar(SFunctionNode* pFunc, char* pErrBuf, int32_t
|
|||
break;
|
||||
case FUNCTION_TYPE_BLOCK_DIST:
|
||||
case FUNCTION_TYPE_BLOCK_DIST_INFO:
|
||||
bytes = 128;
|
||||
bytes = sizeof(STableBlockDistInfo);
|
||||
break;
|
||||
case FUNCTION_TYPE_TO_CHAR:
|
||||
bytes = 4096;
|
||||
|
@ -1655,7 +1660,7 @@ static int32_t translateOutVarchar(SFunctionNode* pFunc, char* pErrBuf, int32_t
|
|||
bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
|
||||
break;
|
||||
case FUNCTION_TYPE_TIMEZONE:
|
||||
bytes = TD_TIMEZONE_LEN;
|
||||
bytes = timeZoneStrLen();
|
||||
break;
|
||||
case FUNCTION_TYPE_IRATE_PARTIAL:
|
||||
bytes = getIrateInfoSize((pFunc->hasPk) ? pFunc->pkBytes : 0) + VARSTR_HEADER_SIZE;
|
||||
|
|
|
@ -412,6 +412,27 @@ int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNo
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** ppFunc) {
|
||||
int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)ppFunc);
|
||||
if (NULL == *ppFunc) {
|
||||
return code;
|
||||
}
|
||||
|
||||
(*ppFunc)->hasPk = pSrcFunc->hasPk;
|
||||
(*ppFunc)->pkBytes = pSrcFunc->pkBytes;
|
||||
|
||||
(void)snprintf((*ppFunc)->functionName, sizeof((*ppFunc)->functionName), "%s", pName);
|
||||
(*ppFunc)->pParameterList = pParameterList;
|
||||
code = getFuncInfo((*ppFunc));
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
(*ppFunc)->pParameterList = NULL;
|
||||
nodesDestroyNode((SNode*)*ppFunc);
|
||||
*ppFunc = NULL;
|
||||
return code;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t createColumnByFunc(const SFunctionNode* pFunc, SColumnNode** ppCol) {
|
||||
int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)ppCol);
|
||||
if (NULL == *ppCol) {
|
||||
|
@ -438,7 +459,8 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod
|
|||
if (NULL == pParameterList) {
|
||||
return code;
|
||||
}
|
||||
code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pParameterList,pPartialFunc );
|
||||
code =
|
||||
createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pSrcFunc, pParameterList, pPartialFunc);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
nodesDestroyList(pParameterList);
|
||||
return code;
|
||||
|
@ -452,8 +474,6 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod
|
|||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
tstrncpy((*pPartialFunc)->node.aliasName, name, TSDB_COL_NAME_LEN);
|
||||
(*pPartialFunc)->hasPk = pSrcFunc->hasPk;
|
||||
(*pPartialFunc)->pkBytes = pSrcFunc->pkBytes;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -479,9 +499,9 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN
|
|||
int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
if(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc != NULL){
|
||||
code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pParameterList, &pFunc);
|
||||
code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pSrcFunc, pParameterList, &pFunc);
|
||||
}else{
|
||||
code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc);
|
||||
code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc);
|
||||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
|
@ -493,8 +513,6 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN
|
|||
} else {
|
||||
nodesDestroyList(pParameterList);
|
||||
}
|
||||
(*pMidFunc)->hasPk = pPartialFunc->hasPk;
|
||||
(*pMidFunc)->pkBytes = pPartialFunc->pkBytes;
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -505,7 +523,7 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio
|
|||
|
||||
int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc);
|
||||
code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
pFunc->hasOriginalFunc = true;
|
||||
|
@ -522,8 +540,6 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio
|
|||
} else {
|
||||
nodesDestroyList(pParameterList);
|
||||
}
|
||||
(*pMergeFunc)->hasPk = pPartialFunc->hasPk;
|
||||
(*pMergeFunc)->pkBytes = pPartialFunc->pkBytes;
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -224,7 +224,7 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index
|
|||
|
||||
*index = -1;
|
||||
|
||||
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) {
|
||||
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v) || isinf(v)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -232,6 +232,8 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index
|
|||
double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
|
||||
if (fabs(span) < DBL_EPSILON) {
|
||||
*index = 0;
|
||||
} else if (isinf(span)) {
|
||||
*index = -1;
|
||||
} else {
|
||||
double slotSpan = span / pBucket->numOfSlots;
|
||||
*index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
|
||||
|
|
|
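The two hunks above harden the double-value bucket hash against infinite inputs and an infinite range span. The self-contained sketch below reproduces the same guards on a simplified bucket; the function and parameter names are illustrative rather than the real `tMemBucket` API.

```c
#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for tMemBucket's double range; the slot math mirrors
 * tBucketDoubleHash() after the isinf() guards were added. */
static int32_t demoBucketIndex(double v, double minVal, double maxVal, int32_t numOfSlots) {
  if (v > maxVal || v < minVal || isnan(v) || isinf(v)) {
    return -1;  /* value outside the tracked range, or not a finite number */
  }

  double span = maxVal - minVal;
  if (fabs(span) < DBL_EPSILON) {
    return 0;                  /* all values identical: single slot */
  } else if (isinf(span)) {
    return -1;                 /* range itself overflows: cannot hash */
  }

  double  slotSpan = span / numOfSlots;
  int32_t index = (int32_t)((v - minVal) / slotSpan);
  if (index >= numOfSlots) index = numOfSlots - 1;  /* clamp v == maxVal into the last slot */
  return index;
}

int main(void) {
  printf("%d %d %d\n",
         demoBucketIndex(5.0, 0.0, 10.0, 10),          /* normal value        */
         demoBucketIndex(INFINITY, 0.0, 10.0, 10),     /* infinite input      */
         demoBucketIndex(3.0, -DBL_MAX, DBL_MAX, 10)); /* infinite range span */
  return 0;
}
```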
@ -40,7 +40,7 @@ typedef struct SUdfdData {
|
|||
#ifdef WINDOWS
|
||||
HANDLE jobHandle;
|
||||
#endif
|
||||
int spawnErr;
|
||||
int32_t spawnErr;
|
||||
uv_pipe_t ctrlPipe;
|
||||
uv_async_t stopAsync;
|
||||
int32_t stopCalled;
|
||||
|
@ -51,15 +51,17 @@ typedef struct SUdfdData {
|
|||
SUdfdData udfdGlobal = {0};
|
||||
|
||||
int32_t udfStartUdfd(int32_t startDnodeId);
|
||||
void udfStopUdfd();
|
||||
void udfStopUdfd();
|
||||
|
||||
extern char **environ;
|
||||
|
||||
static int32_t udfSpawnUdfd(SUdfdData *pData);
|
||||
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal);
|
||||
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal);
|
||||
static void udfUdfdCloseWalkCb(uv_handle_t *handle, void *arg);
|
||||
static void udfUdfdStopAsyncCb(uv_async_t *async);
|
||||
static void udfWatchUdfd(void *args);
|
||||
|
||||
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) {
|
||||
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal) {
|
||||
fnInfo("udfd process exited with status %" PRId64 ", signal %d", exitStatus, termSignal);
|
||||
SUdfdData *pData = process->data;
|
||||
if (exitStatus == 0 && termSignal == 0 || atomic_load_32(&pData->stopCalled)) {
|
||||
|
@ -67,7 +69,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) {
|
|||
} else {
|
||||
fnInfo("udfd process restart");
|
||||
int32_t code = udfSpawnUdfd(pData);
|
||||
if(code != 0) {
|
||||
if (code != 0) {
|
||||
fnError("udfd process restart failed with code:%d", code);
|
||||
}
|
||||
}
|
||||
|
@ -75,6 +77,8 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) {
|
|||
|
||||
static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
||||
fnInfo("start to init udfd");
|
||||
|
||||
int32_t err = 0;
|
||||
uv_process_options_t options = {0};
|
||||
|
||||
char path[PATH_MAX] = {0};
|
||||
|
@ -126,17 +130,17 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
|||
char thrdPoolSizeEnvItem[32] = {0};
|
||||
snprintf(dnodeIdEnvItem, 32, "%s=%d", "DNODE_ID", pData->dnodeId);
|
||||
|
||||
float numCpuCores = 4;
|
||||
float numCpuCores = 4;
|
||||
int32_t code = taosGetCpuCores(&numCpuCores, false);
|
||||
if(code != 0) {
|
||||
fnError("failed to get cpu cores, code:%d", code);
|
||||
if (code != 0) {
|
||||
fnError("failed to get cpu cores, code:0x%x", code);
|
||||
}
|
||||
numCpuCores = TMAX(numCpuCores, 2);
|
||||
snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);
|
||||
snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int32_t)numCpuCores * 2);
|
||||
|
||||
char pathTaosdLdLib[512] = {0};
|
||||
size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib);
|
||||
int ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen);
|
||||
char pathTaosdLdLib[512] = {0};
|
||||
size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib);
|
||||
int32_t ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen);
|
||||
if (ret != UV_ENOBUFS) {
|
||||
taosdLdLibPathLen = strlen(pathTaosdLdLib);
|
||||
}
|
||||
|
@ -158,8 +162,8 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
|||
char *taosFqdnEnvItem = NULL;
|
||||
char *taosFqdn = getenv("TAOS_FQDN");
|
||||
if (taosFqdn != NULL) {
|
||||
int subLen = strlen(taosFqdn);
|
||||
int len = strlen("TAOS_FQDN=") + subLen + 1;
|
||||
int32_t subLen = strlen(taosFqdn);
|
||||
int32_t len = strlen("TAOS_FQDN=") + subLen + 1;
|
||||
taosFqdnEnvItem = taosMemoryMalloc(len);
|
||||
if (taosFqdnEnvItem != NULL) {
|
||||
tstrncpy(taosFqdnEnvItem, "TAOS_FQDN=", len);
|
||||
|
@ -171,11 +175,53 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
|||
}
|
||||
}
|
||||
|
||||
char *envUdfd[] = {dnodeIdEnvItem, thrdPoolSizeEnvItem, ldLibPathEnvItem,taosFqdnEnvItem, NULL};
|
||||
char *envUdfd[] = {dnodeIdEnvItem, thrdPoolSizeEnvItem, ldLibPathEnvItem, taosFqdnEnvItem, NULL};
|
||||
|
||||
options.env = envUdfd;
|
||||
char **envUdfdWithPEnv = NULL;
|
||||
if (environ != NULL) {
|
||||
int32_t lenEnvUdfd = ARRAY_SIZE(envUdfd);
|
||||
int32_t numEnviron = 0;
|
||||
while (environ[numEnviron] != NULL) {
|
||||
numEnviron++;
|
||||
}
|
||||
|
||||
int err = uv_spawn(&pData->loop, &pData->process, &options);
|
||||
envUdfdWithPEnv = (char **)taosMemoryCalloc(numEnviron + lenEnvUdfd, sizeof(char *));
|
||||
if (envUdfdWithPEnv == NULL) {
|
||||
err = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numEnviron; i++) {
|
||||
int32_t len = strlen(environ[i]) + 1;
|
||||
envUdfdWithPEnv[i] = (char *)taosMemoryCalloc(len, 1);
|
||||
if (envUdfdWithPEnv[i] == NULL) {
|
||||
err = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
tstrncpy(envUdfdWithPEnv[i], environ[i], len);
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < lenEnvUdfd; i++) {
|
||||
if (envUdfd[i] != NULL) {
|
||||
int32_t len = strlen(envUdfd[i]) + 1;
|
||||
envUdfdWithPEnv[numEnviron + i] = (char *)taosMemoryCalloc(len, 1);
|
||||
if (envUdfdWithPEnv[numEnviron + i] == NULL) {
|
||||
err = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
tstrncpy(envUdfdWithPEnv[numEnviron + i], envUdfd[i], len);
|
||||
}
|
||||
}
|
||||
envUdfdWithPEnv[numEnviron + lenEnvUdfd - 1] = NULL;
|
||||
|
||||
options.env = envUdfdWithPEnv;
|
||||
} else {
|
||||
options.env = envUdfd;
|
||||
}
|
||||
|
||||
err = uv_spawn(&pData->loop, &pData->process, &options);
|
||||
pData->process.data = (void *)pData;
|
||||
|
||||
#ifdef WINDOWS
|
||||
|
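With libuv, `uv_spawn` inherits the parent environment only when `options.env` is NULL; supplying extra variables such as `UV_THREADPOOL_SIZE` therefore requires rebuilding the full environment, which is what the block above does with `envUdfdWithPEnv`. Below is a self-contained sketch of that merge without the libuv plumbing; plain `calloc`/`strdup`/`free` stand in for the taosMemory wrappers, and error handling is reduced for brevity.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern char **environ;

/* Builds a NULL-terminated array containing the parent environment followed by
 * the caller's extra entries, mirroring the envUdfdWithPEnv construction above.
 * The array and each string are heap-allocated; release with freeMergedEnv(). */
static char **mergeEnv(char *const extra[], int32_t numExtra) {
  int32_t numEnviron = 0;
  while (environ != NULL && environ[numEnviron] != NULL) numEnviron++;

  char **merged = calloc((size_t)numEnviron + (size_t)numExtra + 1, sizeof(char *));
  if (merged == NULL) return NULL;

  for (int32_t i = 0; i < numEnviron; i++) merged[i] = strdup(environ[i]);
  for (int32_t i = 0; i < numExtra; i++) merged[numEnviron + i] = strdup(extra[i]);
  merged[numEnviron + numExtra] = NULL;  /* terminator expected by exec-style APIs */
  return merged;
}

static void freeMergedEnv(char **env) {
  if (env == NULL) return;
  for (int32_t i = 0; env[i] != NULL; i++) free(env[i]);
  free(env);
}

int main(void) {
  char *extra[] = {"UV_THREADPOOL_SIZE=8", "DNODE_ID=1"};
  char **env = mergeEnv(extra, 2);
  for (int32_t i = 0; env != NULL && env[i] != NULL; i++) puts(env[i]);
  freeMergedEnv(env);
  return 0;
}
```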
@ -202,7 +248,21 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
|
|||
} else {
|
||||
fnInfo("udfd is initialized");
|
||||
}
|
||||
if(taosFqdnEnvItem) taosMemoryFree(taosFqdnEnvItem);
|
||||
|
||||
_OVER:
|
||||
if (taosFqdnEnvItem) {
|
||||
taosMemoryFree(taosFqdnEnvItem);
|
||||
}
|
||||
|
||||
if (envUdfdWithPEnv != NULL) {
|
||||
int32_t i = 0;
|
||||
while (envUdfdWithPEnv[i] != NULL) {
|
||||
taosMemoryFree(envUdfdWithPEnv[i]);
|
||||
i++;
|
||||
}
|
||||
taosMemoryFree(envUdfdWithPEnv);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -225,13 +285,13 @@ static void udfWatchUdfd(void *args) {
TAOS_UV_CHECK_ERRNO(udfSpawnUdfd(pData));
atomic_store_32(&pData->spawnErr, 0);
(void)uv_barrier_wait(&pData->barrier);
int num = uv_run(&pData->loop, UV_RUN_DEFAULT);
int32_t num = uv_run(&pData->loop, UV_RUN_DEFAULT);
fnInfo("udfd loop exit with %d active handles, line:%d", num, __LINE__);

uv_walk(&pData->loop, udfUdfdCloseWalkCb, NULL);
num = uv_run(&pData->loop, UV_RUN_DEFAULT);
fnInfo("udfd loop exit with %d active handles, line:%d", num, __LINE__);
if(uv_loop_close(&pData->loop) != 0) {
if (uv_loop_close(&pData->loop) != 0) {
fnError("udfd loop close failed, lino:%d", __LINE__);
}
return;

@@ -240,7 +300,7 @@ _exit:
if (terrno != 0) {
(void)uv_barrier_wait(&pData->barrier);
atomic_store_32(&pData->spawnErr, terrno);
if(uv_loop_close(&pData->loop) != 0) {
if (uv_loop_close(&pData->loop) != 0) {
fnError("udfd loop close failed, lino:%d", __LINE__);
}
fnError("udfd thread exit with code:%d lino:%d", terrno, terrln);

@@ -271,10 +331,10 @@ int32_t udfStartUdfd(int32_t startDnodeId) {
int32_t err = atomic_load_32(&pData->spawnErr);
if (err != 0) {
uv_barrier_destroy(&pData->barrier);
if(uv_async_send(&pData->stopAsync) != 0) {
if (uv_async_send(&pData->stopAsync) != 0) {
fnError("start udfd: failed to send stop async");
}
if(uv_thread_join(&pData->thread)!= 0) {
if (uv_thread_join(&pData->thread) != 0) {
fnError("start udfd: failed to join udfd thread");
}
pData->needCleanUp = false;

@@ -299,10 +359,10 @@ void udfStopUdfd() {
atomic_store_32(&pData->stopCalled, 1);
pData->needCleanUp = false;
uv_barrier_destroy(&pData->barrier);
if(uv_async_send(&pData->stopAsync) != 0) {
if (uv_async_send(&pData->stopAsync) != 0) {
fnError("stop udfd: failed to send stop async");
}
if(uv_thread_join(&pData->thread) != 0) {
if (uv_thread_join(&pData->thread) != 0) {
fnError("stop udfd: failed to join udfd thread");
}
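The watcher code above follows a common libuv start-up handshake: the worker thread spawns udfd, publishes the spawn result, and both sides rendezvous at a `uv_barrier_t`; the starter then either tears the thread down (on error, via `uv_async_send` + `uv_thread_join`) or lets the loop run. A minimal sketch of that handshake, with illustrative names and none of the TDengine wrappers, assuming only the libuv API:

```c
/* Sketch of the start-up handshake: worker initializes a loop, publishes a
 * result, and meets the starter at a barrier before the loop runs. */
#include <stdio.h>
#include <uv.h>

typedef struct {
  uv_loop_t    loop;
  uv_thread_t  thread;
  uv_barrier_t barrier;
  uv_async_t   stopAsync;
  int          spawnErr;     /* set by the worker before the barrier */
} WatcherData;

static void stop_async_cb(uv_async_t *async) {
  uv_close((uv_handle_t *)async, NULL);   /* last handle closed -> uv_run returns */
}

static void watcher_thread(void *arg) {
  WatcherData *d = arg;
  int rc = uv_loop_init(&d->loop);
  if (rc == 0) rc = uv_async_init(&d->loop, &d->stopAsync, stop_async_cb);
  d->spawnErr = rc;                 /* publish the result before the rendezvous */
  uv_barrier_wait(&d->barrier);     /* the starter is waiting here */
  if (rc != 0) return;
  uv_run(&d->loop, UV_RUN_DEFAULT);
  uv_loop_close(&d->loop);
}

int main(void) {
  WatcherData d = {0};
  uv_barrier_init(&d.barrier, 2);               /* starter + watcher */
  uv_thread_create(&d.thread, watcher_thread, &d);
  uv_barrier_wait(&d.barrier);                  /* wait for the init result */
  uv_barrier_destroy(&d.barrier);
  if (d.spawnErr != 0) {
    fprintf(stderr, "watcher failed to start: %s\n", uv_strerror(d.spawnErr));
  } else {
    uv_async_send(&d.stopAsync);                /* ask the loop to shut down again */
  }
  uv_thread_join(&d.thread);
  return d.spawnErr;
}
```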
@@ -341,7 +401,7 @@ typedef void *QUEUE[2];
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))

/* Public macros. */
#define QUEUE_DATA(ptr, type, field) ((type *)((char *)(ptr)-offsetof(type, field)))
#define QUEUE_DATA(ptr, type, field) ((type *)((char *)(ptr) - offsetof(type, field)))

/* Important note: mutating the list while QUEUE_FOREACH is
* iterating over its elements results in undefined behavior.
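`QUEUE_DATA` is the classic `container_of` idiom: an intrusive queue stores only link fields inside each element, and the macro subtracts the field offset to recover the enclosing struct. A tiny standalone illustration (the `Task` type and field names are made up for the example):

```c
/* Sketch of the container_of pattern behind QUEUE_DATA: recover the
 * enclosing struct from a pointer to its embedded link field. */
#include <stddef.h>
#include <stdio.h>

#define CONTAINER_OF(ptr, type, field) ((type *)((char *)(ptr) - offsetof(type, field)))

typedef struct Task {
  int   id;
  void *queueLink[2];   /* plays the role of the intrusive QUEUE field */
} Task;

int main(void) {
  Task t = {.id = 42};
  void **link = t.queueLink;                        /* what a queue walk hands back */
  Task *owner = CONTAINER_OF(link, Task, queueLink);
  printf("recovered task id: %d\n", owner->id);     /* prints 42 */
  return 0;
}
```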
@@ -434,8 +494,8 @@ typedef struct SUdfcProxy {
QUEUE uvProcTaskQueue;

uv_mutex_t udfStubsMutex;
SArray *udfStubs; // SUdfcFuncStub
SArray *expiredUdfStubs; //SUdfcFuncStub
SArray *udfStubs; // SUdfcFuncStub
SArray *expiredUdfStubs; // SUdfcFuncStub

uv_mutex_t udfcUvMutex;
int8_t initialized;

@@ -458,7 +518,7 @@ typedef struct SUdfcUvSession {
typedef struct SClientUvTaskNode {
SUdfcProxy *udfc;
int8_t type;
int errCode;
int32_t errCode;

uv_pipe_t *pipe;

@@ -516,7 +576,7 @@ enum {
UDFC_STATE_STOPPING, // stopping after udfcClose
};

void getUdfdPipeName(char *pipeName, int32_t size);
void getUdfdPipeName(char *pipeName, int32_t size);
int32_t encodeUdfSetupRequest(void **buf, const SUdfSetupRequest *setup);
void *decodeUdfSetupRequest(const void *buf, SUdfSetupRequest *request);
int32_t encodeUdfInterBuf(void **buf, const SUdfInterBuf *state);

@@ -801,12 +861,12 @@ void *decodeUdfResponse(const void *buf, SUdfResponse *rsp) {
buf = decodeUdfTeardownResponse(buf, &rsp->teardownRsp);
break;
default:
rsp->code = TSDB_CODE_UDF_INTERNAL_ERROR;
rsp->code = TSDB_CODE_UDF_INTERNAL_ERROR;
fnError("decode udf response, invalid udf response type %d", rsp->type);
break;
}
if(buf == NULL) {
rsp->code = terrno;
if (buf == NULL) {
rsp->code = terrno;
fnError("decode udf response failed, code:0x%x", rsp->code);
}
return (void *)buf;

@@ -847,12 +907,12 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
udfBlock->numOfRows = block->info.rows;
udfBlock->numOfCols = taosArrayGetSize(block->pDataBlock);
udfBlock->udfCols = taosMemoryCalloc(taosArrayGetSize(block->pDataBlock), sizeof(SUdfColumn *));
if((udfBlock->udfCols) == NULL) {
if ((udfBlock->udfCols) == NULL) {
return terrno;
}
for (int32_t i = 0; i < udfBlock->numOfCols; ++i) {
udfBlock->udfCols[i] = taosMemoryCalloc(1, sizeof(SUdfColumn));
if(udfBlock->udfCols[i] == NULL) {
if (udfBlock->udfCols[i] == NULL) {
return terrno;
}
SColumnInfoData *col = (SColumnInfoData *)taosArrayGet(block->pDataBlock, i);

@@ -866,18 +926,18 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
if (IS_VAR_DATA_TYPE(udfCol->colMeta.type)) {
udfCol->colData.varLenCol.varOffsetsLen = sizeof(int32_t) * udfBlock->numOfRows;
udfCol->colData.varLenCol.varOffsets = taosMemoryMalloc(udfCol->colData.varLenCol.varOffsetsLen);
if(udfCol->colData.varLenCol.varOffsets == NULL) {
if (udfCol->colData.varLenCol.varOffsets == NULL) {
return terrno;
}
memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen);
udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows);
udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen);
if(udfCol->colData.varLenCol.payload == NULL) {
if (udfCol->colData.varLenCol.payload == NULL) {
return terrno;
}
if (col->reassigned) {
for (int32_t row = 0; row < udfCol->colData.numOfRows; ++row) {
char* pColData = col->pData + col->varmeta.offset[row];
char *pColData = col->pData + col->varmeta.offset[row];
int32_t colSize = 0;
if (col->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);

@@ -894,7 +954,7 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows);
int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen;
udfCol->colData.fixLenCol.nullBitmap = taosMemoryMalloc(udfCol->colData.fixLenCol.nullBitmapLen);
if(udfCol->colData.fixLenCol.nullBitmap == NULL) {
if (udfCol->colData.fixLenCol.nullBitmap == NULL) {
return terrno;
}
char *bitmap = udfCol->colData.fixLenCol.nullBitmap;

@@ -927,11 +987,11 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
code = bdGetColumnInfoData(block, 0, &col);
TAOS_CHECK_GOTO(code, &lino, _exit);

for (int i = 0; i < udfCol->colData.numOfRows; ++i) {
for (int32_t i = 0; i < udfCol->colData.numOfRows; ++i) {
if (udfColDataIsNull(udfCol, i)) {
colDataSetNULL(col, i);
} else {
char* data = udfColDataGetData(udfCol, i);
char *data = udfColDataGetData(udfCol, i);
code = colDataSetVal(col, i, data, false);
TAOS_CHECK_GOTO(code, &lino, _exit);
}
@@ -953,32 +1013,32 @@ int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SS
}

// create the basic block info structure
for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pInfo = input[i].columnData;
SColumnInfoData d = {0};
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData *pInfo = input[i].columnData;
SColumnInfoData d = {0};
d.info = pInfo->info;

TAOS_CHECK_GOTO(blockDataAppendColInfo(output, &d), &lino, _exit);
}

TAOS_CHECK_GOTO(blockDataEnsureCapacity(output, numOfRows), &lino, _exit);
TAOS_CHECK_GOTO(blockDataEnsureCapacity(output, numOfRows), &lino, _exit);

for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDest = taosArrayGet(output->pDataBlock, i);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData *pDest = taosArrayGet(output->pDataBlock, i);

SColumnInfoData* pColInfoData = input[i].columnData;
SColumnInfoData *pColInfoData = input[i].columnData;
TAOS_CHECK_GOTO(colDataAssign(pDest, pColInfoData, input[i].numOfRows, &output->info), &lino, _exit);

if (input[i].numOfRows < numOfRows) {
int32_t startRow = input[i].numOfRows;
int expandRows = numOfRows - startRow;
bool isNull = colDataIsNull_s(pColInfoData, (input+i)->numOfRows - 1);
int32_t expandRows = numOfRows - startRow;
bool isNull = colDataIsNull_s(pColInfoData, (input + i)->numOfRows - 1);
if (isNull) {
colDataSetNNULL(pDest, startRow, expandRows);
} else {
char* src = colDataGetData(pColInfoData, (input + i)->numOfRows - 1);
for (int j = 0; j < expandRows; ++j) {
TAOS_CHECK_GOTO(colDataSetVal(pDest, startRow+j, src, false), &lino, _exit);
char *src = colDataGetData(pColInfoData, (input + i)->numOfRows - 1);
for (int32_t j = 0; j < expandRows; ++j) {
TAOS_CHECK_GOTO(colDataSetVal(pDest, startRow + j, src, false), &lino, _exit);
}
}
}

@@ -1000,7 +1060,7 @@ int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) {
output->numOfRows = input->info.rows;

output->columnData = taosMemoryMalloc(sizeof(SColumnInfoData));
if(output->columnData == NULL) {
if (output->columnData == NULL) {
return terrno;
}
memcpy(output->columnData, taosArrayGet(input->pDataBlock, 0), sizeof(SColumnInfoData));

@@ -1012,11 +1072,11 @@ int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
// memory layout |---SUdfAggRes----|-----final result-----|---inter result----|
typedef struct SUdfAggRes {
int8_t finalResNum;
int8_t interResNum;
int8_t finalResNum;
int8_t interResNum;
int32_t interResBufLen;
char *finalResBuf;
char *interResBuf;
char *finalResBuf;
char *interResBuf;
} SUdfAggRes;

void onUdfcPipeClose(uv_handle_t *handle);

@@ -1026,8 +1086,8 @@ bool isUdfcUvMsgComplete(SClientConnBuf *connBuf);
void udfcUvHandleRsp(SClientUvConn *conn);
void udfcUvHandleError(SClientUvConn *conn);
void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf);
void onUdfcPipeWrite(uv_write_t *write, int status);
void onUdfcPipeConnect(uv_connect_t *connect, int status);
void onUdfcPipeWrite(uv_write_t *write, int32_t status);
void onUdfcPipeConnect(uv_connect_t *connect, int32_t status);
int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode *uvTask);
int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask);
int32_t udfcStartUvTask(SClientUvTaskNode *uvTask);

@@ -1037,7 +1097,7 @@ void udfStopAsyncCb(uv_async_t *async);
void constructUdfService(void *argsThread);
int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType);
int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle);
int compareUdfcFuncSub(const void *elem1, const void *elem2);
int32_t compareUdfcFuncSub(const void *elem1, const void *elem2);
int32_t doTeardownUdf(UdfcFuncHandle handle);

int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2,
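The expansion loop in `convertScalarParamToDataBlock` above pads any input column that is shorter than the longest one: it repeats the column's last value (or sets NULLs) until every column reaches `numOfRows`. A standalone restatement of that padding idea, with a made-up column type purely for illustration:

```c
/* Illustrative sketch (hypothetical types): pad a short column to `target`
 * rows by repeating its last value, mirroring the expansion loop above. */
#include <stdio.h>

#define MAX_ROWS 8

typedef struct {
  int values[MAX_ROWS];
  int numOfRows;
} IntColumn;

static void padColumn(IntColumn *col, int target) {
  int last = col->values[col->numOfRows - 1];   /* value to broadcast */
  for (int row = col->numOfRows; row < target; ++row) {
    col->values[row] = last;
  }
  col->numOfRows = target;
}

int main(void) {
  IntColumn col = {.values = {1, 2, 3}, .numOfRows = 3};
  padColumn(&col, 6);
  for (int i = 0; i < col.numOfRows; ++i) printf("%d ", col.values[i]);
  printf("\n");   /* prints: 1 2 3 3 3 3 */
  return 0;
}
```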
@@ -1062,9 +1122,9 @@ int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx);
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);

void cleanupNotExpiredUdfs();
void cleanupExpiredUdfs();
int compareUdfcFuncSub(const void *elem1, const void *elem2) {
void cleanupNotExpiredUdfs();
void cleanupExpiredUdfs();
int32_t compareUdfcFuncSub(const void *elem1, const void *elem2) {
SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1;
SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2;
return strcmp(stub1->udfName, stub2->udfName);

@@ -1150,21 +1210,22 @@ void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) {
void cleanupExpiredUdfs() {
int32_t i = 0;
SArray *expiredUdfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
if(expiredUdfStubs == NULL) {
if (expiredUdfStubs == NULL) {
fnError("cleanupExpiredUdfs: failed to init array");
return;
}
while (i < taosArrayGetSize(gUdfcProxy.expiredUdfStubs)) {
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.expiredUdfStubs, i);
if (stub->refCount == 0) {
fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle,
stub->refCount);
(void)doTeardownUdf(stub->handle);
} else {
fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName,
stub->refCount, stub->createTime, stub->handle);
fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p",
stub->udfName, stub->refCount, stub->createTime, stub->handle);
UdfcFuncHandle handle = stub->handle;
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
if(taosArrayPush(expiredUdfStubs, stub) == NULL) {
if (taosArrayPush(expiredUdfStubs, stub) == NULL) {
fnError("cleanupExpiredUdfs: failed to push udf stub to array");
}
} else {

@@ -1347,7 +1408,8 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
return code;
}

SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum};
SUdfInterBuf state = {
.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum};
SUdfInterBuf newState = {0};

udfCode = doCallUdfAggProcess(session, inputBlock, &state, &newState);

@@ -1391,8 +1453,9 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) {
udfRes->interResBuf = (char *)udfRes + sizeof(SUdfAggRes) + session->bytes;

SUdfInterBuf resultBuf = {0};
SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum};
int32_t udfCallCode = 0;
SUdfInterBuf state = {
.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum};
int32_t udfCallCode = 0;
udfCallCode = doCallUdfAggFinalize(session, &state, &resultBuf);
if (udfCallCode != 0) {
fnError("udfAggFinalize error. doCallUdfAggFinalize step. udf code:%d", udfCallCode);

@@ -1448,7 +1511,7 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *
SUdfResponse rsp = {0};
void *buf = decodeUdfResponse(uvTask->rspBuf.base, &rsp);
code = rsp.code;
if(code != 0) {
if (code != 0) {
fnError("udfc get udf task result failure. code: %d", code);
}

@@ -1474,18 +1537,18 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *
taosMemoryFree(uvTask->rspBuf.base);
} else {
code = uvTask->errCode;
if(code != 0) {
if (code != 0) {
fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__);
}
}
} else if (uvTask->type == UV_TASK_CONNECT) {
code = uvTask->errCode;
if(code != 0) {
if (code != 0) {
fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__);
}
} else if (uvTask->type == UV_TASK_DISCONNECT) {
code = uvTask->errCode;
if(code != 0) {
if (code != 0) {
fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__);
}
}

@@ -1620,7 +1683,7 @@ void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
}
}

void onUdfcPipeWrite(uv_write_t *write, int status) {
void onUdfcPipeWrite(uv_write_t *write, int32_t status) {
SClientUvConn *conn = write->data;
if (status < 0) {
fnError("udfc client connection %p write failed. status: %d(%s)", conn, status, uv_strerror(status));

@@ -1631,7 +1694,7 @@ void onUdfcPipeWrite(uv_write_t *write, int status) {
taosMemoryFree(write);
}

void onUdfcPipeConnect(uv_connect_t *connect, int status) {
void onUdfcPipeConnect(uv_connect_t *connect, int32_t status) {
SClientUvTaskNode *uvTask = connect->data;
if (status != 0) {
fnError("client connect error, task seq: %" PRId64 ", code: %s", uvTask->seqNum, uv_strerror(status));

@@ -1639,7 +1702,7 @@ void onUdfcPipeConnect(uv_connect_t *connect, int status) {
uvTask->errCode = status;

int32_t code = uv_read_start((uv_stream_t *)uvTask->pipe, udfcAllocateBuffer, onUdfcPipeRead);
if(code != 0) {
if (code != 0) {
fnError("udfc client connection %p read start failed. code: %d(%s)", uvTask->pipe, code, uv_strerror(code));
uvTask->errCode = code;
}
@@ -1678,13 +1741,12 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT
}
request.msgLen = bufLen;
void *bufBegin = taosMemoryMalloc(bufLen);
if(bufBegin == NULL) {
if (bufBegin == NULL) {
fnError("udfc create uv task, malloc buffer failed. size: %d", bufLen);
return terrno;
}
void *buf = bufBegin;
if(encodeUdfRequest(&buf, &request) <= 0)
{
if (encodeUdfRequest(&buf, &request) <= 0) {
fnError("udfc create uv task, encode request failed. size: %d", bufLen);
taosMemoryFree(bufBegin);
return TSDB_CODE_UDF_UV_EXEC_FAILURE;

@@ -1695,9 +1757,8 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT
} else if (uvTaskType == UV_TASK_DISCONNECT) {
uvTask->pipe = task->session->udfUvPipe;
}
if (uv_sem_init(&uvTask->taskSem, 0) != 0)
{
if (uvTaskType == UV_TASK_REQ_RSP) {
if (uv_sem_init(&uvTask->taskSem, 0) != 0) {
if (uvTaskType == UV_TASK_REQ_RSP) {
taosMemoryFree(uvTask->reqBuf.base);
}
fnError("udfc create uv task, init semaphore failed.");

@@ -1733,7 +1794,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
switch (uvTask->type) {
case UV_TASK_CONNECT: {
uv_pipe_t *pipe = taosMemoryMalloc(sizeof(uv_pipe_t));
if(pipe == NULL) {
if (pipe == NULL) {
fnError("udfc event loop start connect task malloc pipe failed.");
return terrno;
}

@@ -1745,7 +1806,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
uvTask->pipe = pipe;

SClientUvConn *conn = taosMemoryCalloc(1, sizeof(SClientUvConn));
if(conn == NULL) {
if (conn == NULL) {
fnError("udfc event loop start connect task malloc conn failed.");
taosMemoryFree(pipe);
return terrno;

@@ -1760,7 +1821,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
pipe->data = conn;

uv_connect_t *connReq = taosMemoryMalloc(sizeof(uv_connect_t));
if(connReq == NULL) {
if (connReq == NULL) {
fnError("udfc event loop start connect task malloc connReq failed.");
taosMemoryFree(pipe);
taosMemoryFree(conn);

@@ -1777,14 +1838,14 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
code = TSDB_CODE_UDF_PIPE_NOT_EXIST;
} else {
uv_write_t *write = taosMemoryMalloc(sizeof(uv_write_t));
if(write == NULL) {
if (write == NULL) {
fnError("udfc event loop start req_rsp task malloc write failed.");
return terrno;
}
write->data = pipe->data;
QUEUE *connTaskQueue = &((SClientUvConn *)pipe->data)->taskQueue;
QUEUE_INSERT_TAIL(connTaskQueue, &uvTask->connTaskQueue);
int err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipeWrite);
int32_t err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipeWrite);
if (err != 0) {
taosMemoryFree(write);
fnError("udfc event loop start req_rsp task uv_write failed. uvtask: %p, code: %s", uvTask, uv_strerror(err));

@@ -1874,7 +1935,7 @@ void udfStopAsyncCb(uv_async_t *async) {
}

void constructUdfService(void *argsThread) {
int32_t code = 0, lino = 0;
int32_t code = 0, lino = 0;
SUdfcProxy *udfc = (SUdfcProxy *)argsThread;
code = uv_loop_init(&udfc->uvLoop);
TAOS_CHECK_GOTO(code, &lino, _exit);

@@ -1891,7 +1952,7 @@ void constructUdfService(void *argsThread) {
QUEUE_INIT(&udfc->uvProcTaskQueue);
(void)uv_barrier_wait(&udfc->initBarrier);
// TODO return value of uv_run
int num = uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
int32_t num = uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
fnInfo("udfc uv loop exit. active handle num: %d", num);
(void)uv_loop_close(&udfc->uvLoop);

@@ -1909,7 +1970,7 @@ _exit:

int32_t udfcOpen() {
int32_t code = 0, lino = 0;
int8_t old = atomic_val_compare_exchange_8(&gUdfcProxy.initialized, 0, 1);
int8_t old = atomic_val_compare_exchange_8(&gUdfcProxy.initialized, 0, 1);
if (old == 1) {
return 0;
}

@@ -1927,12 +1988,12 @@ int32_t udfcOpen() {
code = uv_mutex_init(&proxy->udfStubsMutex);
TAOS_CHECK_GOTO(code, &lino, _exit);
proxy->udfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
if(proxy->udfStubs == NULL) {
if (proxy->udfStubs == NULL) {
fnError("udfc init failed. udfStubs: %p", proxy->udfStubs);
return -1;
}
proxy->expiredUdfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
if(proxy->expiredUdfStubs == NULL) {
if (proxy->expiredUdfStubs == NULL) {
taosArrayDestroy(proxy->udfStubs);
fnError("udfc init failed. expiredUdfStubs: %p", proxy->expiredUdfStubs);
return -1;

@@ -1956,10 +2017,10 @@ int32_t udfcClose() {

SUdfcProxy *udfc = &gUdfcProxy;
udfc->udfcState = UDFC_STATE_STOPPING;
if(uv_async_send(&udfc->loopStopAsync) != 0) {
if (uv_async_send(&udfc->loopStopAsync) != 0) {
fnError("udfc close error to send stop async");
}
if(uv_thread_join(&udfc->loopThread) != 0 ) {
if (uv_thread_join(&udfc->loopThread) != 0) {
fnError("udfc close errir to join loop thread");
}
uv_mutex_destroy(&udfc->taskQueueMutex);

@@ -1974,9 +2035,9 @@ int32_t udfcClose() {
}

int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) {
int32_t code = 0, lino = 0;
int32_t code = 0, lino = 0;
SClientUvTaskNode *uvTask = taosMemoryCalloc(1, sizeof(SClientUvTaskNode));
if(uvTask == NULL) {
if (uvTask == NULL) {
fnError("udfc client task: %p failed to allocate memory for uvTask", task);
return terrno;
}
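`udfcOpen()` above guards its one-time initialization with an atomic compare-and-swap on the `initialized` flag, so only the first caller runs the setup and later callers return immediately. A minimal sketch of that guard using standard C11 atomics (the TDengine `atomic_val_compare_exchange_8` wrapper is assumed to behave analogously):

```c
/* Sketch of CAS-guarded one-time initialization, analogous to the
 * `initialized` flag handling in udfcOpen()/udfcClose() above. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int initialized = 0;

static int moduleOpen(void) {
  int expected = 0;
  /* Only the thread that flips 0 -> 1 performs the setup. */
  if (!atomic_compare_exchange_strong(&initialized, &expected, 1)) {
    return 0;   /* already initialized by another caller */
  }
  printf("running one-time setup\n");
  return 0;
}

int main(void) {
  moduleOpen();   /* performs setup */
  moduleOpen();   /* no-op */
  return 0;
}
```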
@@ -2006,14 +2067,14 @@ _exit:
}

int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
int32_t code = TSDB_CODE_SUCCESS, lino = 0;
int32_t code = TSDB_CODE_SUCCESS, lino = 0;
SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
if(task == NULL) {
if (task == NULL) {
fnError("doSetupUdf, failed to allocate memory for task");
return terrno;
}
task->session = taosMemoryCalloc(1, sizeof(SUdfcUvSession));
if(task->session == NULL) {
if (task->session == NULL) {
fnError("doSetupUdf, failed to allocate memory for session");
taosMemoryFree(task);
return terrno;

@@ -2059,7 +2120,7 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf
return TSDB_CODE_UDF_PIPE_NOT_EXIST;
}
SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
if(task == NULL) {
if (task == NULL) {
fnError("udfc call udf. failed to allocate memory for task");
return terrno;
}

@@ -2163,8 +2224,8 @@ int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdf
int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output) {
int8_t callType = TSDB_UDF_CALL_SCALA_PROC;
SSDataBlock inputBlock = {0};
int32_t code = convertScalarParamToDataBlock(input, numOfCols, &inputBlock);
if(code != 0) {
int32_t code = convertScalarParamToDataBlock(input, numOfCols, &inputBlock);
if (code != 0) {
fnError("doCallUdfScalarFunc, convertScalarParamToDataBlock failed. code: %d", code);
return code;
}

@@ -2174,13 +2235,13 @@ int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t
err = convertDataBlockToScalarParm(&resultBlock, output);
taosArrayDestroy(resultBlock.pDataBlock);
}

blockDataFreeRes(&inputBlock);
return err;
}

int32_t doTeardownUdf(UdfcFuncHandle handle) {
int32_t code = TSDB_CODE_SUCCESS, lino = 0;;
int32_t code = TSDB_CODE_SUCCESS, lino = 0;
SUdfcUvSession *session = (SUdfcUvSession *)handle;

if (session->udfUvPipe == NULL) {

@@ -2190,7 +2251,7 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) {
}

SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
if(task == NULL) {
if (task == NULL) {
fnError("doTeardownUdf, failed to allocate memory for task");
taosMemoryFree(session);
return terrno;

@@ -1217,7 +1217,7 @@ int32_t udfdOpenClientRpc() {
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
TAOS_CHECK_RETURN(taosVersionStrToInt(version, &(rpcInit.compatibilityVer)));
TAOS_CHECK_RETURN(taosVersionStrToInt(td_version, &rpcInit.compatibilityVer));
global.clientRpc = rpcOpen(&rpcInit);
if (global.clientRpc == NULL) {
fnError("failed to init dnode rpc client");

@@ -1470,9 +1470,9 @@ static int32_t udfdParseArgs(int32_t argc, char *argv[]) {
}

static void udfdPrintVersion() {
(void)printf("udfd version: %s compatible_version: %s\n", version, compatible_version);
(void)printf("git: %s\n", gitinfo);
(void)printf("build: %s\n", buildinfo);
(void)printf("udfd version: %s compatible_version: %s\n", td_version, td_compatible_version);
(void)printf("git: %s\n", td_gitinfo);
(void)printf("build: %s\n", td_buildinfo);
}

static int32_t udfdInitLog() {
@@ -30,7 +30,7 @@ typedef struct SInsertParseContext {
bool forceUpdate;
bool needTableTagVal;
bool needRequest; // whether or not request server
bool isStmtBind; // whether is stmt bind
bool isStmtBind; // whether is stmt bind
} SInsertParseContext;

typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);

@@ -757,7 +757,7 @@ int32_t parseTagValue(SMsgBuf* pMsgBuf, const char** pSql, uint8_t precision, SS
STagVal val = {0};
int32_t code = parseTagToken(pSql, pToken, pTagSchema, precision, &val, pMsgBuf);
if (TSDB_CODE_SUCCESS == code) {
if (NULL == taosArrayPush(pTagVals, &val)){
if (NULL == taosArrayPush(pTagVals, &val)) {
code = terrno;
}
}

@@ -775,11 +775,14 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p
return terrno;
}
return insBuildCreateTbReq(pStmt->pCreateTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid,
pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags,
TSDB_DEFAULT_TABLE_TTL);
pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags,
TSDB_DEFAULT_TABLE_TTL);
}

int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) {
if (pToken->type == TK_NK_QUESTION) {
return buildInvalidOperationMsg(pMsgBuf, "insert into super table syntax is not supported for stmt");
}
if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER &&
pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL &&
pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && pToken->type != TK_NK_BIN &&

@@ -810,7 +813,7 @@ typedef struct SRewriteTagCondCxt {

static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) {
SValueNode* pValue = NULL;
int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue);
int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue);
if (NULL == pValue) {
return code;
}

@@ -1041,7 +1044,7 @@ static int32_t storeChildTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt
return TSDB_CODE_OUT_OF_MEMORY;
}

char tbFName[TSDB_TABLE_FNAME_LEN];
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName);
if (TSDB_CODE_SUCCESS != code) {
taosMemoryFree(pBackup);

@@ -1236,7 +1239,7 @@ static int32_t getTargetTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModi
}

static int32_t collectUseTable(const SName* pName, SHashObj* pTable) {
char fullName[TSDB_TABLE_FNAME_LEN];
char fullName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(pName, fullName);
if (TSDB_CODE_SUCCESS != code) {
return code;

@@ -1382,7 +1385,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
pStmt->pTableMeta, &pStmt->pCreateTblReq, pTableCxt, false, false);
}

char tbFName[TSDB_TABLE_FNAME_LEN];
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName);
if (TSDB_CODE_SUCCESS != code) {
return code;

@@ -1824,37 +1827,49 @@ static int32_t doGetStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt*
code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
break;
}
if (pCols->pColIndex[i] < numOfCols) {
const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]];
SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]);
code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value");

if (TK_NK_QUESTION == pToken->type) {
pCxt->isStmtBind = true;
if (pCols->pColIndex[i] == tbnameIdx) {
*bFoundTbName = true;
}
} else if (pCols->pColIndex[i] < tbnameIdx) {
const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]];
if (canParseTagsAfter) {
tagTokens[(*pNumOfTagTokens)] = *pToken;
tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema;
++(*pNumOfTagTokens);
} else {
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type);
if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) {
if (NULL == pCxt->pComCxt->pStmtCb) {
code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pToken->z);
break;
}
} else {
if (pCols->pColIndex[i] < numOfCols) {
const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]];
SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]);
code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value");
}
if (code == TSDB_CODE_SUCCESS) {
code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals,
&pStbRowsCxt->pTag);
} else if (pCols->pColIndex[i] < tbnameIdx) {
const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]];
if (canParseTagsAfter) {
tagTokens[(*pNumOfTagTokens)] = *pToken;
tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema;
++(*pNumOfTagTokens);
} else {
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type);
if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value");
}
if (code == TSDB_CODE_SUCCESS) {
code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals,
&pStbRowsCxt->pTag);
}
}
} else if (pCols->pColIndex[i] == tbnameIdx) {
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname");
}
}
} else if (pCols->pColIndex[i] == tbnameIdx) {
code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY);
if (TK_NK_VARIABLE == pToken->type) {
code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname");
}

if (code == TSDB_CODE_SUCCESS) {
code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, pToken, bFoundTbName);
if (code == TSDB_CODE_SUCCESS) {
code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, pToken, bFoundTbName);
}
}
}
@@ -1888,6 +1903,11 @@ static int32_t getStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
code = buildSyntaxErrMsg(&pCxt->msg, "tbname value expected", pOrigSql);
}

if (code == TSDB_CODE_SUCCESS && pStbRowsCxt->ctbName.tname[0] == '\0') {
*pGotRow = true;
return TSDB_CODE_TSC_STMT_TBNAME_ERROR;
}

bool ctbFirst = true;
char ctbFName[TSDB_TABLE_FNAME_LEN];
if (code == TSDB_CODE_SUCCESS) {

@@ -1923,8 +1943,8 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod
}
if (code == TSDB_CODE_SUCCESS) {
code = insBuildCreateTbReq(pStbRowsCxt->pCreateCtbReq, pStbRowsCxt->ctbName.tname, pStbRowsCxt->pTag,
pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames,
getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL);
pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames,
getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL);
pStbRowsCxt->pTag = NULL;
}

@@ -1933,9 +1953,9 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod
code = tNameExtractFullName(&pStbRowsCxt->ctbName, ctbFName);
SVgroupInfo vg;
SRequestConnInfo conn = {.pTrans = pCxt->pComCxt->pTransporter,
.requestId = pCxt->pComCxt->requestId,
.requestObjRefId = pCxt->pComCxt->requestRid,
.mgmtEps = pCxt->pComCxt->mgmtEpSet};
.requestId = pCxt->pComCxt->requestId,
.requestObjRefId = pCxt->pComCxt->requestRid,
.mgmtEps = pCxt->pComCxt->mgmtEpSet};
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetTableHashVgroup(pCxt->pComCxt->pCatalog, &conn, &pStbRowsCxt->ctbName, &vg);
}

@@ -1979,11 +1999,47 @@ static void clearStbRowsDataContext(SStbRowsDataContext* pStbRowsCxt) {
taosMemoryFreeClear(pStbRowsCxt->pCreateCtbReq);
}

static int32_t parseStbBoundInfo(SVnodeModifyOpStmt* pStmt, SStbRowsDataContext* pStbRowsCxt,
STableDataCxt** ppTableDataCxt) {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
if (pStmt->usingTableProcessing) {
pStmt->pTableMeta->uid = 0;
}

code = insGetTableDataCxt(pStmt->pTableBlockHashObj, tbFName, strlen(tbFName), pStmt->pTableMeta,
&pStmt->pCreateTblReq, ppTableDataCxt, false, true);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

insDestroyBoundColInfo(&((*ppTableDataCxt)->boundColsInfo));
(*ppTableDataCxt)->boundColsInfo = pStbRowsCxt->boundColsInfo;
(*ppTableDataCxt)->boundColsInfo.numOfCols = pStbRowsCxt->boundColsInfo.numOfBound;
(*ppTableDataCxt)->boundColsInfo.numOfBound = pStbRowsCxt->boundColsInfo.numOfBound;
(*ppTableDataCxt)->boundColsInfo.hasBoundCols = pStbRowsCxt->boundColsInfo.hasBoundCols;
(*ppTableDataCxt)->boundColsInfo.pColIndex = taosMemoryCalloc(pStbRowsCxt->boundColsInfo.numOfBound, sizeof(int16_t));
if (NULL == (*ppTableDataCxt)->boundColsInfo.pColIndex) {
return terrno;
}
(void)memcpy((*ppTableDataCxt)->boundColsInfo.pColIndex, pStbRowsCxt->boundColsInfo.pColIndex,
sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound);
return TSDB_CODE_SUCCESS;
}

static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, const char** ppSql,
SStbRowsDataContext* pStbRowsCxt, bool* pGotRow, SToken* pToken,
STableDataCxt** ppTableDataCxt) {
bool bFirstTable = false;
int32_t code = getStbRowValues(pCxt, pStmt, ppSql, pStbRowsCxt, pGotRow, pToken, &bFirstTable);

if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR && *pGotRow) {
return parseStbBoundInfo(pStmt, pStbRowsCxt, ppTableDataCxt);
}

if (code != TSDB_CODE_SUCCESS || !*pGotRow) {
return code;
}

@@ -2176,8 +2232,8 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
if (code == TSDB_CODE_SUCCESS) {
SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt;
void* pData = pTableDataCxt;
code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData,
POINTER_BYTES);
code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid),
&pData, POINTER_BYTES);
if (TSDB_CODE_SUCCESS != code) {
break;
}

@@ -2249,7 +2305,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt
if (!pStmt->stbSyntax && numOfRows > 0) {
void* pData = rowsDataCxt.pTableDataCxt;
code = taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData,
POINTER_BYTES);
POINTER_BYTES);
}

return code;

@@ -2363,8 +2419,7 @@ static int32_t constructStbRowsDataContext(SVnodeModifyOpStmt* pStmt, SStbRowsDa
if (TSDB_CODE_SUCCESS == code) {
// col values and bound cols info of STableDataContext is not used
pStbRowsCxt->aColVals = taosArrayInit(getNumOfColumns(pStbRowsCxt->pStbMeta), sizeof(SColVal));
if (!pStbRowsCxt->aColVals)
code = terrno;
if (!pStbRowsCxt->aColVals) code = terrno;
}
if (TSDB_CODE_SUCCESS == code) {
code = insInitColValues(pStbRowsCxt->pStbMeta, pStbRowsCxt->aColVals);

@@ -2422,9 +2477,6 @@ static int32_t parseInsertStbClauseBottom(SInsertParseContext* pCxt, SVnodeModif
// 1. [(tag1_name, ...)] ...
// 2. VALUES ... | FILE ...
static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) {
return buildInvalidOperationMsg(&pCxt->msg, "insert into super table syntax is not supported for stmt");
}
if (!pStmt->stbSyntax) {
STableDataCxt* pTableCxt = NULL;
int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pTableCxt);

@@ -2511,9 +2563,9 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
}

// db.? situation,ensure that the only thing following the '.' mark is '?'
char *tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true);
char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true);
if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) {
char *tbName = NULL;
char* tbName = NULL;
if (NULL == pCxt->pComCxt->pStmtCb) {
return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
}

@@ -2528,7 +2580,8 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
if (pCxt->isStmtBind) {
if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) {
// In SQL statements, the table name has already been specified.
parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", pCxt->pComCxt->requestId);
parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param",
pCxt->pComCxt->requestId);
}
}

@@ -2614,7 +2667,7 @@ static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p);

static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) {
SVnodeModifyOpStmt* pStmt = NULL;
int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt);
int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt);
if (NULL == pStmt) {
return code;
}

@@ -2729,7 +2782,7 @@ static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) {
return terrno;
}
SSchema* pSchema = getTableTagSchema(pMeta);
int32_t code = 0;
int32_t code = 0;
for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) {
if (NULL == taosArrayPush(*pTagName, pSchema[i].name)) {
code = terrno;

@@ -2834,7 +2887,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery)
}
if (NULL == pStmt->pTableBlockHashObj) {
pStmt->pTableBlockHashObj =
taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
}
if (NULL == pStmt->pVgroupsHashObj || NULL == pStmt->pTableBlockHashObj) {
code = TSDB_CODE_OUT_OF_MEMORY;

@@ -2866,7 +2919,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR

static int32_t setRefreshMeta(SQuery* pQuery) {
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
int32_t code = 0;
int32_t code = 0;

if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) {
taosArrayDestroy(pQuery->pTableList);

@@ -3065,9 +3118,10 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
.forceUpdate = (NULL != pCatalogReq ? pCatalogReq->forceUpdate : false),
.isStmtBind = pCxt->isStmtBind};

int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot);
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot);
code = parseInsertSqlImpl(&context, pStmt);
}
if (TSDB_CODE_SUCCESS == code) {
code = setNextStageInfo(&context, *pQuery, pCatalogReq);

@@ -3076,8 +3130,8 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
QUERY_EXEC_STAGE_SCHEDULE == (*pQuery)->execStage) {
code = setRefreshMeta(*pQuery);
}
insDestroyBoundColInfo(&context.tags);

insDestroyBoundColInfo(&context.tags);
// if no data to insert, set emptyMode to avoid request server
if (!context.needRequest) {
(*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT;
@@ -242,7 +242,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
}

code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName,
pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
pTag = NULL;

end:

@@ -594,7 +594,7 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c
}

code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName,
pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
pTag = NULL;

end:

@@ -797,6 +797,10 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind,
for (int c = 0; c < boundInfo->numOfBound; ++c) {
SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]];
SColData* pCol = taosArrayGet(pCols, c);
if (pCol == NULL || pColSchema == NULL) {
code = buildInvalidOperationMsg(&pBuf, "get column schema or column data failed");
goto _return;
}

if (bind[c].num != rowNum) {
code = buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same");

@@ -886,7 +890,7 @@ _return:

int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum,
TAOS_FIELD_E** fields, uint8_t timePrec) {
if (fields) {
if (fields != NULL) {
*fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E));
if (NULL == *fields) {
return terrno;

@@ -910,6 +914,44 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc
return TSDB_CODE_SUCCESS;
}

int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_STB** fields,
STableMeta* pMeta) {
if (fields != NULL) {
*fields = taosMemoryCalloc(boundColsInfo.numOfBound, sizeof(TAOS_FIELD_STB));
if (NULL == *fields) {
return terrno;
}

SSchema* schema = &pSchema[boundColsInfo.pColIndex[0]];
if (TSDB_DATA_TYPE_TIMESTAMP == schema->type) {
(*fields)[0].precision = pMeta->tableInfo.precision;
}

for (int32_t i = 0; i < boundColsInfo.numOfBound; ++i) {
int16_t idx = boundColsInfo.pColIndex[i];

if (idx == pMeta->tableInfo.numOfColumns + pMeta->tableInfo.numOfTags) {
(*fields)[i].field_type = TAOS_FIELD_TBNAME;
tstrncpy((*fields)[i].name, "tbname", sizeof((*fields)[i].name));
continue;
} else if (idx < pMeta->tableInfo.numOfColumns) {
(*fields)[i].field_type = TAOS_FIELD_COL;
} else {
(*fields)[i].field_type = TAOS_FIELD_TAG;
}

schema = &pSchema[idx];
tstrncpy((*fields)[i].name, schema->name, sizeof((*fields)[i].name));
(*fields)[i].type = schema->type;
(*fields)[i].bytes = schema->bytes;
}
}

*fieldNum = boundColsInfo.numOfBound;

return TSDB_CODE_SUCCESS;
}

int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields) {
STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
SBoundColInfo* tags = (SBoundColInfo*)boundTags;

@@ -939,7 +981,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel
SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta);
if (pDataBlock->boundColsInfo.numOfBound <= 0) {
*fieldNum = 0;
if (fields) {
if (fields != NULL) {
*fields = NULL;
}

@@ -952,6 +994,23 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel
return TSDB_CODE_SUCCESS;
}

int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields) {
STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta);
if (pDataBlock->boundColsInfo.numOfBound <= 0) {
*fieldNum = 0;
if (fields != NULL) {
*fields = NULL;
}

return TSDB_CODE_SUCCESS;
}

CHECK_CODE(buildStbBoundFields(pDataBlock->boundColsInfo, pSchema, fieldNum, fields, pDataBlock->pMeta));

return TSDB_CODE_SUCCESS;
}

int32_t qResetStmtColumns(SArray* pCols, bool deepClear) {
int32_t colNum = taosArrayGetSize(pCols);
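The new `buildStbBoundFields()` classifies each bound index by range: indexes below the column count are data columns, the index equal to `numOfColumns + numOfTags` is the table name, and anything in between is a tag. A standalone restatement of that index arithmetic, with made-up schema sizes purely for illustration:

```c
/* Illustrative restatement of the bound-index classification used in
 * buildStbBoundFields() above; the schema sizes here are hypothetical. */
#include <stdio.h>

enum { FIELD_COL, FIELD_TAG, FIELD_TBNAME };

static int classifyBoundIndex(int idx, int numOfColumns, int numOfTags) {
  if (idx == numOfColumns + numOfTags) return FIELD_TBNAME;
  if (idx < numOfColumns) return FIELD_COL;
  return FIELD_TAG;
}

int main(void) {
  int numOfColumns = 3, numOfTags = 2;   /* hypothetical super table schema */
  int bound[] = {0, 1, 3, 5};            /* ts, one column, one tag, tbname */
  for (int i = 0; i < 4; ++i) {
    printf("index %d -> %d\n", bound[i], classifyBoundIndex(bound[i], numOfColumns, numOfTags));
  }
  return 0;
}
```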