Merge branch 'main' into enh/TD-32615/lemon

Shengliang Guan 2024-11-06 20:52:36 +08:00
commit 1f7bd1d179
448 changed files with 44073 additions and 5141 deletions

.github/workflows/taoskeeper-ci.yml

@ -0,0 +1,58 @@
name: TaosKeeper CI
on:
  push:
    paths:
      - tools/keeper/**
jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18
      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake libgeos-dev
      - name: Install TDengine
        run: |
          mkdir debug
          cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper
      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &
      - name: Start taosadapter
        run: nohup sudo taosadapter &
      - name: Run tests with coverage
        working-directory: tools/keeper
        run: |
          go mod tidy
          sudo go test -v -ldflags="-X 'github.com/taosdata/taoskeeper/version.IsEnterprise=true'" -coverpkg=./... -coverprofile=coverage.out ./...
          go tool cover -func=coverage.out
      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi


@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
set(TD_BUILD_KEEPER_INTERNAL FALSE)
# set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF()
IF("${BUILD_KEEPER}" STREQUAL "")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "false")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "true")
SET(TD_BUILD_KEEPER TRUE)
ELSEIF(${BUILD_KEEPER} MATCHES "internal")
SET(TD_BUILD_KEEPER FALSE)
SET(TD_BUILD_KEEPER_INTERNAL TRUE)
ELSE()
SET(TD_BUILD_KEEPER FALSE)
ENDIF()
IF("${BUILD_TOOLS}" STREQUAL "")
IF(TD_LINUX)
IF(TD_ARM_32)


@ -272,11 +272,19 @@ if(${TD_DARWIN})
endif(${TD_DARWIN})
add_subdirectory(zlib EXCLUDE_FROM_ALL)
if(${TD_DARWIN})
target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
    zlibstatic
    PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib
)
if(${TD_DARWIN})
target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
    zlib
    PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib


@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
```
If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.


@ -4,6 +4,6 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
    "@tdengine/websocket": "^3.1.1"
}
}


@ -3,7 +3,6 @@ const taos = require("@tdengine/websocket");
let dsn = 'ws://localhost:6041';
async function createConnect() {
    try {
        let conf = new taos.WSConfig(dsn);
        conf.setUser('root');


@ -10,7 +10,6 @@ const groupId = "group1";
const clientId = "client1";
async function createConsumer() {
    let groupId = "group1";
    let clientId = "client1";
    let configMap = new Map([


@ -23,17 +23,18 @@ docker pull tdengine/tdengine:3.3.3.0
Then simply run the following command:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```
Note: the TDengine 3.0 server uses only TCP port 6030. Port 6041 is used by taosAdapter for the REST service. Port 6043 is used by taosKeeper. TCP ports 6044-6049 are used by taosAdapter for third-party application access and can be opened as needed.
UDP ports 6044 and 6045 are the statsd- and collectd-format ingestion interfaces and can be opened as needed. Port 6060 is used by taosExplorer. For details on port usage, see [Network Port Requirements](../../operation/planning#网络端口要求).
If you need to persist data to a folder on the host, run the following command instead:
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
  -v ~/data/taos/dnode/log:/var/log/taos \
  -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine
```
:::note
@ -121,4 +122,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```
In the query above, the system-provided pseudo-column _wstart gives the start time of each window.


@ -319,4 +319,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```
In the query above, the system-provided pseudo-column _wstart gives the start time of each window.


@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```
In the query above, the system-provided pseudo-column _wstart gives the start time of each window.


@ -106,7 +106,7 @@ CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16;
```
This SQL statement creates a database named `power`; the parameters are explained below (a quick verification query follows the list):
- `PRECISION 'ms'`: the time-series data in this database uses timestamps with millisecond (ms) precision
- `KEEP 3650`: the data in this database is retained for 3650 days; data older than 3650 days is deleted automatically
- `DURATION 10`: every 10 days of data are stored in one data file
- `BUFFER 16`: writes use a memory pool of 16 MB.
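A minimal way to confirm that these options took effect is to read them back from the system catalog; this sketch simply selects the row for the `power` database from `INFORMATION_SCHEMA.INS_DATABASES`:
```sql
-- Read back the options of the database created above (run in the taos CLI).
SELECT * FROM information_schema.ins_databases WHERE name = 'power';
```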


@ -116,10 +116,11 @@ create stream if not exists count_history_s fill_history 1 into count_history as
### Trigger modes for stream processing
When creating a stream, the trigger mode of the stream computation can be specified with the TRIGGER clause. For non-window computations triggering is real time; for window computations, 4 trigger modes are currently provided, with WINDOW_CLOSE as the default.
1. AT_ONCE: triggered immediately on write.
2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by the event time and can be used together with watermark).
3. MAX_DELAY time: triggered when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, the computation is triggered.
4. FORCE_WINDOW_CLOSE: uses the current system time to compute and push out only the result of the window that is currently closing. The window is computed exactly once, at the moment it closes, and is not recomputed afterwards. This mode currently supports only INTERVAL windows (without sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1, and FILL supports only PREV, NULL, NONE, and VALUE (a usage sketch follows below).
Window closing is determined by the event time; if the event stream is interrupted or keeps lagging, the event time cannot advance and the latest results may not be obtainable.
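A minimal sketch of a stream using this trigger mode; the stream, target table, source table, and column names are illustrative, and the option combination follows the constraints listed above:
```sql
CREATE STREAM IF NOT EXISTS power_1m
  TRIGGER FORCE_WINDOW_CLOSE
  IGNORE EXPIRED 1
  IGNORE UPDATE 1
INTO power_1m_agg AS
  SELECT _wstart, AVG(current) AS avg_current, MAX(voltage) AS max_voltage
  FROM meters
  INTERVAL(1m);
```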


@ -14,18 +14,26 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx
## Supported data sources
The data sources currently supported by TDengine are as follows:
| Data source | Supported versions | Description |
| --- | --- | --- |
| Aveva PI System | PI AF Server Version 2.10.9.593 or above | An industrial data management and analytics platform, formerly OSIsoft PI System, that collects, integrates, analyzes, and visualizes industrial data in real time, helping enterprises make intelligent decisions and manage operations precisely |
| Aveva Historian | AVEVA Historian 2020 RS SP1 | Industrial big-data analytics software, formerly Wonderware Historian, designed for industrial environments to store, manage, and analyze real-time and historical data from a variety of industrial devices and sensors |
| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC stands for Open Platform Communications, an open, standardized communication protocol for exchanging data between automation devices from different vendors. It was originally developed by Microsoft to address the poor interoperability between devices in industrial control. The OPC protocol was first released in 1996 as OPC DA (Data Access), mainly for real-time data acquisition and control. |
| OPC UA | KeepWare KEPServerEx 6.5 | In 2006 the OPC Foundation released the OPC UA (Unified Architecture) standard, a service-based, object-oriented protocol with greater flexibility and scalability that has become the mainstream version of the OPC protocol |
| MQTT | emqx: 3.0.0 to 5.7.1<br/> hivemq: 4.0.0 to 4.31.0<br/> mosquitto: 1.4.4 to 2.0.18 | Short for Message Queuing Telemetry Transport, a lightweight publish/subscribe messaging protocol designed for low-overhead, low-bandwidth instant messaging, widely used for the IoT, small devices, mobile applications, and similar scenarios. |
| Kafka | 2.11 ~ 3.8.0 | An open-source stream-processing platform developed by the Apache Software Foundation, mainly used for processing real-time data and providing a unified, high-throughput, low-latency messaging system. Its speed, scalability, durability, and distributed design let it handle hundreds of thousands of read/write operations per second and support thousands of clients while keeping data reliable and available. |
| InfluxDB | 1.7, 1.8, 2.0-2.7 | InfluxDB is a popular open-source time-series database optimized for handling large volumes of time-series data. |
| OpenTSDB | 2.4.1 | A distributed, scalable time-series database built on HBase. It is mainly used to store, index, and serve metrics collected from large-scale clusters (network devices, operating systems, applications, and so on), making the data easier to access and visualize. |
| MySQL | 5.6, 5.7, 8.0+ | MySQL is one of the most popular relational database management systems. Thanks to its small footprint, speed, low total cost of ownership, and above all its open-source nature, it is the database of choice for most small, medium, and large websites. |
| Oracle | 11G/12c/19c | Oracle Database is a widely used relational database management system with good portability, ease of use, and rich functionality, suitable for all kinds of large, medium, and small computing environments. It is an efficient, reliable solution built for high throughput. |
| PostgreSQL | v15.0+ | PostgreSQL is a very powerful open-source client/server relational database management system with many of the features of large commercial RDBMSs, including transactions, sub-selects, triggers, views, foreign-key referential integrity, and sophisticated locking. |
| SQL Server | 2012/2022 | Microsoft SQL Server is a relational database management system developed by Microsoft, known for its ease of use, scalability, and tight integration with related software. |
| MongoDB | 3.6+ | MongoDB is a product positioned between relational and non-relational databases and is widely used in content management systems, mobile applications, the IoT, and many other fields. |
| CSV | - | Short for Comma Separated Values, a comma-delimited plain-text file format commonly used with spreadsheet and database software. |
| TDengine 2.x | 2.4 or 2.6+ | Legacy TDengine versions, no longer maintained; upgrading to the latest 3.0 release is recommended. |
| TDengine 3.x | Source version+ | Uses TMQ to subscribe to a specified database or supertable in TDengine (see the example after this table). |
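A sketch of the TDengine 3.x subscription path: a TMQ topic is created over the source database and the data-in task then subscribes to it (topic and database names are illustrative):
```sql
CREATE TOPIC IF NOT EXISTS power_topic AS DATABASE power;
-- A consumer (for example taosX or a client application) then subscribes to power_topic.
```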
## Data extraction, filtering, and transformation


@ -0,0 +1,54 @@
---
title: "ARIMA"
sidebar_label: "ARIMA"
---
This section describes how to use the ARIMA algorithm model.
## Overview
ARIMA, the Autoregressive Integrated Moving Average model, also written ARIMA(p,d,q), is one of the most common statistical models for time-series forecasting.
ARIMA is an autoregressive model: it needs only the variable itself to predict subsequent values. ARIMA requires the time series to be **stationary**, or stationary after differencing; with non-stationary data it **cannot** produce correct results.
> A stationary time series is one whose properties do not change with the time of observation. A series with trend or seasonality is not stationary, because the trend and the seasonality make it behave differently in different periods.
The following parameters can be supplied dynamically to control the fitting of a suitable ARIMA model during forecasting.
- p: order of the autoregressive part
- d: order of differencing
- q: order of the moving-average part
### Parameters
The analysis platform uses an automated ARIMA procedure, so each run automatically fits the most suitable model to the input data and then produces the forecast from that model.
|Parameter|Description|Required|
|---|---|-----|
|period|Number of data points per cycle in the input time series; if not set or set to 0, a non-seasonal ARIMA model is used|Optional|
|start_p|Starting order of the autoregressive part; an integer starting from 0, values above 10 are not recommended|Optional|
|max_p|Maximum order of the autoregressive part; an integer starting from 0, values above 10 are not recommended|Optional|
|start_q|Starting order of the moving-average part; an integer starting from 0, values above 10 are not recommended|Optional|
|max_q|Maximum order of the moving-average part; an integer starting from 0, values above 10 are not recommended|Optional|
|d|Order of differencing|Optional|
The four parameters `start_p`, `max_p`, `start_q`, and `max_q` bound the range over which the search for the best model runs. For the same input data, the wider the range, the more resources are consumed and the longer the system takes to respond.
### Example and results
Forecast column i32, where every 10 points of the input form one cycle; start_p starts at 1 with a maximum of 5, start_q starts at 1 with a maximum of 5, and the result returns the bounds of the 95% confidence interval.
```
FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5")
```
```json5
{
"rows": fc_rows,  // number of rows in the result
"period": period, // periodicity of the result, same as the input
"alpha": alpha,   // confidence level of the result, same as the input
"algo": "arima",  // algorithm used to produce the result
"mse": mse,       // minimum mean squared error (MSE) of the model fitted to the input series
"res": res        // results in column form
}
```
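For reference, the FORECAST call above is normally embedded in a full query; this sketch uses the sample table `ai.ftb` and the forecast pseudo-columns that appear in the examples later in this commit:
```sql
SELECT _flow, _fhigh, _frowts,
       FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5")
FROM ai.ftb;
```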
### References
- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model
- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415


@ -0,0 +1,43 @@
---
title: "HoltWinters"
sidebar_label: "HoltWinters"
---
This section describes how to use the HoltWinters algorithm model.
## Overview
HoltWinters, also known as multiple exponential smoothing (EMA), is suited to non-stationary series with a linear trend and periodic fluctuations. It uses exponential smoothing to keep adapting the model parameters to the changes of the non-stationary series and produces **short-term** forecasts of the future trend.
HoltWinters has two variants of the seasonal component: the **additive** model is usually chosen when the seasonal variation stays roughly constant over the series, while the **multiplicative** model is usually chosen when the seasonal variation scales with the level of the series.
This model does not compute a confidence interval for its results; the upper and lower bounds of the 95% confidence interval are identical to the forecast itself.
### Parameters
The analysis platform uses an automated HoltWinters procedure, so each run automatically fits the most suitable model to the input data and then produces the forecast from that model.
|Parameter|Description|Required|
|---|---|---|
|period|Number of data points per cycle in the input time series; if not set or set to 0, simple exponential smoothing is used to fit the data and produce the forecast|Optional|
|trend|Whether the trend component uses the additive or the multiplicative model|Optional|
|seasonal|Whether the seasonal component uses the additive or the multiplicative model|Optional|
Both `trend` and `seasonal` accept `add` (additive model) or `mul` (multiplicative model).
### Example and results
Forecast column i32, where every 10 points of the input form one cycle, using the multiplicative model for both trend and seasonality:
```
FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")
```
```json5
{
"rows": rows,          // number of rows in the result
"period": period,      // periodicity of the result, same as the input; 0 if there is no periodicity
"algo": 'holtwinters', // algorithm used to produce the result
"mse": mse,            // minimum mean squared error
"res": res             // the actual results, returned in column form; normally two columns, [timestamp][fc_results]
}
```
### References
- https://en.wikipedia.org/wiki/Exponential_smoothing
- https://orangematter.solarwinds.com/2019/12/15/holt-winters-forecasting-simplified/


@ -0,0 +1,46 @@
---
title: "Anomaly-detection"
sidebar_label: "Anomaly-detection"
---
This section describes how to use the anomaly-detection algorithm models.
## Overview
The analysis platform provides 6 anomaly-detection models in 3 categories: statistics-based, data-density-based, and deep-learning-based detection models. When no detection method is specified, the iqr method is used by default.
### Statistical anomaly-detection methods
- k-sigma<sup>[1]</sup>: the ***68-95-99.7 rule***. ***k*** defaults to 3, i.e. the boundary is 3 standard deviations around the series mean, and values beyond it are anomalies. KSigma assumes the data as a whole follow a normal distribution; a point that deviates from the mean by more than k standard deviations is treated as an anomaly.
|Parameter|Description|Required|Default|
|---|---|---|---|
|k|Number of standard deviations|Optional|3|
- IQR<sup>[2]</sup>: the interquartile range (IQR) is a measure of variability. Quartiles divide a rank-ordered data set into four equal parts: Q1 (first quartile), Q2 (second quartile), and Q3 (third quartile). The IQR is defined as Q3 - Q1, and points beyond Q3 + 1.5 × IQR or below Q1 - 1.5 × IQR are treated as anomalies. No input parameters.
- Grubbs<sup>[3]</sup>: also known as Grubbs' test, the maximum normalized residual test. It is typically used to test whether the maximum or minimum deviates anomalously from the mean; the univariate data set must follow an approximately standard normal distribution. It cannot be used on non-normally distributed data. No input parameters.
- SHESD<sup>[4]</sup>: seasonal ESD detection. ESD can detect multiple anomalies in time-series data. An upper bound ***k*** on the proportion of anomalies must be specified; the worst case is at most 49.9%. The proportion of anomalies in a data set is normally no more than 5%.
|Parameter|Description|Required|Default|
|---|---|---|---|
|k|Proportion of anomalies in the input data set, in the range $`1\le K \le 49.9`$|Optional|5|
### Density-based detection method
LOF<sup>[5]</sup>: the Local Outlier Factor algorithm is a density-based local outlier detection algorithm proposed by Breunig et al. in 2000, suited to data whose clusters have widely differing densities. Based on how densely each point's neighborhood is populated, it first computes a local reachability density for each point, then derives a local outlier factor from it that indicates how much of an outlier the point is: the larger the factor, the more anomalous the point; the smaller, the less anomalous. Finally the top(n) points with the largest factors are returned.
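The detectors listed here are invoked through the ANOMALY_WINDOW clause documented later in this commit; a sketch applying the LOF detector to the sample table `ai.atb` used in those examples:
```sql
SELECT _wstart, _wend, COUNT(*)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=lof");
```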
### Deep-learning-based detection method
An anomaly-detection model based on an autoencoder. It can give good results on periodic data, but the model must be trained on the input time series and the trained model must be deployed into the service directory before it can be run and used.
### References
1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
2. https://en.wikipedia.org/wiki/Interquartile_range
3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 19. doi:10.1155/2015/708948.
4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93-104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.


@ -0,0 +1,170 @@
---
title: "addins"
sidebar_label: "addins"
---
This section explains how to integrate your own forecasting and anomaly-detection algorithms into the TDengine analysis platform so that they can be invoked through SQL statements.
## Directory layout
![Data analysis directory layout](./pic/dir.png)
|Directory|Description|
|---|---|
|taos|Python source directory. It contains algo (the algorithms themselves, with ad for anomaly-detection code and fc for forecasting code), misc (miscellaneous files), and test (unit and integration tests)|
|script|Installation and release scripts|
|model|Models trained on specific data sets|
|cfg|Configuration files|
## Conventions and restrictions
Python files that define anomaly-detection algorithms must be placed in /taos/algo/ad, and forecasting algorithms in /taos/algo/fc, so that the system can load the Python files in those directories at startup.
### Class naming convention
Algorithm class names must start with an underscore and end with Service. For example, _KsigmaService is the implementation class of the KSigma anomaly-detection algorithm.
### Class inheritance convention
- Anomaly-detection algorithms must inherit from `AbstractAnomalyDetectionService` and implement its core abstract method `execute`.
- Forecasting algorithms must inherit from `AbstractForecastService` and likewise implement its core abstract method `execute`.
### Class attribute initialization
Each algorithm class must statically initialize two class attributes:
- `name`: the keyword that triggers the algorithm, in lowercase letters
- `desc`: a description of the algorithm
### Input and output conventions of the core method
`execute` is the core method of the algorithm. By the time it is called, `self.list` already holds the input array.
Anomaly-detection output
The return value of `execute` is an array of the same length as `self.list`; positions whose value is -1 are the anomalies. For example, for the input array [2, 2, 2, 2, 100], if 100 is an anomaly the return value is [1, 1, 1, 1, -1].
Forecasting output
For forecasting algorithms, the object attributes of `AbstractForecastService` are as follows:
|Attribute|Description|Default|
|---|---|---|
|period|Periodicity of the input time series, i.e. how many data points make up a complete cycle; set it to 0 if there is no periodicity|0|
|start_ts|Start time of the forecast results|0|
|time_step|Time interval between two consecutive forecast points|0|
|fc_rows|Number of forecast values|0|
|return_conf|Whether the forecast includes a confidence interval; if not, the upper and lower bounds equal the forecast itself|1|
|conf|Confidence-interval quantile|0.05|
The forecast is returned in the following form:
```python
return {
    "rows": self.fc_rows,   # number of forecast rows
    "period": self.period,  # periodicity of the data, same as the input
    "algo": "holtwinters",  # algorithm used for the forecast
    "mse": mse,             # mse of the forecast algorithm
    "res": res              # result arrays: [timestamps, forecasts, lower confidence bounds, upper confidence bounds]
}
```
## Example code
```python
import numpy as np
from service import AbstractAnomalyDetectionService

# The algorithm implementation class name must start with an underscore "_" and end with Service;
# _IqrService below is the implementation class of the IQR anomaly-detection algorithm.
class _IqrService(AbstractAnomalyDetectionService):
    """ IQR algorithm class: inherits from AbstractAnomalyDetectionService and implements its abstract methods """

    # Keyword used to invoke the algorithm, lowercase ASCII (required)
    name = 'iqr'

    # Description of the algorithm (recommended)
    desc = """found the anomaly data according to the inter-quartile range"""

    def __init__(self):
        super().__init__()

    def execute(self):
        """ execute holds the core logic of the algorithm; modify this implementation directly """
        # self.list is the input list of values, e.g. [1,2,3,4,5]; it is populated by the parent class.
        # The IQR logic below is the sample implementation; replace it with your own algorithm.
        lower = np.quantile(self.list, 0.25)
        upper = np.quantile(self.list, 0.75)

        min_val = lower - 1.5 * (upper - lower)
        max_val = upper + 1.5 * (upper - lower)

        threshold = [min_val, max_val]

        # The return value is a list of the same length as the input, with -1 at anomalous positions;
        # for the example input above the result would be [1, 1, 1, 1, -1], i.e. [5] is the anomaly.
        return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list]

    def set_params(self, params):
        """ This algorithm takes no input parameters, so the parent method is overridden with a no-op """
        pass
```
## Unit tests
Add unit-test cases to anomaly_test.py in the test directory.
```python
def test_iqr(self):
    """ Test the _IqrService class """
    s = loader.get_service("iqr")

    # Set the input data to be checked
    s.set_input_list(AnomalyDetectionTest.input_list)

    # Exercise the set_params handling
    try:
        s.set_params({"k": 2})
    except ValueError as e:
        self.assertEqual(1, 0)

    r = s.execute()

    # Draw the anomaly-detection result
    draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr")

    # Check the result
    self.assertEqual(r[-1], -1)
    self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
```
## Algorithms that require a model
For algorithms that train a model on a specific data set, the trained model must be saved in the model directory once training completes. Note that each algorithm needs its own sub-directory: for example, the auto_encoder training algorithm creates an autoencoder directory under model, and every model trained with that algorithm on different data sets must be placed in that directory.
Trained models are saved with joblib.
Create the corresponding sub-directory under model to hold the model.
For how a saved model is invoked, see encoder.py: the caller invokes the set_params method with the parameter `{"model": "ad_encoder_keras"}` to select the model used for the computation.
A concrete invocation looks like this:
```python
def test_autoencoder_ad(self):
    # Get the specific algorithm service
    s = loader.get_service("ac")
    data = self.__load_remote_data_for_ad()

    # Set the input data for anomaly detection
    s.set_input_list(data)

    # Select the model, previously trained on this data set
    s.set_params({"model": "ad_encoder_keras"})

    # Run the detection and return the result
    r = s.execute()

    num_of_error = -(sum(filter(lambda x: x == -1, r)))
    self.assertEqual(num_of_error, 109)
```


@ -0,0 +1,322 @@
---
sidebar_label: Data Analysis
title: Data Analysis Features
---
## Overview
ANode (Analysis Node) is the extension component through which TDengine provides data-analysis capabilities. It offers analysis services over a RESTful interface, extending TDengine with advanced time-series analysis.
An ANode is a stateless data-analysis node; a cluster may contain multiple ANodes that have no relationship with each other. Once an ANode has been registered with a TDengine cluster, time-series analysis tasks can be invoked and completed through SQL statements.
The figure below sketches the technical architecture of the data-analysis feature.
![Data analysis architecture](./pic/data-analysis.png)
## Installation and deployment
### Prerequisites
An ANode requires Python 3.10 or later and the corresponding Python package installer pip on the node, and the node must have working Internet access.
### Installation and removal
Install and deploy an ANode with the dedicated installation package TDengine-enterprise-anode-1.x.x.tar.gz; the procedure is the same as installing TDengine.
```bash
tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
cd TDengine-enterprise-anode-1.0.0
sudo ./install.sh
```
To uninstall an ANode, simply run `rmtaosanode`.
### Other notes
To avoid affecting the Python libraries already present on the target node, the ANode runs in a Python virtual environment; after installation the default Python directory is `/var/lib/taos/taosanode/venv/`. To avoid the cost of recreating the virtual environment, uninstalling the ANode does not delete it automatically; if you are sure you no longer need it, you can remove it manually.
## Starting and stopping the service
After installing an ANode, you can manage its service with `systemctl`; use the following commands to start, stop, and check its status.
```bash
systemctl start taosanoded
systemctl stop taosanoded
systemctl status taosanoded
```
## Directories and configuration
|Directory/File|Description|
|---------------|------|
|/usr/local/taos/taosanode/bin|Executables|
|/usr/local/taos/taosanode/resource|Resource files, linked to /var/lib/taos/taosanode/resource/|
|/usr/local/taos/taosanode/lib|Libraries|
|/var/lib/taos/taosanode/model/|Model files, linked to /var/lib/taos/taosanode/model|
|/var/log/taos/taosanode/|Log files|
|/etc/taos/taosanode.ini|Configuration file|
### Configuration
The RESTful service of the ANode is driven by uWSGI, so the configuration of the ANode and of uWSGI lives in the same configuration file, shown below:
```ini
[uwsgi]
# charset
env = LC_ALL = en_US.UTF-8
# ip:port
http = 127.0.0.1:6050
# the local unix socket file that communicates with Nginx
#socket = 127.0.0.1:8001
#socket-timeout = 10
# base directory
chdir = /usr/local/taos/taosanode/lib
# initialize python file
wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
# call module of uWSGI
callable = app
# auto remove unix Socket and pid file when stopping
vacuum = true
# socket exec model
#chmod-socket = 664
# uWSGI pid
uid = root
# uWSGI gid
gid = root
# main process
master = true
# the number of worker processes
processes = 2
# pid file
pidfile = /usr/local/taos/taosanode/taosanode.pid
# enable threads
enable-threads = true
# the number of threads for each process
threads = 4
# memory usage report
memory-report = true
# smooth restart
reload-mercy = 10
# conflict with systemctl, so do NOT uncomment this
# daemonize = /var/log/taos/taosanode/taosanode.log
# log directory
logto = /var/log/taos/taosanode/taosanode.log
# uWSGI monitor port
stats = 127.0.0.1:8387
# python virtual environment directory
virtualenv = /usr/local/taos/taosanode/venv/
[taosanode]
# default app log file
app-log = /var/log/taos/taosanode/taosanode.app.log
# model storage directory
model-dir = /usr/local/taos/taosanode/model/
# default log level
log-level = DEBUG
# draw the query results
draw-result = 0
```
**Note**
Do not set the `daemonize` parameter; it conflicts with systemctl and prevents the service from starting properly.
## Basic ANode operations
### Managing ANodes
#### Creating an ANode
```sql
CREATE ANODE {node_url}
```
node_url is the IP and port of the ANode providing the service, e.g. `create anode 'http://localhost:6050'`. An ANode cannot provide service until it has been registered with a TDengine cluster; registering one ANode with two or more clusters is not recommended.
#### Listing ANodes
List all data-analysis nodes in the cluster, including their `FQDN`, `PORT`, and `STATUS`.
```sql
SHOW ANODES;
```
#### Listing the available time-series analysis services
```SQL
SHOW ANODES FULL;
```
#### Forcing a refresh of the cluster's algorithm cache
```SQL
UPDATE ANODE {node_id}
UPDATE ALL ANODES
```
#### Dropping an ANode
```sql
DROP ANODE {anode_id}
```
Dropping an ANode only removes it from the TDengine cluster; starting and stopping the ANode itself is still managed with `systemctl`.
### Time-series analysis features
#### White-noise check
The RESTful service of the analysis platform requires that the input time series not be white noise (White Noise Data, WND) or a random sequence, so all input data is checked for white noise by default. The current check uses the common `Ljung-Box` test, whose statistic is computed over the whole input series.
If you are certain that the input series is not white noise, you can skip this check with an input parameter and save the CPU spent on it during analysis.
White-noise detection on an input series is also supported as a standalone operation (this capability is not yet exposed separately).
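As a sketch of skipping the check, the `wncheck` option documented in the parameter tables below can be set to 0; the table and column names here follow the sample data used elsewhere in this document:
```sql
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=holtwinters,wncheck=0")
FROM ai.ftb;
```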
#### Resampling and timestamp alignment
The analysis platform can resample the input data as a preprocessing step so that the output is produced at the regular interval requested by the user. Two cases are handled:
- Timestamp alignment. Real data may not arrive exactly at the timestamps requested by the query; the platform automatically aligns the data to the specified interval. For example, for the input series [11, 22, 29, 41] and an interval of 10, the series is aligned to [10, 20, 30, 40].
- Resampling. The sampling rate of the input exceeds that of the requested output. For example, if the input is sampled every 5 time units and the output every 10, the input series [0 5 10 15 20 25 30] is resampled to the interval-10 series [0, 10, 20, 30], and the values at [5, 15, 25] are dropped.
Note that the platform does not fill in missing data before forecasting: if the input series is [11, 22, 29, 49] and the requested interval is 10, the aligned series is [10, 20, 30, 50], and forecasting on that series returns an error.
#### Time-series anomaly detection
Anomaly detection takes the input time series and uses a preset or user-specified algorithm to find the points that **may** be anomalous. Several consecutive anomalous points are automatically merged into one continuous (closed-interval) anomaly window; a single anomalous point degenerates into a window whose start and end times are equal.
The anomaly windows produced depend on both the detection algorithm and its parameters; the data inside an anomaly window can be queried or transformed with the aggregate and scalar functions provided by TDengine.
For the input series (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19), if the system detects (3, 91) and (4, 120) as anomalies, the returned anomaly window is the closed interval [3, 4].
##### Syntax
```SQL
ANOMALY_WINDOW(column_name, option_expr)
option_expr: {"
algo=expr1
[,wncheck=1|0]
[,expr2]
"}
```
1. `column`: the input column for anomaly detection. Only a single column is supported and it must be numeric; character types (such as `NCHAR`, `VARCHAR`, `VARBINARY`) are not allowed, and **function expressions are not supported**.
2. `options`: a string of comma-separated K=V pairs that selects the detection algorithm and its parameters. The values need no single quotes, double quotes, or escape characters, and may not contain Chinese or other wide characters. For example, `algo=ksigma,k=2` runs the ksigma algorithm with input parameter 2.
3. The result of anomaly detection can be used as a subquery of an outer query; the aggregate and scalar functions usable in the `SELECT` clause are the same as for other window queries.
4. The input data is checked for white noise by default; if the input is white noise, no (anomaly) window is returned.
**Parameters**
|Parameter|Meaning|Default|
|---|---|---|
|algo|Anomaly-detection algorithm to invoke|iqr|
|wncheck|Whether to run the white-noise check on the input column|0 or 1; the default is 1, meaning the check is performed|
The result of anomaly detection is presented as windows, so the window-related pseudo-columns remain usable here. The available pseudo-columns are:
1. `_WSTART`: anomaly-window start timestamp
2. `_WEND`: anomaly-window end timestamp
3. `_WDURATION`: anomaly-window duration
**Examples**
```SQL
--- Detect anomalies in column i32 with the iqr algorithm.
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=iqr");
--- Detect anomalies in column i32 with the ksigma algorithm, passing input parameter k = 2.
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
```
```
taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
_wstart | _wend | count(*) |
====================================================================
2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
Query OK, 1 row(s) in set (0.028946s)
```
**Available anomaly-detection algorithms**
- iqr
- ksigma
- grubbs
- lof
- shesd
- tac
#### Time-series forecasting
Forecasting takes a stretch of training data as input and predicts the trend of the time series over the following continuous time range.
##### Syntax
```SQL
FORECAST(column_expr, option_expr)
option_expr: {"
algo=expr1
[,wncheck=1|0]
[,conf=conf_val]
[,every=every_val]
[,rows=rows_val]
[,start=start_ts_val]
[,expr2]
"}
```
1. `column_expr`: the time-series column to forecast. As with anomaly detection, only numeric input is supported.
2. `options`: the parameters of the forecast function, used the same way as for anomaly_window. Forecasting additionally supports the `conf`, `every`, `rows`, and `start` parameters, with the following meanings:
**Parameters**
|Parameter|Meaning|Default|
|---|---|---|
|algo|Forecasting algorithm|holtwinters|
|wncheck|White-noise (white noise data) check|the default is 1; 0 means no check|
|conf|Confidence level of the forecast interval, in the range [0, 100]|95|
|every|Sampling interval of the forecast|the sampling interval of the input data|
|start|Start timestamp of the forecast results|the last timestamp of the input plus one sampling interval|
|rows|Number of forecast records|10|
1. Forecast queries add three pseudo-columns: `_FROWTS` (forecast timestamp), `_FLOW` (lower confidence bound), and `_FHIGH` (upper confidence bound); for algorithms without confidence intervals, the bounds equal the forecast itself.
2. Changing `START` changes only the start time of the returned results; it does not affect the forecast values themselves.
3. `EVERY` may differ from the sampling rate of the input data, but it can only be lower than or equal to it; it cannot be **higher** than the input sampling rate.
4. For algorithms that do not compute confidence intervals, the upper and lower bounds in the result collapse to a single point even when a confidence level is specified.
**Examples**
```SQL
--- Forecast with the arima algorithm; the result has 10 records (the default); the input is checked for white noise; default 95% confidence interval.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
FROM ai.ftb;
--- Forecast with the arima algorithm; the input data is periodic with 10 samples per cycle; a 95% confidence interval is returned.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
FROM ai.ftb;
```
```
taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
_flow | _fhigh | _frowts | forecast(i32) |
========================================================================================
10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
-21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 |
-78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 |
-154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 |
-253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 |
-375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 |
-514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 |
-680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 |
-868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 |
-1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 |
```
**Available forecasting algorithms**
- arima
- holtwinters

(Two binary image files — the figures referenced by the data-analysis documentation above — are added in this commit but not shown.)

@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100)
Compared with the WebSocket connection, the RESTful connection uses more memory: besides the buffer memory, the memory overhead of each connection's response must also be considered. This overhead is closely tied to the size of the JSON response and can become very large when a query returns a lot of data.
Because the RESTful connection does not support fetching query results in batches, queries that return very large result sets can consume a great deal of memory and cause out-of-memory errors. In large projects it is therefore recommended to use the WebSocket connection, which streams the result set back and avoids the risk of memory overflow.
**Note**
- It is recommended to access the TDengine cluster through RESTful/WebSocket connections rather than through the taosc native connection.
@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。
The table below lists the commonly used ports of TDengine interfaces and components; all of them can be changed through parameters in the configuration file.
| Interface or component | Port | Protocol |
|:-----------------------------------------:|:----------:|:--------:|
| Native interface (taosc) | 6030 | TCP |
| RESTful interface | 6041 | TCP |
| WebSocket interface | 6041 | TCP |
| taosKeeper | 6043 | TCP |
| statsd format ingestion interface | 6044 | TCP/UDP |
| collectd format ingestion interface | 6045 | TCP/UDP |
| openTSDB Telnet format ingestion interface | 6046 | TCP |
| collectd (openTSDB Telnet format) ingestion interface | 6047 | TCP |
| icinga2 (openTSDB Telnet format) ingestion interface | 6048 | TCP |
| tcollector (openTSDB Telnet format) ingestion interface | 6049 | TCP |
| taosX | 6050, 6055 | TCP |
| taosExplorer | 6060 | TCP |


@ -76,7 +76,7 @@ dataDir /mnt/data6 2 0
|s3UploadDelaySec | How long a data file must stay unchanged before it is uploaded to S3, in seconds. Minimum 1; maximum 2592000 (30 days); default 60 seconds |
|s3PageCacheSize | Number of S3 page-cache pages. Minimum 4; maximum 1024*1024\*1024; default 4096 |
|s3MigrateIntervalSec | Trigger period for automatically uploading local data files to S3, in seconds. Minimum 600; maximum 100000; default 3600 |
|s3MigrateEnabled | Whether to perform S3 migration automatically. The default is 0, meaning automatic S3 migration is disabled; it can be set to 1. |
### Checking configuration parameter availability
@ -108,9 +108,37 @@ s3migrate database <db_name>;
| # | Parameter | Default | Minimum | Maximum | Description |
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
| 1 | s3_keeplocal | 365 | 1 | 365000 | Number of days the data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3. The default unit is days; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | Size threshold for uploaded objects, in TSDB pages; like the tsdb_pagesize parameter, it cannot be modified |
| 3 | s3_compact | 1 | 0 | 1 | Whether to run compact automatically when a TSDB file group is uploaded to S3 for the first time. |
### Estimating object-storage reads and writes
The cost of using an object-storage service depends on the amount of data stored and on the number of requests; the upload and download paths are described below.
#### Uploading data
When TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files are split into chunks with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last chunk, which remains on the local file system, the chunks are uploaded to the object-storage service.
```math
number of uploads = data file size / (s3_chunkpages * tsdb_pagesize) - 1
```
When creating a database, the chunk size can be adjusted with the `s3_chunkpages` parameter, which controls the number of uploads per data file.
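A sketch of setting these options at database-creation time, assuming the option keywords match the parameter names in the table above (the database name is illustrative):
```sql
CREATE DATABASE sensor_data S3_KEEPLOCAL 365 S3_COMPACT 1;
```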
Other file types, such as head, stt, and sma files, stay on the local file system to speed up queries that rely on precomputed data.
#### Downloading data
When a query needs data that lives in object storage, TSDB does not download the whole data file; it computes the position of the required data within the file and downloads only that data into the TSDB page cache, then returns it to the query engine. Subsequent queries first check the page cache: if the data is already cached it is used directly, without downloading it again from object storage, which effectively reduces the number of downloads.
Several adjacent data pages are downloaded from object storage together as one data block to reduce the number of downloads. The size of each data page is set with the `tsdb_pagesize` parameter when the database is created; the default is 4 KB.
```math
number of downloads = number of data blocks needed by the query - number of data blocks already cached
```
The page cache is an in-memory cache, so after a node restart the data must be downloaded again for queries. The cache uses an LRU (Least Recently Used) policy; when cache space runs short, the least recently used data is evicted. The cache size can be adjusted with the `s3PageCacheSize` parameter; generally, the larger the cache, the fewer the downloads.
## Azure Blob storage
This section describes how to use Microsoft Azure Blob object storage in TDengine Enterprise. This feature extends the 'object storage' feature of the previous section and additionally depends on the S3 gateway provided by the Flexify service; with appropriate configuration, most of the colder time-series data can be stored in the Azure Blob service.
@ -135,3 +163,15 @@ s3BucketName td-test
- All S3 services are assumed to point to the same data source, and operations on the individual S3 services are completely equivalent
- If an operation fails on one S3 service, it switches to another; if all services fail, the last error code produced is returned
- At most 10 S3 service configurations are supported
### Without the Flexify service
The user interface is the same as for S3; the difference lies in the following three parameters:
| # | Parameter | Example value | Description |
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | Colon-separated accountId:accountKey |
| 3 | s3BucketName | test-container | Container name |
Here fd2d01c73 is the account ID; the Microsoft Blob storage service supports only HTTPS, not HTTP.


@ -13,7 +13,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
taosKeeper can be installed in two ways:
- taosKeeper is installed automatically together with the official TDengine installation package; see [TDengine installation](../../../get-started/).
- Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository.
@ -22,34 +22,45 @@ taosKeeper 有两种安装方式:
taosKeeper runs in the operating-system terminal and supports three configuration methods: command-line arguments, environment variables, and a configuration file, in that order of precedence. Using the configuration file is generally recommended.
### Command-line arguments and environment variables
The command-line arguments and environment variables are described in the output of `taoskeeper --help`. Here is an example:
```shell
Usage of taoskeeper v3.3.3.0:
-R, --RotationInterval string interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL" (default "15s")
-c, --config string config path default /etc/taos/taoskeeper.toml
--drop string run taoskeeper in command mode, only support old_taosd_metric_stables.
--environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"
--fromTime string parameter of transfer, example: 2020-01-01T00:00:00+08:00 (default "2020-01-01T00:00:00+08:00")
--gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000)
-h, --help Print this help message and exit
--instanceId int instance ID. Env "TAOS_KEEPER_INSTANCE_ID" (default 64)
--log.compress whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"
--log.keepDays uint log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS" (default 30)
--log.level string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos")
--log.reservedDiskSize string reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE" (default "1GB")
--log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log")
--metrics.database.options.buffer int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER" (default 64)
--metrics.database.options.cachemodel string database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL" (default "both")
--metrics.database.options.keep int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP" (default 90)
--metrics.database.options.vgroups int database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS" (default 1)
--metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"
--metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"
-P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043)
--tdengine.host string TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST" (default "127.0.0.1")
--tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata")
--tdengine.port int TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT" (default 6041)
--tdengine.username string TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME" (default "root")
--tdengine.usessl TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"
--transfer string run taoskeeper in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit
-V, --version Print the version and exit
```
### Configuration file
taosKeeper supports specifying the configuration file with `taoskeeper -c <keeper config file>`.
@ -57,20 +68,18 @@ taosKeeper 支持用 `taoskeeper -c <keeper config file>` 命令来指定配置
If no configuration file is specified and `/etc/taos/taoskeeper.toml` does not exist, the default configuration is used.
**Below is an example configuration file:**
```toml
# The ID of the currently running taoskeeper instance, default is 64.
instanceId = 64
# Listening port, default is 6043.
port = 6043
# Go pool size
gopoolsize = 50000
# Interval for metrics
RotationInterval = "15s"
[tdengine]
@ -81,20 +90,21 @@ password = "taosdata"
usessl = false
[metrics]
# Metrics prefix in metrics names.
prefix = "taos"
# Export some tables that are not super table.
tables = []
# Database for storing metrics data.
[metrics.database]
name = "log"
# Database options for db storing metrics data.
[metrics.database.options]
vgroups = 1
buffer = 64
keep = 90
cachemodel = "both"
[environment]
@ -102,9 +112,19 @@ cachemodel = "both"
incgroup = false
[log]
# The directory where log files are stored.
# path = "/var/log/taos"
level = "info"
# Number of log file rotations before deletion.
rotationCount = 30
# The number of days to retain log files.
keepDays = 30
# The maximum size of a log file before rotation.
rotationSize = "1GB"
# If set to true, log files will be compressed.
compress = false
# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit.
reservedDiskSize = "1GB"
```
## Starting
@ -118,7 +138,6 @@ monitorFqdn localhost # taoskeeper 服务的 FQDN
For TDengine monitoring configuration, see [TDengine monitoring configuration](../../../operation/monitor).
<Tabs>
<TabItem label="Linux" value="linux">
@ -188,7 +207,6 @@ Active: inactive (dead)
</TabItem>
</Tabs>
## Health check
You can call taosKeeper's `check_health` endpoint to check whether the service is alive; if the service is healthy it returns HTTP status code 200:
@ -208,7 +226,6 @@ Content-Length: 21
{"version":"3.3.2.3"}
```
## Data collection and monitoring
As the exporter of TDengine's monitoring metrics, taosKeeper records the monitoring data produced by TDengine in a designated database (the default is `log`); this data can then be used to configure TDengine monitoring.
@ -216,6 +233,7 @@ taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产
### Viewing monitoring data
You can inspect the supertables in the `log` database; each supertable corresponds to a group of monitoring metrics, which are not described in detail here.
```shell
taos> use log;
Database changed.
@ -251,17 +269,14 @@ taos> select last_row(*) from taosd_dnodes_info;
Query OK, 1 row(s) in set (0.003168s)
```
### Configuring monitoring with TDInsight
Once the monitoring data is being collected, you can use TDInsight to configure monitoring for TDengine; see the [TDinsight reference manual](../tdinsight/).
## Integrating with Prometheus
taoskeeper provides a `/metrics` endpoint that returns monitoring data in Prometheus format; Prometheus can scrape this endpoint to monitor TDengine.
### Exporting metrics
The following uses `curl` to show the data format returned by the `/metrics` endpoint:
@ -298,9 +313,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taosd cluster
##### Supported labels
- `cluster_id`: cluster id
##### Metrics and their meanings
| Metric | Type | Meaning |
| ----------------------------------- | ------- | ------------------------------------- |
| taos_cluster_info_connections_total | counter | Total number of connections |
@ -328,11 +345,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### dnode
##### Supported labels
- `cluster_id`: cluster id
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode id
##### Metrics and their meanings
| Metric | Type | Meaning |
| ------------------------------ | ------- | ---------------------------------------------------------------------------------------- |
| taos_d_info_status | gauge | dnode status; the value label indicates the status: ready (normal), offline, or unknown. |
@ -361,6 +380,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Data directories
##### Supported labels
- `cluster_id`: cluster id
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode id
@ -368,6 +388,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- `data_dir_level`: data directory level
##### Metrics and their meanings
| Metric | Type | Meaning |
| --------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_data_dirs_avail | gauge | Available space (bytes) |
@ -377,12 +398,14 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Log directories
##### Supported labels
- `cluster_id`: cluster id
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode id
- `log_dir_name`: log directory name
##### Metrics and their meanings
| Metric | Type | Meaning |
| -------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_log_dirs_avail | gauge | Available space (bytes) |
@ -392,11 +415,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Log counts
##### Supported labels
- `cluster_id`: cluster id
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode id
##### Metrics and their meanings
| Metric | Type | Meaning |
| ---------------------- | ------- | ------------ |
| taos_log_summary_debug | counter | Number of debug log entries |
@ -404,14 +429,15 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
| taos_log_summary_info | counter | Number of info log entries |
| taos_log_summary_trace | counter | Number of trace log entries |
#### taosadapter
##### Supported labels
- `endpoint`: endpoint
- `req_type`: request type (0 = rest, 1 = websocket)
##### Metrics and their meanings
| Metric | Type | Meaning |
| -------------------------------------- | ------- | -------------------- |
| taos_adapter_requests_fail | counter | Number of failed requests |
@ -433,9 +459,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taoskeeper
##### Supported labels
- `identify`: node endpoint
##### Metrics and their meanings
| Metric | Type | Meaning |
| ----------------------- | ----- | ------------------------------------- |
| taos_keeper_monitor_cpu | gauge | taoskeeper CPU usage (range 0 to 1) |
@ -444,6 +472,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Other taosd cluster metrics
##### taos_m_info_role
- **Labels**:
  - `cluster_id`: cluster id
  - `mnode_ep`: mnode endpoint
@ -453,6 +482,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: mnode role
##### taos_taos_sql_req_count
- **Labels**:
  - `cluster_id`: cluster id
  - `result`: request result (values: Success, Failed)
@ -462,6 +492,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: Number of SQL requests
##### taos_taosd_sql_req_count
- **Labels**:
  - `cluster_id`: cluster id
  - `dnode_ep`: dnode endpoint
@ -474,6 +505,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: Number of SQL requests
##### taos_taosd_vgroups_info_status
- **Labels**:
  - `cluster_id`: cluster id
  - `database_name`: database name
@ -482,6 +514,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: Virtual group status. 0 = unsynced (no leader elected); 1 = ready.
##### taos_taosd_vgroups_info_tables_num
- **Labels**:
  - `cluster_id`: cluster id
  - `database_name`: database name
@ -490,6 +523,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: Number of tables in the virtual group
##### taos_taosd_vnodes_info_role
- **Labels**:
  - `cluster_id`: cluster id
  - `database_name`: database name
@ -499,7 +533,6 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Type**: gauge
- **Meaning**: Virtual node role
### Scrape configuration
Prometheus uses `scrape_configs` to configure how monitoring data is scraped from an endpoint; usually you only need to set the targets in `static_configs` to the taoskeeper endpoint address. See the [Prometheus configuration documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for more information.
@ -521,8 +554,6 @@ scrape_configs:
In the Grafana Dashboard menu click `import`, enter `18587` as the dashboard ID, and click `Load` to import the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.
## taosKeeper metrics
taosKeeper also writes the monitoring data it collects into the monitoring database (the default is the `log` database; this can be changed in the taoskeeper configuration file).


@ -30,6 +30,7 @@ database_option: {
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
| TABLE_SUFFIX value
| DNODES value
| TSDB_PAGESIZE value
| WAL_LEVEL {1 | 2}
| WAL_FSYNC_PERIOD value
@ -70,6 +71,7 @@ database_option: {
- TABLE_PREFIX当其为正值时在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀;例如,假定表名为 "v30001",当 TSDB_PREFIX = 2 时 使用 "0001" 来决定分配到哪个 vgroup ,当 TSDB_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup - TABLE_PREFIX当其为正值时在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀;例如,假定表名为 "v30001",当 TSDB_PREFIX = 2 时 使用 "0001" 来决定分配到哪个 vgroup ,当 TSDB_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup
- TABLE_SUFFIX当其为正值时在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "01" 来决定分配到哪个 vgroup。 - TABLE_SUFFIX当其为正值时在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "01" 来决定分配到哪个 vgroup。
- TSDB_PAGESIZE一个 VNODE 中时序数据存储引擎的页大小,单位为 KB默认为 4 KB。范围为 1 到 16384即 1 KB到 16 MB。 - TSDB_PAGESIZE一个 VNODE 中时序数据存储引擎的页大小,单位为 KB默认为 4 KB。范围为 1 到 16384即 1 KB到 16 MB。
- DNODES指定 VNODE 所在的 DNODE 列表,如 '1,2,3',以逗号区分且字符间不能有空格,仅企业版支持。
- WAL_LEVELWAL 级别,默认为 1。 - WAL_LEVELWAL 级别,默认为 1。
- 1写 WAL但不执行 fsync。 - 1写 WAL但不执行 fsync。
- 2写 WAL而且执行 fsync。 - 2写 WAL而且执行 fsync。
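A sketch of the DNODES option (Enterprise Edition; the database name and dnode ids are illustrative):
```sql
CREATE DATABASE power_ent VGROUPS 4 DNODES '1,2,3';
```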


@ -1569,7 +1569,7 @@ COUNT({* | expr})
ELAPSED(ts_primary_key [, time_unit])
```
**Description**: The elapsed function expresses the continuous time span covered by data within the statistics period; used together with the twa function it can compute the area under the statistics curve. When a window is specified with an INTERVAL clause it returns, for each window in the given time range, the time range covered by data; without an INTERVAL clause it returns the time range covered by data over the whole given range. Note that ELAPSED returns not the absolute time range but the number of units obtained by dividing that range by time_unit. In stream processing this function is supported only in FORCE_WINDOW_CLOSE mode.
**Return type**: DOUBLE.
@ -1829,7 +1829,7 @@ ignore_null_values: {
- INTERP is used to obtain the value of a column at a specified point in time; if no row matches at that time, interpolation is performed according to the FILL setting.
- The input of INTERP is the data of the specified column, optionally filtered by a condition (the where clause); without a filter, the whole data set is the input.
- In SQL queries, INTERP must be used together with the RANGE, EVERY, and FILL keywords; in stream processing, RANGE cannot be used and INTERP must be used together with EVERY and FILL (see the sketch after this list).
- The output time range of INTERP is given by RANGE(timestamp1, timestamp2), which must satisfy timestamp1 \<= timestamp2: timestamp1 is the start of the output range (if timestamp1 qualifies for interpolation, it is the first output record) and timestamp2 is the end (the timestamp of the last output record cannot be greater than timestamp2).
- INTERP determines the number of results within the output range with EVERY(time_unit): interpolation is performed every time_unit starting from timestamp1. time_unit accepts the time units 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), and 1w (week). For example, EVERY(500a) interpolates the specified data every 500 milliseconds.
- INTERP decides how to interpolate at each qualifying time according to FILL; for how to use the FILL clause, see the [FILL clause](../distinguished/#fill-子句).
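A sketch of an INTERP query combining the three keywords; the table, column, and time range are illustrative:
```sql
SELECT INTERP(current)
FROM test.d1001
RANGE('2024-11-01 00:00:00', '2024-11-01 01:00:00')
EVERY(10s)
FILL(PREV);
```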
@ -2180,7 +2180,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```
**Description**: Time-weighted average. Returns the time-weighted average of a column over a period of time. For tables with a composite primary key, if multiple rows share a timestamp, only the row with the smallest composite primary key takes part in the calculation. In stream processing this function is supported only in FORCE_WINDOW_CLOSE mode.
**Return type**: DOUBLE.


@ -143,13 +143,14 @@ SELECT * from information_schema.`ins_streams`;
When creating a stream, the trigger mode of the stream computation can be specified with the TRIGGER clause.
For non-window computations, stream computation is triggered in real time; for window computations, 4 trigger modes are currently provided, with WINDOW_CLOSE as the default:
1. AT_ONCE: triggered immediately on write
2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by the event time and can be used together with watermark)
3. MAX_DELAY time: triggered when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, the computation is triggered.
4. FORCE_WINDOW_CLOSE: uses the current system time to compute and push out only the result of the window that is currently closing. The window is computed exactly once, at the moment it closes, and is not recomputed afterwards. This mode currently supports only INTERVAL windows (without sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1, and FILL supports only PREV, NULL, NONE, and VALUE.
Because window closing is determined by the event time, if the event stream is interrupted or keeps lagging, the event time cannot advance and the latest results may not be obtainable.
@ -248,11 +249,8 @@ T = 最新事件时间 - DELETE_MARK
- [percentile](../function/#percentile) - [percentile](../function/#percentile)
- [top](../function/#top) - [top](../function/#top)
- [bottom](../function/#bottom) - [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative) - [derivative](../function/#derivative)
- [irate](../function/#irate) - [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram) - [histogram](../function/#histogram)
- [diff](../function/#diff) - [diff](../function/#diff)
- [statecount](../function/#statecount) - [statecount](../function/#statecount)
@ -291,3 +289,4 @@ RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
CREATE SNODE ON DNODE [id]
```
The id is the sequence number of a dnode in the cluster. Choose the dnode carefully: the intermediate state of stream computation will automatically be backed up on it.
Starting from version 3.3.4.0, creating a stream in a multi-replica environment performs an **existence check** for the snode and requires that an snode be created first; if no snode exists, the stream cannot be created.
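For example (the dnode id 1 is illustrative), an snode can be created before any streams are defined:
```sql
CREATE SNODE ON DNODE 1;
```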

View File

@ -26,6 +26,7 @@ Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter
| Node.js connector version | Major changes | TDengine version |
| :------------------: | :----------------------: | :----------------: |
| 3.1.1 | Improved data transfer performance | 3.3.2.0 and later |
| 3.1.0 | New release with WebSocket connection support | 3.2.0.0 and later |
## Handling exceptions

View File

@ -3,11 +3,14 @@ sidebar_label: ODBC
title: TDengine ODBC
---
TDengine ODBC is the ODBC driver implemented for TDengine. It allows Windows applications (such as [PowerBI](https://powerbi.microsoft.com/zh-cn/)) as well as user-developed applications to access local, remote and cloud-hosted TDengine databases through the standard ODBC interface.
TDengine ODBC offers two ways to connect to a TDengine database, WebSocket (recommended) and native; the connection type can be configured per TDengine data source. Accessing the cloud service requires the WebSocket connection type.
TDengine ODBC provides both 64-bit and 32-bit drivers. The 32-bit driver is supported only in the enterprise edition and only supports the WebSocket connection type.
**Note:**
- Driver manager: make sure to use an ODBC driver manager that matches the application architecture; 32-bit applications require the 32-bit ODBC driver manager and 64-bit applications require the 64-bit one.
- Data source name (DSN): both the 32-bit and 64-bit ODBC driver managers can see all DSNs. DSNs with the same name under the User DSN tab are shared, so DSNs need to be distinguished by name.
To learn more about using the TDengine time-series database, visit the [TDengine official documentation](https://docs.taosdata.com/intro/).
@ -24,17 +27,17 @@ TDengine ODBC 提供基于 WebSocket推荐和 原生连接两种方式连
### Data source connection types and their differences
TDengine ODBC supports two ways of connecting to a TDengine database, WebSocket and native, which differ as follows:
1. Accessing the cloud service is only supported over the WebSocket connection.
2. 32-bit applications only support the WebSocket connection.
3. The WebSocket connection has better compatibility; generally the client library does not need to be upgraded when the TDengine server is upgraded.
4. The native connection usually performs somewhat better, but its version must match the version of the TDengine server.
5. For ordinary users, the **WebSocket** connection is recommended: its performance differs little from native and its compatibility is better.
### WebSocket connection
@ -46,11 +49,11 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式Websocket 连接与
4. Click Finish to open the TDengine ODBC data source configuration page and fill in the following required information.
![ODBC WebSocket connection config](./assets/odbc-ws-config-zh.webp)
4.1 [DSN]: Data Source Name, required; names the newly added ODBC data source.
4.2 [Connection Type]: required; select the connection type, here choose [WebSocket].
4.3 [URL]: required, the ODBC data source URL, e.g. `http://localhost:6041`; for the cloud service, e.g. `https://gw.cloud.taosdata.com?token=your_token`.
@ -111,7 +114,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位
| taos_odbc version | Major changes | TDengine version |
| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
| v1.1.0 | 1. View support;<br/>2. VARBINARY/GEOMETRY data types;<br/>3. 32-bit WebSocket connection for ODBC (enterprise edition only);<br/>4. Compatibility options in the ODBC data source configuration dialog for industrial software such as KingSCADA and Kepware (enterprise edition only); | 3.3.3.0 and later |
| v1.0.2 | CP1252 character encoding support; | 3.2.3.0 and later |
| v1.0.1 | 1. DSN can enable BI mode, in which TDengine does not return system databases or supertable subtable information;<br/>2. Refactored character-set conversion module for better read/write performance;<br/>3. Default connection type in the ODBC data source configuration dialog changed to "WebSocket";<br/>4. Added a "Test Connection" control to the configuration dialog;<br/>5. Configuration dialog supports a Chinese/English UI; | - |
| v1.0.0.0 | Initial release with read/write access to the TDengine database; see the "API Reference" section for details | 3.2.2.0 and later |

View File

@ -39,10 +39,10 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
| ----------- | ------------- | ------------- | ------------- | --------- | --------- | | ----------- | ------------- | ------------- | ------------- | --------- | --------- |
| **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** | | **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** |
| **C/C++** | ● | ● | ● | ● | ● | | **C/C++** | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ○ | ● | ○ | | **JDBC** | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ● | ● | | **Python** | ● | ● | ● | ● | ● |
| **Go** | ● | ● | ● | ● | ● | | **Go** | ● | ● | ● | ● | ● |
| **NodeJs** | ● | ● | ○ | ● | ○ | | **NodeJs** | ● | ● | ● | ● | ● |
| **C#** | ● | ● | ○ | ● | ○ | | **C#** | ● | ● | ○ | ● | ○ |
| **Rust** | ● | ● | ● | ○ | ● | | **Rust** | ● | ● | ● | ○ | ● |
| **RESTful** | ● | ● | ● | ● | ● | | **RESTful** | ● | ● | ● | ● | ● |

View File

@ -81,6 +81,13 @@ typedef enum {
TSDB_SML_TIMESTAMP_NANO_SECONDS, TSDB_SML_TIMESTAMP_NANO_SECONDS,
} TSDB_SML_TIMESTAMP_TYPE; } TSDB_SML_TIMESTAMP_TYPE;
typedef enum TAOS_FIELD_T {
TAOS_FIELD_COL = 1,
TAOS_FIELD_TAG,
TAOS_FIELD_QUERY,
TAOS_FIELD_TBNAME,
} TAOS_FIELD_T;
typedef struct taosField { typedef struct taosField {
char name[65]; char name[65];
int8_t type; int8_t type;
@ -95,6 +102,15 @@ typedef struct TAOS_FIELD_E {
int32_t bytes; int32_t bytes;
} TAOS_FIELD_E; } TAOS_FIELD_E;
typedef struct TAOS_FIELD_STB {
char name[65];
int8_t type;
uint8_t precision;
uint8_t scale;
int32_t bytes;
TAOS_FIELD_T field_type;
} TAOS_FIELD_STB;
#ifdef WINDOWS #ifdef WINDOWS
#define DLL_EXPORT __declspec(dllexport) #define DLL_EXPORT __declspec(dllexport)
#else #else
@ -195,13 +211,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt);
typedef void TAOS_STMT2; typedef void TAOS_STMT2;
typedef enum TAOS_FIELD_T {
TAOS_FIELD_COL = 1,
TAOS_FIELD_TAG,
TAOS_FIELD_QUERY,
TAOS_FIELD_TBNAME,
} TAOS_FIELD_T;
typedef struct TAOS_STMT2_OPTION { typedef struct TAOS_STMT2_OPTION {
int64_t reqid; int64_t reqid;
bool singleStbInsert; bool singleStbInsert;
@ -232,7 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows);
DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt);
DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert);
DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields);
DLL_EXPORT int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields);
DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields);
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields);
DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt);
DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt);
@ -251,17 +262,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res);
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
DLL_EXPORT void taos_stop_query(TAOS_RES *res); DLL_EXPORT void taos_stop_query(TAOS_RES *res);
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows);
DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); DLL_EXPORT bool taos_is_update_query(TAOS_RES *res);
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows);
DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData);
DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex);
DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
DLL_EXPORT void taos_reset_current_db(TAOS *taos); DLL_EXPORT void taos_reset_current_db(TAOS *taos);
DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res);
DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);

View File

@ -154,6 +154,7 @@ typedef enum EStreamType {
STREAM_TRANS_STATE, STREAM_TRANS_STATE,
STREAM_MID_RETRIEVE, STREAM_MID_RETRIEVE,
STREAM_PARTITION_DELETE_DATA, STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
} EStreamType; } EStreamType;
#pragma pack(push, 1) #pragma pack(push, 1)
@ -383,6 +384,10 @@ typedef struct STUidTagInfo {
#define TABLE_NAME_COLUMN_INDEX 6 #define TABLE_NAME_COLUMN_INDEX 6
#define PRIMARY_KEY_COLUMN_INDEX 7 #define PRIMARY_KEY_COLUMN_INDEX 7
// stream get result block column
#define DATA_TS_COLUMN_INDEX 0
#define DATA_VERSION_COLUMN_INDEX 1
// stream create table block column // stream create table block column
#define UD_TABLE_NAME_COLUMN_INDEX 0 #define UD_TABLE_NAME_COLUMN_INDEX 0
#define UD_GROUPID_COLUMN_INDEX 1 #define UD_GROUPID_COLUMN_INDEX 1

View File

@ -189,7 +189,12 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint
int32_t getJsonValueLen(const char* data); int32_t getJsonValueLen(const char* data);
// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length.
// If the same row is inserted repeatedly, data holes will result.
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1),
// it will be inserted at the original position and the old data will be overwritten.
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData); int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows,
bool trimValue); bool trimValue);
@ -233,7 +238,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
* @brief find how many rows already in order start from first row * @brief find how many rows already in order start from first row
*/ */
int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo); int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo);
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk); int32_t blockDataCheck(const SSDataBlock* pDataBlock);
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload); int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows); int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
@ -266,7 +271,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId)
int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData); int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData);
int32_t blockGetEncodeSize(const SSDataBlock* pBlock); int32_t blockGetEncodeSize(const SSDataBlock* pBlock);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols); int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols);
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos); int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos);
// for debug // for debug

View File

@ -142,6 +142,7 @@ extern bool tsMonitorForceV2;
// audit // audit
extern bool tsEnableAudit; extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable; extern bool tsEnableAuditCreateTable;
extern bool tsEnableAuditDelete;
extern int32_t tsAuditInterval; extern int32_t tsAuditInterval;
// telem // telem
@ -153,6 +154,12 @@ extern bool tsEnableCrashReport;
extern char *tsTelemUri; extern char *tsTelemUri;
extern char *tsClientCrashReportUri; extern char *tsClientCrashReportUri;
extern char *tsSvrCrashReportUri; extern char *tsSvrCrashReportUri;
extern int8_t tsSafetyCheckLevel;
enum {
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
TSDB_SAFETY_CHECK_LEVELL_BYROW = 2,
};
// query buffer management // query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
@ -194,10 +201,10 @@ extern int32_t tsMinIntervalTime;
extern int32_t tsMaxInsertBatchRows; extern int32_t tsMaxInsertBatchRows;
// build info // build info
extern char version[]; extern char td_version[];
extern char compatible_version[]; extern char td_compatible_version[];
extern char gitinfo[]; extern char td_gitinfo[];
extern char buildinfo[]; extern char td_buildinfo[];
// lossy // lossy
extern char tsLossyColumns[]; extern char tsLossyColumns[];

View File

@ -467,9 +467,11 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT, QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY, QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY, QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC, QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC,
} ENodeType; } ENodeType;
typedef struct { typedef struct {
@ -1022,6 +1024,7 @@ typedef struct {
char sDetailVer[128]; char sDetailVer[128];
int64_t whiteListVer; int64_t whiteListVer;
SMonitorParas monitorParas; SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SConnectRsp; } SConnectRsp;
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp); int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
@ -1215,6 +1218,7 @@ typedef struct {
int32_t bytes; int32_t bytes;
int8_t type; int8_t type;
uint8_t pk; uint8_t pk;
bool noData;
} SColumnInfo; } SColumnInfo;
typedef struct STimeWindow { typedef struct STimeWindow {
@ -1337,6 +1341,7 @@ typedef struct {
char* sql; char* sql;
int8_t withArbitrator; int8_t withArbitrator;
int8_t encryptAlgorithm; int8_t encryptAlgorithm;
char dnodeListStr[TSDB_DNODE_LIST_LEN];
} SCreateDbReq; } SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq); int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@ -1825,6 +1830,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq); int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
void tFreeSStatisReq(SStatisReq* pReq); void tFreeSStatisReq(SStatisReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
char table[TSDB_TABLE_NAME_LEN];
char operation[AUDIT_OPERATION_LEN];
int32_t sqlLen;
char* pSql;
} SAuditReq;
int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
void tFreeSAuditReq(SAuditReq* pReq);
typedef struct { typedef struct {
int32_t dnodeId; int32_t dnodeId;
int64_t clusterId; int64_t clusterId;
@ -2813,9 +2829,11 @@ typedef struct {
int32_t code; int32_t code;
} STaskDropRsp; } STaskDropRsp;
#define STREAM_TRIGGER_AT_ONCE 1 #define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2 #define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_MAX_DELAY 3 #define STREAM_TRIGGER_MAX_DELAY 3
#define STREAM_TRIGGER_FORCE_WINDOW_CLOSE 4
#define STREAM_DEFAULT_IGNORE_EXPIRED 1 #define STREAM_DEFAULT_IGNORE_EXPIRED 1
#define STREAM_FILL_HISTORY_ON 1 #define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0 #define STREAM_FILL_HISTORY_OFF 0
@ -3413,6 +3431,7 @@ typedef struct {
int32_t svrTimestamp; int32_t svrTimestamp;
SArray* rsps; // SArray<SClientHbRsp> SArray* rsps; // SArray<SClientHbRsp>
SMonitorParas monitorParas; SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SClientHbBatchRsp; } SClientHbBatchRsp;
static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); } static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); }
@ -4104,18 +4123,16 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp);
#define MQ_DATA_RSP_VERSION 100 #define MQ_DATA_RSP_VERSION 100
typedef struct { typedef struct {
struct { SMqRspHead head;
SMqRspHead head; STqOffsetVal rspOffset;
STqOffsetVal rspOffset; STqOffsetVal reqOffset;
STqOffsetVal reqOffset; int32_t blockNum;
int32_t blockNum; int8_t withTbName;
int8_t withTbName; int8_t withSchema;
int8_t withSchema; SArray* blockDataLen;
SArray* blockDataLen; SArray* blockData;
SArray* blockData; SArray* blockTbName;
SArray* blockTbName; SArray* blockSchema;
SArray* blockSchema;
};
union{ union{
struct{ struct{

View File

@ -259,6 +259,7 @@
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8

View File

@ -238,12 +238,26 @@ typedef struct {
case TSDB_DATA_TYPE_UBIGINT: \ case TSDB_DATA_TYPE_UBIGINT: \
snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \ snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \
break; \ break; \
case TSDB_DATA_TYPE_FLOAT: \ case TSDB_DATA_TYPE_FLOAT: { \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \
if (n >= (_outputBytes)) { \
n = snprintf(_output, (int32_t)(_outputBytes), "%.7e", *(float *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \
} \
} \
break; \ break; \
case TSDB_DATA_TYPE_DOUBLE: \ } \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ case TSDB_DATA_TYPE_DOUBLE: { \
int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%.15e", *(double *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \
} \
} \
break; \ break; \
} \
case TSDB_DATA_TYPE_UINT: \ case TSDB_DATA_TYPE_UINT: \
snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \ snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \
break; \ break; \
@ -284,6 +298,7 @@ typedef struct {
#define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX) #define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) #define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX)
#define IS_CONVERT_AS_SIGNED(_t) \ #define IS_CONVERT_AS_SIGNED(_t) \
(IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP)) (IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))

View File

@ -29,7 +29,6 @@ extern "C" {
#endif #endif
#define AUDIT_DETAIL_MAX 65472 #define AUDIT_DETAIL_MAX 65472
#define AUDIT_OPERATION_LEN 20
typedef struct { typedef struct {
const char *server; const char *server;

View File

@ -210,7 +210,7 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
void* qExtractReaderFromStreamScanner(void* scanner); void* qExtractReaderFromStreamScanner(void* scanner);
@ -222,8 +222,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
bool qStreamScanhistoryFinished(qTaskInfo_t tinfo); bool qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo); int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo); void qResetTaskInfoCode(qTaskInfo_t tinfo);
int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow);
int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo); int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo);
int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo); int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo);

View File

@ -39,8 +39,10 @@ extern "C" {
#define META_READER_LOCK 0x0 #define META_READER_LOCK 0x0
#define META_READER_NOLOCK 0x1 #define META_READER_NOLOCK 0x1
#define STREAM_STATE_BUFF_HASH 1 #define STREAM_STATE_BUFF_HASH 1
#define STREAM_STATE_BUFF_SORT 2 #define STREAM_STATE_BUFF_SORT 2
#define STREAM_STATE_BUFF_HASH_SORT 3
#define STREAM_STATE_BUFF_HASH_SEARCH 4
typedef struct SMeta SMeta; typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*); typedef TSKEY (*GetTsFun)(void*);
@ -325,6 +327,9 @@ typedef struct {
int64_t number; int64_t number;
void* pStreamFileState; void* pStreamFileState;
int32_t buffIndex; int32_t buffIndex;
int32_t hashIter;
void* pHashData;
int64_t minGpId;
} SStreamStateCur; } SStreamStateCur;
typedef struct SStateStore { typedef struct SStateStore {
@ -337,6 +342,8 @@ typedef struct SStateStore {
void (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used); void (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used);
void (*streamStateClearBuff)(SStreamState* pState, void* pVal); void (*streamStateClearBuff)(SStreamState* pState, void* pVal);
void (*streamStateFreeVal)(void* val); void (*streamStateFreeVal)(void* val);
int32_t (*streamStateGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode); int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -349,8 +356,15 @@ typedef struct SStateStore {
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen); int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t (*streamStateFillAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void (*streamStateFillDel)(SStreamState* pState, const SWinKey* key); void (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateFillGetNext)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateFillGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
void (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur); void (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
void (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur); void (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
@ -361,9 +375,12 @@ typedef struct SStateStore {
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key); SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur); void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t (*streamStateFillGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
void (*streamStateSetFillInfo)(SStreamState* pState);
void (*streamStateClearExpiredState)(SStreamState* pState);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen, int32_t* pWinCode); int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen); int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
@ -400,8 +417,8 @@ typedef struct SStateStore {
SUpdateInfo** ppInfo); SUpdateInfo** ppInfo);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo); void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo); void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen); int32_t (*updateInfoSerialize)(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo); int32_t (*updateInfoDeserialize)(SDecoder* pDeCoder, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key); SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count); SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
@ -412,6 +429,11 @@ typedef struct SStateStore {
GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type, GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type,
struct SStreamFileState** ppFileState); struct SStreamFileState** ppFileState);
int32_t (*streamStateGroupPut)(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* (*streamStateGroupGetCur)(SStreamState* pState);
void (*streamStateGroupCurNext)(SStreamStateCur* pCur);
int32_t (*streamStateGroupGetKVByCur)(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState); void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState); void (*streamFileStateClear)(struct SStreamFileState* pFileState);
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState); bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);

View File

@ -292,6 +292,7 @@ bool fmIsElapsedFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType, int32_t pkBytes); void getLastCacheDataType(SDataType* pType, int32_t pkBytes);
int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc); int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc); int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc);

View File

@ -24,6 +24,7 @@ extern "C" {
#include "thash.h" #include "thash.h"
#include "query.h" #include "query.h"
#include "tqueue.h" #include "tqueue.h"
#include "clientInt.h"
typedef enum { typedef enum {
SQL_RESULT_SUCCESS = 0, SQL_RESULT_SUCCESS = 0,
@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name,
void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values); void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values);
const char* monitorResultStr(SQL_RESULT_CODE code); const char* monitorResultStr(SQL_RESULT_CODE code);
int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data); int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data);
void clientOperateReport(SRequestObj* pRequest);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -72,6 +72,7 @@ typedef struct SDatabaseOptions {
int8_t compressionLevel; int8_t compressionLevel;
int8_t encryptAlgorithm; int8_t encryptAlgorithm;
int32_t daysPerFile; int32_t daysPerFile;
char dnodeListStr[TSDB_DNODE_LIST_LEN];
char encryptAlgorithmStr[TSDB_ENCRYPT_ALGO_STR_LEN]; char encryptAlgorithmStr[TSDB_ENCRYPT_ALGO_STR_LEN];
SValueNode* pDaysPerFile; SValueNode* pDaysPerFile;
int32_t fsyncPeriod; int32_t fsyncPeriod;

View File

@ -194,14 +194,26 @@ typedef struct SIndefRowsFuncLogicNode {
bool isTimeLineFunc; bool isTimeLineFunc;
} SIndefRowsFuncLogicNode; } SIndefRowsFuncLogicNode;
typedef struct SStreamNodeOption {
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
int8_t igCheckUpdate;
int8_t destHasPrimaryKey;
} SStreamNodeOption;
typedef struct SInterpFuncLogicNode { typedef struct SInterpFuncLogicNode {
SLogicNode node; SLogicNode node;
SNodeList* pFuncs; SNodeList* pFuncs;
STimeWindow timeRange; STimeWindow timeRange;
int64_t interval; int64_t interval;
EFillMode fillMode; int8_t intervalUnit;
SNode* pFillValues; // SNodeListNode int8_t precision;
SNode* pTimeSeries; // SColumnNode EFillMode fillMode;
SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncLogicNode; } SInterpFuncLogicNode;
typedef struct SForecastFuncLogicNode { typedef struct SForecastFuncLogicNode {
@ -505,17 +517,21 @@ typedef struct SIndefRowsFuncPhysiNode {
} SIndefRowsFuncPhysiNode; } SIndefRowsFuncPhysiNode;
typedef struct SInterpFuncPhysiNode { typedef struct SInterpFuncPhysiNode {
SPhysiNode node; SPhysiNode node;
SNodeList* pExprs; SNodeList* pExprs;
SNodeList* pFuncs; SNodeList* pFuncs;
STimeWindow timeRange; STimeWindow timeRange;
int64_t interval; int64_t interval;
int8_t intervalUnit; int8_t intervalUnit;
EFillMode fillMode; int8_t precision;
SNode* pFillValues; // SNodeListNode EFillMode fillMode;
SNode* pTimeSeries; // SColumnNode SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncPhysiNode; } SInterpFuncPhysiNode;
typedef SInterpFuncPhysiNode SStreamInterpFuncPhysiNode;
typedef struct SForecastFuncPhysiNode { typedef struct SForecastFuncPhysiNode {
SPhysiNode node; SPhysiNode node;
SNodeList* pExprs; SNodeList* pExprs;
@ -650,7 +666,7 @@ typedef struct SWindowPhysiNode {
int64_t watermark; int64_t watermark;
int64_t deleteMark; int64_t deleteMark;
int8_t igExpired; int8_t igExpired;
int8_t destHasPrimayKey; int8_t destHasPrimaryKey;
bool mergeDataBlock; bool mergeDataBlock;
} SWindowPhysiNode; } SWindowPhysiNode;

View File

@ -457,6 +457,7 @@ typedef struct SSelectStmt {
bool hasCountFunc; bool hasCountFunc;
bool hasUdaf; bool hasUdaf;
bool hasStateKey; bool hasStateKey;
bool hasTwaOrElapsedFunc;
bool onlyHasKeepOrderFunc; bool onlyHasKeepOrderFunc;
bool groupSort; bool groupSort;
bool tagScan; bool tagScan;

View File

@ -65,7 +65,7 @@ typedef struct SParseCsvCxt {
const char* pLastSqlPos; // the location of the last parsed sql const char* pLastSqlPos; // the location of the last parsed sql
} SParseCsvCxt; } SParseCsvCxt;
typedef void(*setQueryFn)(int64_t); typedef void (*setQueryFn)(int64_t);
typedef struct SParseContext { typedef struct SParseContext {
uint64_t requestId; uint64_t requestId;
@ -147,6 +147,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum); int32_t colIdx, int32_t rowNum);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen); TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
@ -176,8 +177,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen); char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* fields, int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields,
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen); int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut); int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut);

View File

@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara
int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t timeZoneStrLen();
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

View File

@ -49,6 +49,8 @@ void streamStateClear(SStreamState* pState);
void streamStateSetNumber(SStreamState* pState, int32_t number, int32_t tsIdex); void streamStateSetNumber(SStreamState* pState, int32_t number, int32_t tsIdex);
void streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen); void streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen); int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
// session window // session window
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen, int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen,
@ -75,8 +77,14 @@ int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, ch
// fill // fill
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
int32_t streamStateFillAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void streamStateFillDel(SStreamState* pState, const SWinKey* key); void streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateFillGetNext(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateFillGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode); int32_t* pWinCode);
@ -96,15 +104,25 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey*
void streamStateFreeCur(SStreamStateCur* pCur); void streamStateFreeCur(SStreamStateCur* pCur);
void streamStateResetCur(SStreamStateCur* pCur); void streamStateResetCur(SStreamStateCur* pCur);
int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateFillGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
// twa
void streamStateSetFillInfo(SStreamState* pState);
void streamStateClearExpiredState(SStreamState* pState);
void streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); void streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname); int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode); int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode);
// group id
int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* streamStateGroupGetCur(SStreamState* pState);
void streamStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void streamStateReloadInfo(SStreamState* pState, TSKEY ts); void streamStateReloadInfo(SStreamState* pState, TSKEY ts);
void streamStateCopyBackend(SStreamState* src, SStreamState* dst); void streamStateCopyBackend(SStreamState* src, SStreamState* dst);

View File

@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
#define SSTREAM_TASK_NEED_CONVERT_VER 2 #define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 #define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
extern int32_t streamMetaId; extern int32_t streamMetaRefPool;
extern int32_t streamTaskRefPool;
enum { enum {
STREAM_STATUS__NORMAL = 0, STREAM_STATUS__NORMAL = 0,
@ -113,7 +114,7 @@ enum {
enum { enum {
TASK_TRIGGER_STATUS__INACTIVE = 1, TASK_TRIGGER_STATUS__INACTIVE = 1,
TASK_TRIGGER_STATUS__ACTIVE, TASK_TRIGGER_STATUS__MAY_ACTIVE,
}; };
typedef enum { typedef enum {
@ -258,6 +259,7 @@ typedef struct STaskId {
typedef struct SStreamTaskId { typedef struct SStreamTaskId {
int64_t streamId; int64_t streamId;
int32_t taskId; int32_t taskId;
int64_t refId;
const char* idStr; const char* idStr;
} SStreamTaskId; } SStreamTaskId;
@ -291,12 +293,12 @@ typedef struct SStreamStatus {
int8_t schedStatus; int8_t schedStatus;
int8_t statusBackup; int8_t statusBackup;
int32_t schedIdleTime; // idle time before invoke again int32_t schedIdleTime; // idle time before invoke again
int32_t timerActive; // timer is active
int64_t lastExecTs; // last exec time stamp int64_t lastExecTs; // last exec time stamp
int32_t inScanHistorySentinel; int32_t inScanHistorySentinel;
bool appendTranstateBlock; // has append the transfer state data block already bool appendTranstateBlock; // has appended the transfer state data block already
bool removeBackendFiles; // remove backend files on disk when free stream tasks bool removeBackendFiles; // remove backend files on disk when free stream tasks
SConsenChkptInfo consenChkptInfo; SConsenChkptInfo consenChkptInfo;
STimeWindow latestForceWindow; // latest generated time window, only valid in
} SStreamStatus; } SStreamStatus;
typedef struct SDataRange { typedef struct SDataRange {
@ -305,14 +307,16 @@ typedef struct SDataRange {
} SDataRange; } SDataRange;
typedef struct SSTaskBasicInfo { typedef struct SSTaskBasicInfo {
int32_t nodeId; // vgroup id or snode id int32_t nodeId; // vgroup id or snode id
SEpSet epSet; SEpSet epSet;
SEpSet mnodeEpset; // mnode epset for send heartbeat SEpSet mnodeEpset; // mnode epset for send heartbeat
int32_t selfChildId; int32_t selfChildId;
int32_t totalLevel; int32_t trigger;
int8_t taskLevel; int8_t taskLevel;
int8_t fillHistory; // is fill history task or not int8_t fillHistory; // is fill history task or not
int64_t delaySchedParam; // in msec int64_t delaySchedParam; // in msec
int64_t watermark; // extracted from operators
SInterval interval;
} SSTaskBasicInfo; } SSTaskBasicInfo;
typedef struct SStreamRetrieveReq SStreamRetrieveReq; typedef struct SStreamRetrieveReq SStreamRetrieveReq;
@ -454,7 +458,6 @@ struct SStreamTask {
// the followings attributes don't be serialized // the followings attributes don't be serialized
SScanhistorySchedInfo schedHistoryInfo; SScanhistorySchedInfo schedHistoryInfo;
int32_t refCnt;
int32_t transferStateAlignCnt; int32_t transferStateAlignCnt;
struct SStreamMeta* pMeta; struct SStreamMeta* pMeta;
SSHashObj* pNameMap; SSHashObj* pNameMap;
@ -544,9 +547,10 @@ typedef struct STaskUpdateEntry {
typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param); typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int32_t trigger,
SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask); int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5,
void tFreeStreamTask(SStreamTask* pTask); SStreamTask** pTask);
void tFreeStreamTask(void* pTask);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask); int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask); int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver); int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
@ -664,6 +668,8 @@ void streamTaskResetStatus(SStreamTask* pTask);
void streamTaskSetStatusReady(SStreamTask* pTask); void streamTaskSetStatusReady(SStreamTask* pTask);
ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask); ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
const char* streamTaskGetExecType(int32_t type); const char* streamTaskGetExecType(int32_t type);
int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId);
void streamTaskFreeRefId(int64_t* pRefId);
bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
@ -752,16 +758,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask);
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
void streamMetaClear(SStreamMeta* pMeta); void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta); int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta); void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
int64_t endTs, bool ready); int64_t endTs, bool ready);
int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo); int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo);

View File

@ -16,8 +16,6 @@
#ifndef _STREAM_FILE_STATE_H_ #ifndef _STREAM_FILE_STATE_H_
#define _STREAM_FILE_STATE_H_ #define _STREAM_FILE_STATE_H_
#include "os.h"
#include "storageapi.h" #include "storageapi.h"
#include "tarray.h" #include "tarray.h"
#include "tdef.h" #include "tdef.h"
@ -37,7 +35,7 @@ typedef void (*_state_buff_cleanup_fn)(void* pRowBuff);
typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num); typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num);
typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey); typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen); typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState); typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);
typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal,
@ -45,6 +43,8 @@ typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, i
typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2); typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2);
typedef int (*__session_compare_fn_t)(const void* pWin, const void* pDatas, int pos);
int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp, int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp,
void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type, void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type,
struct SStreamFileState** ppFileState); struct SStreamFileState** ppFileState);
@ -54,6 +54,8 @@ bool needClearDiskBuff(SStreamFileState* pFileState);
void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used); void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used);
void streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos); void streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t addRowBuffIfNotExist(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode); int32_t* pWinCode);
void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen); void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
@ -71,9 +73,11 @@ int32_t streamFileStateGetSelectRowSize(SStreamFileState* pFileState);
void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts); void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts);
void* getRowStateBuff(SStreamFileState* pFileState); void* getRowStateBuff(SStreamFileState* pFileState);
void* getSearchBuff(SStreamFileState* pFileState);
void* getStateFileStore(SStreamFileState* pFileState); void* getStateFileStore(SStreamFileState* pFileState);
bool isDeteled(SStreamFileState* pFileState, TSKEY ts); bool isDeteled(SStreamFileState* pFileState, TSKEY ts);
bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap); bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap);
TSKEY getFlushMark(SStreamFileState* pFileState);
SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState); SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState);
int32_t getRowStateRowSize(SStreamFileState* pFileState); int32_t getRowStateRowSize(SStreamFileState* pFileState);
@ -94,6 +98,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
void sessionWinStateClear(SStreamFileState* pFileState); void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff); void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* createStateCursor(SStreamFileState* pFileState);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey); SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey); SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey); SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
@ -103,6 +108,8 @@ void sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey, int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey,
range_cmpr_fn cmpFn); range_cmpr_fn cmpFn);
int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_fn_t cmpFn);
// state window // state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* pWinCode); state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -117,6 +124,34 @@ int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyL
int32_t* pWinCode); int32_t* pWinCode);
int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen); int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
// time slice
int32_t getHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t hashSortFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
int32_t hashSortFileRemoveFn(SStreamFileState* pFileState, const void* pKey);
void clearSearchBuff(SStreamFileState* pFileState);
int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t getHashSortPrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t recoverFillSnapshot(SStreamFileState* pFileState, int64_t ckId);
void deleteHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey);
//group
int32_t streamFileStateGroupPut(SStreamFileState* pFileState, int64_t groupId, void* value, int32_t vLen);
void streamFileStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamFileStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
SSHashObj* getGroupIdCache(SStreamFileState* pFileState);
int fillStateKeyCompare(const void* pWin1, const void* pDatas, int pos);
int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t addSearchItem(SStreamFileState* pFileState, SArray* pWinStates, const SWinKey* pKey);
//twa
void setFillInfo(SStreamFileState* pFileState);
void clearExpiredState(SStreamFileState* pFileState);
int32_t addArrayBuffIfNotExist(SSHashObj* pSearchBuff, uint64_t groupId, SArray** ppResStates);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
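
For orientation, a minimal sketch of how the row-buffer lookups declared above are typically driven. It assumes it is compiled inside the TDengine tree (SStreamFileState, SWinKey, TSDB_CODE_SUCCESS already visible through this header); the key length and the pWinCode "found" convention are assumptions, not guarantees from the header itself.

    /* Sketch only: in-tree compilation assumed. */
    static int32_t demoUpsertWindowState(SStreamFileState* pFileState, const SWinKey* pKey) {
      void*   pVal = NULL;
      int32_t vLen = 0;
      int32_t winCode = 0;

      /* getRowBuff() hands back the row buffer for pKey, reloading it from the
       * backing file store if it was already flushed to disk. */
      int32_t code = getRowBuff(pFileState, (void*)pKey, sizeof(SWinKey), &pVal, &vLen, &winCode);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }
      (void)vLen;

      if (winCode == TSDB_CODE_SUCCESS) {
        /* assumed: the window existed before, pVal holds its previous state */
      } else {
        /* assumed: a brand-new window, pVal points at freshly prepared space */
      }

      /* ... fold the incoming row into the buffer behind pVal here ... */
      return TSDB_CODE_SUCCESS;
    }
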

View File

@ -36,8 +36,8 @@ bool updateInfoIsTableInserted(SUpdateInfo* pInfo, int64_t tbUid);
void updateInfoDestroy(SUpdateInfo* pInfo); void updateInfoDestroy(SUpdateInfo* pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo* pInfo); void updateInfoAddCloseWindowSBF(SUpdateInfo* pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo); void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo);
int32_t updateInfoSerialize(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen); int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo); int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo);
void windowSBfDelete(SUpdateInfo* pInfo, uint64_t count); void windowSBfDelete(SUpdateInfo* pInfo, uint64_t count);
int32_t windowSBfAdd(SUpdateInfo* pInfo, uint64_t count); int32_t windowSBfAdd(SUpdateInfo* pInfo, uint64_t count);
bool isIncrementalTimeStamp(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts, void* pPkVal, int32_t len); bool isIncrementalTimeStamp(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts, void* pPkVal, int32_t len);
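
A hedged round-trip sketch of the new encoder-based signatures above, assuming the usual SEncoder/SDecoder helpers from tencode.h and an illustrative buffer size; the `pos` field name is also an assumption.

    /* Sketch only: tEncoderInit/tEncoderClear/tDecoderInit/tDecoderClear assumed from tencode.h. */
    static int32_t demoUpdateInfoRoundTrip(const SUpdateInfo* pIn, SUpdateInfo* pOut) {
      uint8_t  buf[16 * 1024];          /* illustrative size only */
      SEncoder encoder = {0};
      SDecoder decoder = {0};

      tEncoderInit(&encoder, buf, sizeof(buf));
      int32_t code = updateInfoSerialize(&encoder, pIn);   /* new encoder-based API */
      int32_t len  = (int32_t)encoder.pos;                 /* bytes written (field name assumed) */
      tEncoderClear(&encoder);
      if (code != 0) return code;

      tDecoderInit(&decoder, buf, (uint32_t)len);
      code = updateInfoDeserialize(&decoder, pOut);        /* new decoder-based API */
      tDecoderClear(&decoder);
      return code;
    }
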

View File

@ -352,6 +352,8 @@ int32_t taosGetErrSize();
#define TSDB_CODE_MND_INVALID_SYS_TABLENAME TAOS_DEF_ERROR_CODE(0, 0x039A) #define TSDB_CODE_MND_INVALID_SYS_TABLENAME TAOS_DEF_ERROR_CODE(0, 0x039A)
#define TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE TAOS_DEF_ERROR_CODE(0, 0x039B) #define TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE TAOS_DEF_ERROR_CODE(0, 0x039B)
#define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C) #define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C)
#define TSDB_CODE_MND_INVALID_DNODE_LIST_FMT TAOS_DEF_ERROR_CODE(0, 0x039D)
#define TSDB_CODE_MND_DNODE_LIST_REPEAT TAOS_DEF_ERROR_CODE(0, 0x039E)
// mnode-node // mnode-node
#define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0) #define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0)
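
A small sketch of how the two new dnode-list error codes might be surfaced on the client side; only tstrerror() and the code constants come from the source, the rest is illustrative.

    #include <stdio.h>
    #include <stdint.h>
    #include "taoserror.h"   /* assumed header for the TSDB_CODE_* constants and tstrerror() */

    static void demoReportDnodeListError(int32_t code) {
      if (code == TSDB_CODE_MND_INVALID_DNODE_LIST_FMT || code == TSDB_CODE_MND_DNODE_LIST_REPEAT) {
        /* tstrerror() maps the numeric code to its registered message */
        printf("bad dnode list in the statement: %s\n", tstrerror(code));
      }
    }
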

View File

@ -453,6 +453,7 @@ typedef enum ELogicConditionType {
#define TSDB_CACHE_MODEL_LAST_ROW 1 #define TSDB_CACHE_MODEL_LAST_ROW 1
#define TSDB_CACHE_MODEL_LAST_VALUE 2 #define TSDB_CACHE_MODEL_LAST_VALUE 2
#define TSDB_CACHE_MODEL_BOTH 3 #define TSDB_CACHE_MODEL_BOTH 3
#define TSDB_DNODE_LIST_LEN 256
#define TSDB_ENCRYPT_ALGO_STR_LEN 16 #define TSDB_ENCRYPT_ALGO_STR_LEN 16
#define TSDB_ENCRYPT_ALGO_NONE_STR "none" #define TSDB_ENCRYPT_ALGO_NONE_STR "none"
#define TSDB_ENCRYPT_ALGO_SM4_STR "sm4" #define TSDB_ENCRYPT_ALGO_SM4_STR "sm4"
@ -492,13 +493,13 @@ typedef enum ELogicConditionType {
#define TSDB_MIN_S3_CHUNK_SIZE (128 * 1024) #define TSDB_MIN_S3_CHUNK_SIZE (128 * 1024)
#define TSDB_MAX_S3_CHUNK_SIZE (1024 * 1024) #define TSDB_MAX_S3_CHUNK_SIZE (1024 * 1024)
#define TSDB_DEFAULT_S3_CHUNK_SIZE (256 * 1024) #define TSDB_DEFAULT_S3_CHUNK_SIZE (128 * 1024)
#define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute #define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute
#define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440) #define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440)
#define TSDB_DEFAULT_S3_KEEP_LOCAL (3650 * 1440) #define TSDB_DEFAULT_S3_KEEP_LOCAL (365 * 1440)
#define TSDB_MIN_S3_COMPACT 0 #define TSDB_MIN_S3_COMPACT 0
#define TSDB_MAX_S3_COMPACT 1 #define TSDB_MAX_S3_COMPACT 1
#define TSDB_DEFAULT_S3_COMPACT 0 #define TSDB_DEFAULT_S3_COMPACT 1
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1 #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600 #define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600
@ -652,6 +653,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
#define MONITOR_TAG_VALUE_LEN 300 #define MONITOR_TAG_VALUE_LEN 300
#define MONITOR_METRIC_NAME_LEN 100 #define MONITOR_METRIC_NAME_LEN 100
#define AUDIT_OPERATION_LEN 20
typedef enum { typedef enum {
ANAL_ALGO_TYPE_ANOMALY_DETECT = 0, ANAL_ALGO_TYPE_ANOMALY_DETECT = 0,
ANAL_ALGO_TYPE_FORECAST = 1, ANAL_ALGO_TYPE_FORECAST = 1,
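
Since the *_S3_KEEP_LOCAL limits above are stored in minutes (per the "unit minute" comment), here is a tiny self-contained check of what the new default works out to in days; the macro value is copied into the snippet only to keep it standalone.

    #include <stdio.h>

    #define DEMO_DEFAULT_S3_KEEP_LOCAL (365 * 1440)   /* copied from tdef.h for this sketch */

    int main(void) {
      printf("default s3KeepLocal = %d minutes = %d days\n",
             DEMO_DEFAULT_S3_KEEP_LOCAL, DEMO_DEFAULT_S3_KEEP_LOCAL / 1440);
      return 0;   /* prints: default s3KeepLocal = 525600 minutes = 365 days */
    }
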

View File

@ -70,7 +70,7 @@ extern int32_t tdbDebugFlag;
extern int32_t sndDebugFlag; extern int32_t sndDebugFlag;
extern int32_t simDebugFlag; extern int32_t simDebugFlag;
extern int32_t tqClientDebug; extern int32_t tqClientDebugFlag;
int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc); int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc);
void taosCloseLog(); void taosCloseLog();

View File

@ -20,11 +20,11 @@
extern "C" { extern "C" {
#endif #endif
extern char version[]; extern char td_version[];
extern char compatible_version[]; extern char td_compatible_version[];
extern char gitinfo[]; extern char td_gitinfo[];
extern char gitinfoOfInternal[]; extern char td_gitinfoOfInternal[];
extern char buildinfo[]; extern char td_buildinfo[];
#ifdef __cplusplus #ifdef __cplusplus
} }
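
A short sketch of consuming the renamed td_* symbols, mirroring how the client later feeds td_version into taosVersionStrToInt(); the prototype spelled out here is an assumption for the sake of a standalone snippet.

    #include <stdio.h>
    #include <stdint.h>

    extern char td_version[];
    extern char td_gitinfo[];
    extern int32_t taosVersionStrToInt(const char* vstr, int32_t* vint);   /* assumed prototype */

    static void demoLogBuildInfo(void) {
      int32_t compatVer = 0;
      if (taosVersionStrToInt(td_version, &compatVer) == 0) {
        printf("client version %s (git %s), compat 0x%x\n", td_version, td_gitinfo, compatVer);
      }
    }
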

View File

@ -185,7 +185,14 @@ function kill_process() {
function install_main_path() { function install_main_path() {
#create install main dir and all sub dir #create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || : ${csudo}rm -rf ${install_main_dir}/cfg || :
${csudo}rm -rf ${install_main_dir}/bin || :
${csudo}rm -rf ${install_main_dir}/driver || :
${csudo}rm -rf ${install_main_dir}/examples || :
${csudo}rm -rf ${install_main_dir}/include || :
${csudo}rm -rf ${install_main_dir}/share || :
${csudo}rm -rf ${install_main_dir}/log || :
${csudo}mkdir -p ${install_main_dir} ${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg ${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin ${csudo}mkdir -p ${install_main_dir}/bin

View File

@ -0,0 +1,4 @@
TDengine client is installed successfully. Please open a terminal and execute the commands below:
To configure TDengine client, sudo vi /etc/taos/taos.cfg
To access the TDengine command line interface, taos -h YourServerName


View File

@ -12,9 +12,18 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
rem // stop and delete service rem // stop and delete service
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close) mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close)
echo This might take a few moment to accomplish deleting service taosd/taosadapter ...
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo This might take a few moments to finish deleting the taosd/taosadapter services ...
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo This might take a few moments to finish deleting the taosd/taoskeeper services ...
)
call :check_svc taosd call :check_svc taosd
call :check_svc taosadapter call :check_svc taosadapter
call :check_svc taoskeeper
set source_dir=%2 set source_dir=%2
set source_dir=%source_dir:/=\\% set source_dir=%source_dir:/=\\%
@ -46,6 +55,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul
) )
) )
if exist %binary_dir%\\test\\cfg\\taoskeeper.toml (
if not exist %target_dir%\\cfg\\taoskeeper.toml (
copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul
)
)
copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul
copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul
copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul
@ -104,6 +118,9 @@ copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
if exist %binary_dir%\\build\\bin\\taosadapter.exe ( if exist %binary_dir%\\build\\bin\\taosadapter.exe (
copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul
) )
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul
)
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close) mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)
@ -116,6 +133,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m
) )
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m
)
goto :eof goto :eof
:hasAdmin :hasAdmin
@ -123,6 +144,7 @@ goto :eof
call :stop_delete call :stop_delete
call :check_svc taosd call :check_svc taosd
call :check_svc taosadapter call :check_svc taosadapter
call :check_svc taoskeeper
if exist c:\\windows\\sysnative ( if exist c:\\windows\\sysnative (
echo x86 echo x86
@ -141,6 +163,7 @@ if exist c:\\windows\\sysnative (
rem // create services rem // create services
sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND
set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment" set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do ( for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do (
@ -181,6 +204,8 @@ sc stop taosd
sc delete taosd sc delete taosd
sc stop taosadapter sc stop taosadapter
sc delete taosadapter sc delete taosadapter
sc stop taoskeeper
sc delete taoskeeper
exit /B 0 exit /B 0
:check_svc :check_svc

View File

@ -129,6 +129,13 @@ function kill_taosadapter() {
fi fi
} }
function kill_taoskeeper() {
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function kill_taosd() { function kill_taosd() {
pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}') pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then if [ -n "$pid" ]; then
@ -155,6 +162,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || :
${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || : ${csudo}rm -f ${bin_link_dir}/taosdump || :
@ -169,6 +177,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
@ -183,6 +192,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || :
@ -197,6 +207,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || :
@ -208,6 +219,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
@ -407,6 +419,29 @@ function install_taosadapter_config() {
fi fi
} }
function install_taoskeeper_config() {
if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then
${csudo}mkdir -p ${cfg_install_dir} || :
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || :
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \
${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || :
else
if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
fi
fi
}
function install_log() { function install_log() {
${csudo}rm -rf ${log_dir} || : ${csudo}rm -rf ${log_dir} || :
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
@ -526,6 +561,15 @@ function install_taosadapter_service() {
fi fi
} }
function install_taoskeeper_service() {
if ((${service_mod} == 0)); then
[ -f ${binary_dir}/test/cfg/taoskeeper.service ] &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \
${service_config_dir}/ || :
${csudo}systemctl daemon-reload
fi
}
function install_service_on_launchctl() { function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
@ -534,6 +578,10 @@ function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
} }
function install_service() { function install_service() {
@ -549,6 +597,7 @@ function install_service() {
install_service_on_launchctl install_service_on_launchctl
fi fi
} }
function install_app() { function install_app() {
if [ "$osType" = "Darwin" ]; then if [ "$osType" = "Darwin" ]; then
${csudo}rm -rf /Applications/TDengine.app && ${csudo}rm -rf /Applications/TDengine.app &&
@ -573,6 +622,7 @@ function update_TDengine() {
elif ((${service_mod} == 1)); then elif ((${service_mod} == 1)); then
${csudo}service ${serverName} stop || : ${csudo}service ${serverName} stop || :
else else
kill_taoskeeper
kill_taosadapter kill_taosadapter
kill_taosd kill_taosd
fi fi
@ -591,9 +641,11 @@ function update_TDengine() {
install_service install_service
install_taosadapter_service install_taosadapter_service
install_taoskeeper_service
install_config install_config
install_taosadapter_config install_taosadapter_config
install_taoskeeper_config
echo echo
echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" echo -e "\033[44;32;1m${productName} is updated successfully!${NC}"
@ -602,22 +654,33 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else else
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \ [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi fi
fi fi
@ -643,9 +706,11 @@ function install_TDengine() {
install_service install_service
install_taosadapter_service install_taosadapter_service
install_taoskeeper_service
install_config install_config
install_taosadapter_config install_taosadapter_config
install_taoskeeper_config
# Ask if to start the service # Ask if to start the service
echo echo
@ -654,22 +719,33 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else else
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \ [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi fi
fi fi

View File

@ -10,6 +10,10 @@ else()
add_library(taos SHARED ${CLIENT_SRC}) add_library(taos SHARED ${CLIENT_SRC})
endif() endif()
if(${TD_DARWIN})
target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype)
endif()
INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(jni)
target_include_directories( target_include_directories(
@ -46,6 +50,11 @@ set_target_properties(
) )
add_library(taos_static STATIC ${CLIENT_SRC}) add_library(taos_static STATIC ${CLIENT_SRC})
if(${TD_DARWIN})
target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories( target_include_directories(
taos_static taos_static
PUBLIC "${TD_SOURCE_DIR}/include/client" PUBLIC "${TD_SOURCE_DIR}/include/client"

View File

@ -108,6 +108,10 @@ typedef struct SQueryExecMetric {
int64_t execCostUs; int64_t execCostUs;
} SQueryExecMetric; } SQueryExecMetric;
typedef struct {
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SAppInstServerCFG;
struct SAppInstInfo { struct SAppInstInfo {
int64_t numOfConns; int64_t numOfConns;
SCorEpSet mgmtEp; SCorEpSet mgmtEp;
@ -121,7 +125,7 @@ struct SAppInstInfo {
void* pTransporter; void* pTransporter;
SAppHbMgr* pAppHbMgr; SAppHbMgr* pAppHbMgr;
char* instKey; char* instKey;
SMonitorParas monitorParas; SAppInstServerCFG serverCfg;
}; };
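
A minimal sketch of reading the relocated monitor parameters through the new serverCfg wrapper; it assumes in-tree compilation and uses only field names visible in this commit.

    /* Sketch only: SAppInstInfo / SMonitorParas come from the TDengine tree. */
    static bool demoSlowLogEnabled(const struct SAppInstInfo* pApp, int64_t durationUs) {
      const SMonitorParas* pParas = &pApp->serverCfg.monitorParas;
      /* enableAuditDelete travels in the same hb response as the monitor paras */
      return pParas->tsEnableMonitor && durationUs >= (int64_t)pParas->tsSlowLogThreshold * 1000000LL;
    }
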
typedef struct SAppInfo { typedef struct SAppInfo {
@ -297,8 +301,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo); void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision); void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4); int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4);
bool convertUcs4);
int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols); int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo); void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq); int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);

View File

@ -92,6 +92,26 @@ extern "C" {
} \ } \
} }
#define SML_CHECK_CODE(CMD) \
code = (CMD); \
if (TSDB_CODE_SUCCESS != code) { \
lino = __LINE__; \
goto END; \
}
#define SML_CHECK_NULL(CMD) \
if (NULL == (CMD)) { \
code = terrno; \
lino = __LINE__; \
goto END; \
}
#define RETURN \
if (code != 0){ \
uError("%s failed code:%d line:%d", __FUNCTION__ , code, lino); \
} \
return code;
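
A hedged sketch of how the new SML_CHECK_CODE/SML_CHECK_NULL/RETURN macros are meant to compose in a parser helper; demoParseStep() is a placeholder for any call returning an int32_t code, and taosMemoryCalloc/taosMemoryFree/terrno come from the TDengine tree.

    /* Sketch only: in-tree compilation assumed. */
    static int32_t demoParseStep(SSmlKv* pKv) { (void)pKv; return TSDB_CODE_SUCCESS; }

    static int32_t demoBuildKv(SSmlKv** ppKv) {
      int32_t code = TSDB_CODE_SUCCESS;
      int32_t lino = 0;

      SSmlKv* pKv = taosMemoryCalloc(1, sizeof(SSmlKv));
      SML_CHECK_NULL(pKv);                 /* on OOM: code = terrno, jump to END */
      SML_CHECK_CODE(demoParseStep(pKv));  /* on failure: record __LINE__, jump to END */

      *ppKv = pKv;
      RETURN                               /* success path: logs nothing, returns 0 */

    END:
      taosMemoryFree(pKv);
      RETURN                               /* failure path: uError() with code/lino, returns code */
    }
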
typedef enum { typedef enum {
SCHEMA_ACTION_NULL, SCHEMA_ACTION_NULL,
SCHEMA_ACTION_CREATE_STABLE, SCHEMA_ACTION_CREATE_STABLE,
@ -191,7 +211,6 @@ typedef struct {
cJSON *root; // for parse json cJSON *root; // for parse json
int8_t offset[OTD_JSON_FIELDS_NUM]; int8_t offset[OTD_JSON_FIELDS_NUM];
SSmlLineInfo *lines; // element is SSmlLineInfo SSmlLineInfo *lines; // element is SSmlLineInfo
bool parseJsonByLib;
SArray *tagJsonArray; SArray *tagJsonArray;
SArray *valueJsonArray; SArray *valueJsonArray;
@ -211,13 +230,8 @@ typedef struct {
extern int64_t smlFactorNS[]; extern int64_t smlFactorNS[];
extern int64_t smlFactorS[]; extern int64_t smlFactorS[];
typedef int32_t (*_equal_fn_sml)(const void *, const void *);
int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle); int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle);
void smlDestroyInfo(SSmlHandle *info); void smlDestroyInfo(SSmlHandle *info);
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
void smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2); void smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg); int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision); int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
@ -237,7 +251,7 @@ void smlDestroyTableInfo(void *para);
void freeSSmlKv(void* data); void freeSSmlKv(void* data);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements); int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements); int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload); int32_t smlParseJSONExt(SSmlHandle *info, char *payload);
int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSmlSTableMeta** sMeta); int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSmlSTableMeta** sMeta);
bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv); bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv);
@ -246,7 +260,8 @@ int32_t smlProcessChildTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements); int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlJoinMeasureTag(SSmlLineInfo *elements); int32_t smlJoinMeasureTag(SSmlLineInfo *elements);
void smlBuildTsKv(SSmlKv *kv, int64_t ts); void smlBuildTsKv(SSmlKv *kv, int64_t ts);
int32_t smlParseEndTelnetJson(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv); int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndTelnetJsonUnFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs); int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs);
static inline bool smlDoubleToInt64OverFlow(double num) { static inline bool smlDoubleToInt64OverFlow(double num) {

View File

@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags);
int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx); int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx);
int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
int stmtGetStbColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_STB **fields);
int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums); int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums);
int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums); int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums);
int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert); int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert);

View File

@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType))); ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType)));
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject( ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(
json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows))); json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows)));
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) { if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) {
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen]; char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen];
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0'; pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0';
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp; pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp;
} else { } else {
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
} }
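
The hunk above relies on a truncate-then-restore trick for oversized SQL text; here is a standalone sketch of that pattern, with the log writer passed in as a callback purely for illustration.

    #include <string.h>
    #include <stdint.h>

    /* Temporarily NUL-terminate the SQL at the slow-log limit, emit it, then put
     * the saved byte back so the request's own copy of the SQL stays intact. */
    static void demoLogTruncated(char* sql, int32_t maxLen, void (*writeLog)(const char*)) {
      if (sql == NULL || (int32_t)strlen(sql) <= maxLen) {
        if (sql != NULL) writeLog(sql);   /* short enough: log as-is */
        return;
      }
      char saved = sql[maxLen];
      sql[maxLen] = '\0';
      writeLog(sql);                      /* only the first maxLen bytes are emitted */
      sql[maxLen] = saved;                /* restore the original byte */
    }
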
@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
} }
} }
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) { if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) {
sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT); sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) {
} }
} }
if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL || if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) && duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) { checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1); (void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) { if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s", taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s",
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration,
pRequest->sqlstr); pRequest->sqlstr);
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration); slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration);
if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) { if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) {
tscError("failed to generate write slow log"); tscError("failed to generate write slow log");
@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread,
rpcInit.startReadTimer = 1; rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout; rpcInit.readTimeout = tsReadTimeout;
int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
tscError("invalid version string."); tscError("invalid version string.");
return code; return code;
@ -689,7 +689,7 @@ void doDestroyRequest(void *p) {
int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
tscError("failed to remove request from hash, code:%s", tstrerror(code)); tscWarn("failed to remove request from hash, code:%s", tstrerror(code));
} }
schedulerFreeJob(&pRequest->body.queryJob, 0); schedulerFreeJob(&pRequest->body.queryJob, 0);

View File

@ -605,7 +605,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
return code; return code;
} }
pInst->monitorParas = pRsp.monitorParas; pInst->serverCfg.monitorParas = pRsp.monitorParas;
pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete;
tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId, tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId,
pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope); pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope);

View File

@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app)); tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app));
tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user)); tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user));
tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd)); tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd));
tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer)); tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer));
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = taosMemoryMalloc(contLen); void* pReq = taosMemoryMalloc(contLen);
@ -1770,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
} }
} }
int32_t doProcessMsgFromServer(void* param) { int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) {
AsyncArg* arg = (AsyncArg*)param;
SRpcMsg* pMsg = &arg->msg;
SEpSet* pEpSet = arg->pEpset;
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
if (pMsg->info.ahandle == NULL) { if (pMsg->info.ahandle == NULL) {
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont); rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg); taosMemoryFree(pEpSet);
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
STscObj* pTscObj = NULL; STscObj* pTscObj = NULL;
STraceId* trace = &pMsg->info.traceId; STraceId* trace = &pMsg->info.traceId;
@ -1802,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) {
if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) { if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) {
tscError("doProcessMsgFromServer taosReleaseRef failed"); tscError("doProcessMsgFromServer taosReleaseRef failed");
} }
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont); rpcFreeCont(pMsg->pCont);
taosMemoryFree(pEpSet);
destroySendMsgInfo(pSendInfo); destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
pTscObj = pRequest->pTscObj; pTscObj = pRequest->pTscObj;
@ -1844,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) {
rpcFreeCont(pMsg->pCont); rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo); destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t doProcessMsgFromServer(void* param) {
AsyncArg* arg = (AsyncArg*)param;
int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset);
taosMemoryFree(arg);
return code;
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
int32_t code = 0;
SEpSet* tEpSet = NULL; SEpSet* tEpSet = NULL;
if (pEpSet != NULL) { if (pEpSet != NULL) {
tEpSet = taosMemoryCalloc(1, sizeof(SEpSet)); tEpSet = taosMemoryCalloc(1, sizeof(SEpSet));
if (NULL == tEpSet) { if (NULL == tEpSet) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY; code = terrno;
rpcFreeCont(pMsg->pCont); pMsg->code = terrno;
destroySendMsgInfo(pMsg->info.ahandle); goto _exit;
return;
} }
(void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet)); (void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet));
} }
@ -1879,21 +1878,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg)); AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg));
if (NULL == arg) { if (NULL == arg) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY; code = terrno;
taosMemoryFree(tEpSet); pMsg->code = code;
rpcFreeCont(pMsg->pCont); goto _exit;
destroySendMsgInfo(pMsg->info.ahandle);
return;
} }
arg->msg = *pMsg; arg->msg = *pMsg;
arg->pEpset = tEpSet; arg->pEpset = tEpSet;
if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) { if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) {
tscError("failed to sched msg to tsc, tsc ready to quit"); pMsg->code = code;
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg->pEpset);
destroySendMsgInfo(pMsg->info.ahandle);
taosMemoryFree(arg); taosMemoryFree(arg);
goto _exit;
}
return;
_exit:
tscError("failed to sched msg to tsc since %s", tstrerror(code));
code = doProcessMsgFromServerImpl(pMsg, tEpSet);
if (code != 0) {
tscError("failed to sched msg to tsc, tsc ready quit");
} }
} }
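
A generic sketch of the dispatch shape introduced above: try the async executor first, and if scheduling fails run the same implementation inline so the message and its epset are still cleaned up exactly once. The function-pointer names are placeholders standing in for taosAsyncExec() and doProcessMsgFromServerImpl(), not TDengine APIs.

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t (*demo_handler_fn)(void* msg);

    static int32_t demoDispatch(int32_t (*schedule)(demo_handler_fn, void*),
                                demo_handler_fn handler, void* msg) {
      int32_t code = schedule(handler, msg);   /* async path */
      if (code != 0) {
        fprintf(stderr, "async sched failed (%d), falling back to inline processing\n", code);
        code = handler(msg);                   /* synchronous fallback */
      }
      return code;
    }
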
@ -2081,12 +2084,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) { static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
int32_t idx = -1; int32_t idx = -1;
iconv_t conv = taosAcquireConv(&idx, C2M); iconv_t conv = taosAcquireConv(&idx, C2M);
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR; if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int32_t type = pResultInfo->fields[i].type; int32_t type = pResultInfo->fields[i].type;
int32_t bytes = pResultInfo->fields[i].bytes; int32_t bytes = pResultInfo->fields[i].bytes;
@ -2100,7 +2103,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->convertBuf[i] = p; pResultInfo->convertBuf[i] = p;
SResultColumn* pCol = &pResultInfo->pCol[i]; SResultColumn* pCol = &pResultInfo->pCol[i];
for (int32_t j = 0; j < numOfRows; ++j) { for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) {
if (pCol->offset[j] != -1) { if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData; char* pStart = pCol->offset[j] + pCol->pData;
@ -2133,10 +2136,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
numOfCols * (sizeof(int8_t) + sizeof(int32_t)); numOfCols * (sizeof(int8_t) + sizeof(int32_t));
} }
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) {
char* p = (char*)pResultInfo->pData; char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p; int32_t blockVersion = *(int32_t*)p;
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length | // length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
@ -2195,10 +2201,16 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
} }
pStart += colLen; pStart += colLen;
} }
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
len += sizeof(bool);
return len; return len;
} }
static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
bool needConvert = false; bool needConvert = false;
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) { if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
@ -2215,7 +2227,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p = (char*)pResultInfo->pData; char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p; int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); int32_t dataLen = estimateJsonLen(pResultInfo);
if (dataLen <= 0) { if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
@ -2338,27 +2350,36 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart1 += colLen1; pStart1 += colLen1;
} }
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
// (void)memcpy(pStart1, pStart, sizeof(bool));
totalLen += sizeof(bool);
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen; *(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
pResultInfo->pData = pResultInfo->convertJson; pResultInfo->pData = pResultInfo->convertJson;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) {
bool convertUcs4) { if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) {
if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) {
tscError("setResultDataPtr paras error"); tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
if (numOfRows == 0) { if (pResultInfo->numOfRows == 0) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (pResultInfo->pData == NULL) {
tscError("setResultDataPtr error: pData is NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
int32_t code = doPrepareResPtr(pResultInfo); int32_t code = doPrepareResPtr(pResultInfo);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
code = doConvertJson(pResultInfo, numOfCols, numOfRows); code = doConvertJson(pResultInfo);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -2378,9 +2399,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p; int32_t cols = *(int32_t*)p;
p += sizeof(int32_t); p += sizeof(int32_t);
if (rows != numOfRows || cols != numOfCols) { if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols,
numOfCols); pResultInfo->numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
@ -2391,7 +2412,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
p += sizeof(uint64_t); p += sizeof(uint64_t);
// check fields // check fields
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int8_t type = *(int8_t*)p; int8_t type = *(int8_t*)p;
p += sizeof(int8_t); p += sizeof(int8_t);
@ -2400,10 +2421,14 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
} }
int32_t* colLength = (int32_t*)p; int32_t* colLength = (int32_t*)p;
p += sizeof(int32_t) * numOfCols; p += sizeof(int32_t) * pResultInfo->numOfCols;
char* pStart = p; char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
if ((pStart - pResultInfo->pData) >= dataLen) {
tscError("setResultDataPtr invalid offset over dataLen %d", dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (blockVersion == BLOCK_VERSION_1) { if (blockVersion == BLOCK_VERSION_1) {
colLength[i] = htonl(colLength[i]); colLength[i] = htonl(colLength[i]);
} }
@ -2411,10 +2436,13 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen); tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR; return TSDB_CODE_TSC_INTERNAL_ERROR;
} }
if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) {
tscError("invalid type %d", pResultInfo->fields[i].type);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
pResultInfo->pCol[i].offset = (int32_t*)pStart; pResultInfo->pCol[i].offset = (int32_t*)pStart;
pStart += numOfRows * sizeof(int32_t); pStart += pResultInfo->numOfRows * sizeof(int32_t);
} else { } else {
pResultInfo->pCol[i].nullbitmap = pStart; pResultInfo->pCol[i].nullbitmap = pStart;
pStart += BitmapLen(pResultInfo->numOfRows); pStart += BitmapLen(pResultInfo->numOfRows);
@ -2427,11 +2455,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i]; pStart += colLength[i];
} }
p = pStart;
// bool blankFill = *(bool*)p; // bool blankFill = *(bool*)p;
p += sizeof(bool); p += sizeof(bool);
int32_t offset = p - pResultInfo->pData;
if (offset > dataLen) {
tscError("invalid offset %d, dataLen %d", offset, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (convertUcs4) { if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength); code = doConvertUCS4(pResultInfo, colLength);
} }
return code; return code;
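Restating the traversal that the new bounds checks protect: after the column-length array, each column carries either one int32 offset per row (variable-length types) or a null bitmap (fixed-length types), then colLength[i] bytes of payload, and the block ends with a single blankfill bool. A compact sketch of one step of that walk, using only helpers that appear in the hunk (IS_VAR_DATA_TYPE, BitmapLen); the function name is illustrative:

/* Illustrative sketch: advance past one column section of a fetched block. */
static char* skipOneColumn(char* pStart, int8_t type, int64_t numOfRows, int32_t colLen) {
  if (IS_VAR_DATA_TYPE(type)) {
    pStart += numOfRows * sizeof(int32_t); /* per-row offsets */
  } else {
    pStart += BitmapLen(numOfRows);        /* null bitmap */
  }
  return pStart + colLen;                  /* column payload */
}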
@ -2544,7 +2578,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
pResultInfo->totalRows += pResultInfo->numOfRows; pResultInfo->totalRows += pResultInfo->numOfRows;
int32_t code = int32_t code =
setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4); setResultDataPtr(pResultInfo, convertUcs4);
return code; return code;
} }
@ -2573,7 +2607,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn; rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
rpcInit.readTimeout = tsReadTimeout; rpcInit.readTimeout = tsReadTimeout;
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) { if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) {
tscError("faild to convert taos version from str to int, errcode:%s", terrstr()); tscError("faild to convert taos version from str to int, errcode:%s", terrstr());
goto _OVER; goto _OVER;
} }
@ -2839,6 +2873,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
if (pParam->pRequest) { if (pParam->pRequest) {
pParam->pRequest->code = code; pParam->pRequest->code = code;
clientOperateReport(pParam->pRequest);
} }
if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) { if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) {

View File


@ -84,7 +84,7 @@ void taos_cleanup(void) {
taosCloseRef(id); taosCloseRef(id);
nodesDestroyAllocatorSet(); nodesDestroyAllocatorSet();
// cleanupAppInfo(); // cleanupAppInfo();
rpcCleanup(); rpcCleanup();
tscDebug("rpc cleanup"); tscDebug("rpc cleanup");
@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) {
tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp); tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp);
} }
taosMemoryFree(pRsp); taosMemoryFree(pRsp);
} }
void taos_kill_query(TAOS *taos) { void taos_kill_query(TAOS *taos) {
@ -484,7 +483,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields); return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields);
} }
int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int32_t len = 0; int32_t len = 0;
for (int i = 0; i < num_fields; ++i) { for (int i = 0; i < num_fields; ++i) {
if (i > 0 && len < size - 1) { if (i > 0 && len < size - 1) {
@ -589,7 +588,7 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD
break; break;
} }
} }
if (len < size){ if (len < size) {
str[len] = 0; str[len] = 0;
} }
@ -670,7 +669,7 @@ const char *taos_data_type(int type) {
} }
} }
const char *taos_get_client_info() { return version; } const char *taos_get_client_info() { return td_version; }
// return int32_t // return int32_t
int taos_affected_rows(TAOS_RES *res) { int taos_affected_rows(TAOS_RES *res) {
@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) {
} }
int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) { int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) {
if (stmt == NULL || NULL == count) { if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__); tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA; terrno = TSDB_CODE_INVALID_PARA;
return terrno; return terrno;
@ -2103,12 +2102,28 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count,
} }
} }
int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields) {
if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
return stmtGetStbColFields2(stmt, count, fields);
}
void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) {
(void)stmt; (void)stmt;
if (!fields) return; if (!fields) return;
taosMemoryFree(fields); taosMemoryFree(fields);
} }
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);
}
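A hedged usage sketch of the two entry points added above, based only on the signatures shown here (the TAOS_FIELD_STB layout is not part of this diff, so its members are not dereferenced):

/* Illustrative only: query and release the super-table field description. */
int             count  = 0;
TAOS_FIELD_STB *fields = NULL;
int code = taos_stmt2_get_stb_fields(stmt, &count, &fields);
if (code == 0) {
  /* ... inspect `count` entries of `fields` here ... */
  taos_stmt2_free_stb_fields(stmt, fields);
}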
TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) {
if (stmt == NULL) { if (stmt == NULL) {
tscError("NULL parameter for %s", __FUNCTION__); tscError("NULL parameter for %s", __FUNCTION__);
@ -2144,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) {
return 0; return 0;
} }
char *getBuildInfo() { return buildinfo; } char *getBuildInfo() { return td_buildinfo; }


@ -2,8 +2,6 @@
#include "cJSON.h" #include "cJSON.h"
#include "clientInt.h" #include "clientInt.h"
#include "clientLog.h" #include "clientLog.h"
#include "os.h"
#include "tglobal.h"
#include "tmisce.h" #include "tmisce.h"
#include "tqueue.h" #include "tqueue.h"
#include "ttime.h" #include "ttime.h"
@ -19,6 +17,7 @@ STaosQueue* monitorQueue;
SHashObj* monitorSlowLogHash; SHashObj* monitorSlowLogHash;
char tmpSlowLogPath[PATH_MAX] = {0}; char tmpSlowLogPath[PATH_MAX] = {0};
TdThread monitorThread; TdThread monitorThread;
extern bool tsEnableAuditDelete;
static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) { static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) {
int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir); int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir);
@ -216,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) {
SEpSet ep = getEpSet_s(&pInst->mgmtEp); SEpSet ep = getEpSet_s(&pInst->mgmtEp);
generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep); generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep);
bool reset = bool reset =
taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset); tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset);
taosRUnLockLatch(&monitorLock); taosRUnLockLatch(&monitorLock);
} }
@ -289,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) {
goto fail; goto fail;
} }
pMonitor->timer = pMonitor->timer =
taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer); taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
if (pMonitor->timer == NULL) { if (pMonitor->timer == NULL) {
tscError("failed to start timer"); tscError("failed to start timer");
goto fail; goto fail;
@ -660,7 +659,7 @@ static void monitorSendAllSlowLog() {
taosHashCancelIterate(monitorSlowLogHash, pIter); taosHashCancelIterate(monitorSlowLogHash, pIter);
return; return;
} }
if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) { if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) {
pClient->lastCheckTime = t; pClient->lastCheckTime = t;
} else { } else {
continue; continue;
@ -686,7 +685,7 @@ static void monitorSendAllSlowLog() {
static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) { static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId); SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId);
if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) { if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) {
tscInfo("[monitor] monitor is disabled, skip send slow log"); tscInfo("[monitor] monitor is disabled, skip send slow log");
return; return;
} }
@ -933,3 +932,100 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) {
} }
return 0; return 0;
} }
int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
tscDebug("[del report]delete reportCB code:%d", code);
return 0;
}
int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) {
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
tscError("[del report]failed to allocate memory for sendInfo");
return terrno;
}
sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL};
sendInfo->requestId = requestId;
sendInfo->requestObjRefId = 0;
sendInfo->param = NULL;
sendInfo->fp = reportCB;
sendInfo->msgType = TDMT_MND_AUDIT;
SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
if (code != 0) {
tscError("[del report]failed to send msg to server, code:%d", code);
taosMemoryFree(sendInfo);
return code;
}
return TSDB_CODE_SUCCESS;
}
static void reportDeleteSql(SRequestObj* pRequest) {
SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot;
STscObj* pTscObj = pRequest->pTscObj;
if (pTscObj == NULL || pTscObj->pAppInfo == NULL) {
tscError("[del report]invalid tsc obj");
return;
}
if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) {
tscDebug("[del report]audit delete is disabled");
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
tscDebug("[del report]delete request result code:%d", pRequest->code);
return;
}
if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) {
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable));
return;
}
SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable;
SAuditReq req;
req.pSql = pRequest->sqlstr;
req.sqlLen = pRequest->sqlLen;
TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName));
TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName));
TAOS_UNUSED(tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"));
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("[del report]failed to allocate memory for req");
return;
}
if (tSerializeSAuditReq(pReq, tlen, &req) < 0) {
tscError("[del report]failed to serialize req");
taosMemoryFree(pReq);
return;
}
int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId);
if (code != 0) {
tscError("[del report]failed to send audit info, code:%d", code);
taosMemoryFree(pReq);
return;
}
tscDebug("[del report]delete data, sql:%s", req.pSql);
}
void clientOperateReport(SRequestObj* pRequest) {
if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) {
tscError("[del report]invalid request");
return;
}
if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) {
reportDeleteSql(pRequest);
}
}
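The delete-audit path above uses the usual size-then-serialize idiom: call tSerializeSAuditReq with a NULL buffer to learn the required length, allocate, then serialize for real. A minimal restatement of just that idiom (error handling trimmed; names as in the hunk):

/* Illustrative sketch of the two-pass serialization used in reportDeleteSql. */
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);   /* pass 1: size only */
void*   buf  = taosMemoryCalloc(1, tlen);
if (buf != NULL && tSerializeSAuditReq(buf, tlen, &req) >= 0) {
  /* pass 2 succeeded: hand buf/tlen to senAuditInfo, which frees it via reportCB */
} else {
  taosMemoryFree(buf);
}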


@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
goto End; goto End;
} }
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) { if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer); tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
goto End; goto End;
} }
@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
// update the appInstInfo // update the appInstInfo
pTscObj->pAppInfo->clusterId = connectRsp.clusterId; pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas; pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete;
tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d",
connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope); connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope);
lastClusterId = connectRsp.clusterId; lastClusterId = connectRsp.clusterId;
@ -588,7 +589,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
return code; return code;
} }
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize); *pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) { if (NULL == *pRsp) {
code = terrno; code = terrno;
@ -603,7 +605,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS); int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS);
if(len < 0) { if(len < 0) {
uError("buildShowVariablesRsp error, len:%d", len); uError("buildShowVariablesRsp error, len:%d", len);
code = terrno; code = terrno;
@ -741,7 +743,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
return code; return code;
} }
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize); *pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) { if (NULL == *pRsp) {
code = terrno; code = terrno;
@ -757,7 +760,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS); (*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS); int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS);
if(len < 0) { if(len < 0) {
uError("buildRetriveTableRspForCompactDb error, len:%d", len); uError("buildRetriveTableRspForCompactDb error, len:%d", len);
code = terrno; code = terrno;


@ -52,10 +52,8 @@
#define TMQ_META_VERSION "1.0" #define TMQ_META_VERSION "1.0"
static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
SColCmprWrapper* pColCmprRow, cJSON** pJson) { SColCmprWrapper* pColCmprRow, cJSON** pJson) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -457,7 +455,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
cJSON* tvalue = NULL; cJSON* tvalue = NULL;
if (IS_VAR_DATA_TYPE(pTagVal->type)) { if (IS_VAR_DATA_TYPE(pTagVal->type)) {
char* buf = NULL; char* buf = NULL;
int64_t bufSize = 0; int64_t bufSize = 0;
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) { if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
bufSize = pTagVal->nData * 2 + 2 + 3; bufSize = pTagVal->nData * 2 + 2 + 3;
@ -890,9 +888,6 @@ end:
} }
static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVCreateStbReq req = {0}; SVCreateStbReq req = {0};
SDecoder coder; SDecoder coder;
SMCreateStbReq pReq = {0}; SMCreateStbReq pReq = {0};
@ -1003,9 +998,6 @@ end:
} }
static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVDropStbReq req = {0}; SVDropStbReq req = {0};
SDecoder coder = {0}; SDecoder coder = {0};
SMDropStbReq pReq = {0}; SMDropStbReq pReq = {0};
@ -1115,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) {
} }
static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVCreateTbBatchReq req = {0}; SVCreateTbBatchReq req = {0};
SDecoder coder = {0}; SDecoder coder = {0};
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -1304,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) {
} }
static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVDropTbBatchReq req = {0}; SVDropTbBatchReq req = {0};
SDecoder coder = {0}; SDecoder coder = {0};
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -1419,9 +1405,6 @@ end:
} }
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SDeleteRes req = {0}; SDeleteRes req = {0};
SDecoder coder = {0}; SDecoder coder = {0};
char sql[256] = {0}; char sql[256] = {0};
@ -1457,9 +1440,6 @@ end:
} }
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVAlterTbReq req = {0}; SVAlterTbReq req = {0};
SDecoder dcoder = {0}; SDecoder dcoder = {0};
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -1590,7 +1570,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
SHashObj* pVgHash = NULL; SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL; SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE, uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
rows, pData, tbname, fields, numFields); rows, pData, tbname, fields, numFields);
@ -1622,7 +1602,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
RAW_NULL_CHECK(pVgHash); RAW_NULL_CHECK(pVgHash);
RAW_RETURN_CHECK( RAW_RETURN_CHECK(
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0)); RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false));
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL); launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1651,7 +1631,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
SHashObj* pVgHash = NULL; SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL; SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname); uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname);
@ -1682,7 +1662,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
RAW_NULL_CHECK(pVgHash); RAW_NULL_CHECK(pVgHash);
RAW_RETURN_CHECK( RAW_RETURN_CHECK(
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0)); RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false));
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL); launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1708,116 +1688,6 @@ static void* getRawDataFromRes(void* pRetrieve) {
return rawData; return rawData;
} }
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if (taos == NULL || data == NULL) {
SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data);
return TSDB_CODE_INVALID_PARA;
}
int32_t code = TSDB_CODE_SUCCESS;
SHashObj* pVgHash = NULL;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
STableMeta* pTableMeta = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));
uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
pRequest->syncQuery = true;
rspObj.resIter = -1;
rspObj.resType = RES_TYPE__TMQ;
int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t);
}
tDecoderInit(&decoder, data, dataLen);
code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp);
if (code != 0) {
SET_ERROR_MSG("decode mq data rsp failed");
code = TSDB_CODE_INVALID_MSG;
goto end;
}
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
struct SCatalog* pCatalog = NULL;
RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog));
SRequestConnInfo conn = {0};
conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
conn.requestId = pRequest->requestId;
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pVgHash);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
if (!rspObj.dataRsp.withSchema) {
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
RAW_NULL_CHECK(tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta));
SVgroupInfo vg = {0};
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg));
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
if (hData == NULL) {
RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
RAW_NULL_CHECK(fields);
for (int i = 0; i < pSW->nCols; i++) {
fields[i].type = pSW->pSchema[i].type;
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
void* rawData = getRawDataFromRes(pRetrieve);
char err[ERR_MSG_LEN] = {0};
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN);
taosMemoryFree(fields);
taosMemoryFreeClear(pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", tbName, err);
goto end;
}
}
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteMqDataRsp(&rspObj.dataRsp);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
taosMemoryFreeClear(pTableMeta);
return code;
}
static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) {
// find schema data info // find schema data info
int32_t code = 0; int32_t code = 0;
@ -1855,152 +1725,368 @@ end:
return code; return code;
} }
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { typedef enum {
if (taos == NULL || data == NULL) { WRITE_RAW_INIT_START = 0,
SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); WRITE_RAW_INIT_OK,
return TSDB_CODE_INVALID_PARA; WRITE_RAW_INIT_FAIL,
} WRITE_RAW_INIT_STATUS;
static SHashObj* writeRawCache = NULL;
static int8_t initFlag = 0;
static int8_t initedFlag = WRITE_RAW_INIT_START;
typedef struct {
SHashObj* pVgHash;
SHashObj* pNameHash;
SHashObj* pMetaHash;
} rawCacheInfo;
typedef struct {
SVgroupInfo vgInfo;
int64_t uid;
int64_t suid;
} tbInfo;
static void tmqFreeMeta(void* data) {
STableMeta* pTableMeta = *(STableMeta**)data;
taosMemoryFree(pTableMeta);
}
static void freeRawCache(void* data) {
rawCacheInfo* pRawCache = (rawCacheInfo*)data;
taosHashCleanup(pRawCache->pMetaHash);
taosHashCleanup(pRawCache->pNameHash);
taosHashCleanup(pRawCache->pVgHash);
}
static int32_t initRawCacheHash() {
if (writeRawCache == NULL) {
writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
if (writeRawCache == NULL) {
return terrno;
}
taosHashSetFreeFp(writeRawCache, freeRawCache);
} }
int32_t code = TSDB_CODE_SUCCESS; return 0;
SHashObj* pVgHash = NULL; }
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
STableMeta* pTableMeta = NULL;
SHashObj* pCreateTbHash = NULL;
SRequestObj* pRequest = NULL; static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) {
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); char* p = (char*)rawData;
// | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each
// column length |
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(uint64_t);
int8_t* fields = p;
uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) {
pRequest->syncQuery = true; return true;
rspObj.resIter = -1; }
rspObj.resType = RES_TYPE__TMQ_METADATA; for (int i = 0; i < pSW->nCols; i++) {
int j = 0;
for (; j < pTableMeta->tableInfo.numOfColumns; j++) {
SSchema* pColSchema = &pTableMeta->schema[j];
char* fieldName = pSW->pSchema[i].name;
if (strcmp(pColSchema->name, fieldName) == 0) {
if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) {
return true;
}
break;
}
}
fields += sizeof(int8_t) + sizeof(int32_t);
if (j == pTableMeta->tableInfo.numOfColumns) return true;
}
return false;
}
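The layout comment at the top of needRefreshMeta is the key to its pointer arithmetic: the function skips five 4-byte header fields plus the 8-byte block group id to land on the per-column schema, where each column is stored as an int8 type followed by an int32 byte width. A sketch of just that skip, mirroring the code above (the helper name is illustrative):

/* Illustrative sketch: locate the per-column (type, bytes) schema in raw data. */
static const char* rawBlockColSchema(const char* rawData) {
  const char* p = rawData;
  p += 5 * sizeof(int32_t); /* same five 4-byte skips as needRefreshMeta */
  p += sizeof(uint64_t);    /* block group id */
  return p;                 /* then per column: int8 type, int32 bytes */
}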
static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) {
int32_t code = 0;
void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES);
if (cacheInfo == NULL) {
*pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pVgHash);
*pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pNameHash);
*pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pMetaHash);
taosHashSetFreeFp(*pMetaHash, tmqFreeMeta);
rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash};
RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo)));
} else {
rawCacheInfo* info = (rawCacheInfo*)cacheInfo;
*pVgHash = info->pVgHash;
*pNameHash = info->pNameHash;
*pMetaHash = info->pMetaHash;
}
return 0;
end:
taosHashCleanup(*pMetaHash);
taosHashCleanup(*pNameHash);
taosHashCleanup(*pVgHash);
return code;
}
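One detail worth spelling out in getRawCache: the outer writeRawCache hash is keyed on the raw connection pointer value itself (&key with POINTER_BYTES), so every TAOS* gets its own vgroup/name/meta hashes. A minimal sketch of the lookup half of that lookup-or-create step (the function name is illustrative; the creation path is the code above):

/* Illustrative sketch: fetch the per-connection cache entry keyed by the TAOS* value. */
static rawCacheInfo* lookupRawCache(void* key) {
  return (rawCacheInfo*)taosHashGet(writeRawCache, &key, POINTER_BYTES);
}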
static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) {
int32_t code = 0;
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0));
(*pRequest)->syncQuery = true;
if (!(*pRequest)->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog));
conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter;
conn->requestId = (*pRequest)->requestId;
conn->requestObjRefId = (*pRequest)->self;
conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp);
end:
return code;
}
typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp);
static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func,
SMqRspObj* rspObj) {
int8_t dataVersion = *(int8_t*)data; int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) { if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t); dataLen -= sizeof(int8_t) + sizeof(int32_t);
} }
tDecoderInit(&decoder, data, dataLen); rspObj->resIter = -1;
code = tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp); tDecoderInit(decoder, data, dataLen);
int32_t code = func(decoder, &rspObj->dataRsp);
if (code != 0) { if (code != 0) {
SET_ERROR_MSG("decode mq taosx data rsp failed"); SET_ERROR_MSG("decode mq taosx data rsp failed");
code = TSDB_CODE_INVALID_MSG; }
goto end; return code;
}
static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash,
SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName,
STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) {
int32_t code = 0;
STableMeta* pTableMeta = NULL;
tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
if (tmpInfo == NULL || retry > 0) {
tbInfo info = {0};
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo));
if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta
tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN);
}
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
info.uid = pTableMeta->uid;
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
info.suid = pTableMeta->suid;
} else {
info.suid = pTableMeta->uid;
}
code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
if (code != 0) {
taosMemoryFree(pTableMeta);
goto end;
}
if (pCreateReqDst) {
pTableMeta->vgId = info.vgInfo.vgId;
pTableMeta->uid = pCreateReqDst->uid;
pCreateReqDst->ctb.suid = pTableMeta->suid;
}
RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo)));
tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
RAW_RETURN_CHECK(
taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo)));
} }
if (!pRequest->pDb) { if (pTableMeta == NULL || retry > 0) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES);
goto end; if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) {
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
if (code != 0) {
taosMemoryFree(pTableMeta);
goto end;
}
} else {
pTableMeta = *pTableMetaTmp;
pTableMeta->uid = tmpInfo->uid;
pTableMeta->vgId = tmpInfo->vgInfo.vgId;
}
} }
*pMeta = pTableMeta;
struct SCatalog* pCatalog = NULL; end:
RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); return code;
}
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
SRequestObj* pRequest = NULL;
SCatalog* pCatalog = NULL;
SRequestConnInfo conn = {0}; SRequestConnInfo conn = {0};
conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
conn.requestId = pRequest->requestId; uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
conn.requestObjRefId = pRequest->self; RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj));
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
SHashObj* pVgHash = NULL;
SHashObj* pNameHash = NULL;
SHashObj* pMetaHash = NULL;
RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
int retry = 0;
while (1) {
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
if (!rspObj.dataRsp.withSchema) {
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
RAW_NULL_CHECK(tbName);
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
void* rawData = getRawDataFromRes(pRetrieve);
RAW_NULL_CHECK(rawData);
uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
STableMeta* pTableMeta = NULL;
RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW,
rawData, retry));
char err[ERR_MSG_LEN] = {0};
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
goto end;
}
}
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
qDestroyQuery(pQuery);
pQuery = NULL;
rspObj.resIter = -1;
continue;
}
break;
}
end:
uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteMqDataRsp(&rspObj.dataRsp);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
return code;
}
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
SHashObj* pCreateTbHash = NULL;
SRequestObj* pRequest = NULL;
SCatalog* pCatalog = NULL;
SRequestConnInfo conn = {0};
RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj));
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pVgHash);
pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pCreateTbHash); RAW_NULL_CHECK(pCreateTbHash);
RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash));
uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); SHashObj* pVgHash = NULL;
while (++rspObj.resIter < rspObj.dataRsp.blockNum) { SHashObj* pNameHash = NULL;
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); SHashObj* pMetaHash = NULL;
RAW_NULL_CHECK(pRetrieve); RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
if (!rspObj.dataRsp.withSchema) { int retry = 0;
goto end; while (1) {
} RAW_RETURN_CHECK(smlInitHandle(&pQuery));
uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
if (!rspObj.dataRsp.withSchema) {
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
if (!tbName) { RAW_NULL_CHECK(tbName);
SET_ERROR_MSG("block tbname is null"); SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
code = terrno; RAW_NULL_CHECK(pSW);
goto end; void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
} RAW_NULL_CHECK(pRetrieve);
void* rawData = getRawDataFromRes(pRetrieve);
RAW_NULL_CHECK(rawData);
uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName); uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
// find schema data info // find schema data info
SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname));
SVgroupInfo vg = {0}; STableMeta* pTableMeta = NULL;
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName,
if (pCreateReqDst) { // change stable name to get meta &pTableMeta, pSW, rawData, retry));
tstrncpy(pName.tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); char err[ERR_MSG_LEN] = {0};
code =
rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
goto end;
}
} }
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
if (pCreateReqDst) { if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
pTableMeta->vgId = vg.vgId; uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
pTableMeta->uid = pCreateReqDst->uid; qDestroyQuery(pQuery);
pCreateReqDst->ctb.suid = pTableMeta->suid; pQuery = NULL;
} rspObj.resIter = -1;
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); continue;
if (hData == NULL) {
RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
if (fields == NULL) {
SET_ERROR_MSG("calloc fields failed");
code = terrno;
goto end;
}
for (int i = 0; i < pSW->nCols; i++) {
fields[i].type = pSW->pSchema[i].type;
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
void* rawData = getRawDataFromRes(pRetrieve);
char err[ERR_MSG_LEN] = {0};
SVCreateTbReq* pCreateReqTmp = NULL;
if (pCreateReqDst) {
RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp));
}
code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN);
if (pCreateReqTmp != NULL) {
tdDestroySVCreateTbReq(pCreateReqTmp);
taosMemoryFree(pCreateReqTmp);
}
taosMemoryFree(fields);
taosMemoryFreeClear(pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", tbName, err);
goto end;
} }
break;
} }
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end: end:
uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteSTaosxRsp(&rspObj.dataRsp);
void* pIter = taosHashIterate(pCreateTbHash, NULL); void* pIter = taosHashIterate(pCreateTbHash, NULL);
while (pIter) { while (pIter) {
tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE);
pIter = taosHashIterate(pCreateTbHash, pIter); pIter = taosHashIterate(pCreateTbHash, pIter);
} }
taosHashCleanup(pCreateTbHash); taosHashCleanup(pCreateTbHash);
tDeleteSTaosxRsp(&rspObj.dataRsp);
tDecoderClear(&decoder); tDecoderClear(&decoder);
qDestroyQuery(pQuery); qDestroyQuery(pQuery);
destroyRequest(pRequest); destroyRequest(pRequest);
taosHashCleanup(pVgHash);
taosMemoryFreeClear(pTableMeta);
return code; return code;
} }
@ -2076,18 +2162,18 @@ char* tmq_get_json_meta(TAOS_RES* res) {
return NULL; return NULL;
} }
char* string = NULL; char* string = NULL;
SMqRspObj* rspObj = (SMqRspObj*)res; SMqRspObj* rspObj = (SMqRspObj*)res;
if (TD_RES_TMQ_METADATA(res)) { if (TD_RES_TMQ_METADATA(res)) {
processAutoCreateTable(&rspObj->dataRsp, &string); processAutoCreateTable(&rspObj->dataRsp, &string);
} else if (TD_RES_TMQ_BATCH_META(res)) { } else if (TD_RES_TMQ_BATCH_META(res)) {
processBatchMetaToJson(&rspObj->batchMetaRsp, &string); processBatchMetaToJson(&rspObj->batchMetaRsp, &string);
} else if (TD_RES_TMQ_META(res)) { } else if (TD_RES_TMQ_META(res)) {
cJSON* pJson = NULL; cJSON* pJson = NULL;
processSimpleMeta(&rspObj->metaRsp, &pJson); processSimpleMeta(&rspObj->metaRsp, &pJson);
string = cJSON_PrintUnformatted(pJson); string = cJSON_PrintUnformatted(pJson);
cJSON_Delete(pJson); cJSON_Delete(pJson);
} else{ } else {
uError("tmq_get_json_meta res:%d, invalid type", *(int8_t*)res); uError("tmq_get_json_meta res:%d, invalid type", *(int8_t*)res);
} }
@ -2098,7 +2184,7 @@ char* tmq_get_json_meta(TAOS_RES* res) {
void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
static int32_t getOffSetLen(const SMqDataRsp* pRsp) { static int32_t getOffSetLen(const SMqDataRsp* pRsp) {
SEncoder coder = {0}; SEncoder coder = {0};
tEncoderInit(&coder, NULL, 0); tEncoderInit(&coder, NULL, 0);
if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1; if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1;
@ -2108,7 +2194,7 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) {
} }
typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp);
static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) {
int32_t len = 0; int32_t len = 0;
int32_t code = 0; int32_t code = 0;
SEncoder encoder = {0}; SEncoder encoder = {0};
@ -2164,7 +2250,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
raw->raw_type = rspObj->metaRsp.resMsgType; raw->raw_type = rspObj->metaRsp.resMsgType;
uDebug("tmq get raw type meta:%p", raw); uDebug("tmq get raw type meta:%p", raw);
} else if (TD_RES_TMQ(res)) { } else if (TD_RES_TMQ(res)) {
int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw);
if (code != 0) { if (code != 0) {
uError("tmq get raw type error:%d", terrno); uError("tmq get raw type error:%d", terrno);
return code; return code;
@ -2199,7 +2285,31 @@ void tmq_free_raw(tmq_raw_data raw) {
(void)memset(terrMsg, 0, ERR_MSG_LEN); (void)memset(terrMsg, 0, ERR_MSG_LEN);
} }
static int32_t writeRawInit() {
while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) {
int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1);
if (old == 0) {
int32_t code = initRawCacheHash();
if (code != 0) {
uError("tmq writeRawImpl init error:%d", code);
atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL);
return code;
}
atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK);
}
}
if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) {
return TSDB_CODE_INTERNAL_ERROR;
}
return 0;
}
static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) {
if (writeRawInit() != 0) {
return TSDB_CODE_INTERNAL_ERROR;
}
if (type == TDMT_VND_CREATE_STB) { if (type == TDMT_VND_CREATE_STB) {
return taosCreateStb(taos, buf, len); return taosCreateStb(taos, buf, len);
} else if (type == TDMT_VND_ALTER_STB) { } else if (type == TDMT_VND_ALTER_STB) {
@ -2214,10 +2324,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
return taosDropTable(taos, buf, len); return taosDropTable(taos, buf, len);
} else if (type == TDMT_VND_DELETE) { } else if (type == TDMT_VND_DELETE) {
return taosDeleteData(taos, buf, len); return taosDeleteData(taos, buf, len);
} else if (type == RES_TYPE__TMQ) {
return tmqWriteRawDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ_METADATA) { } else if (type == RES_TYPE__TMQ_METADATA) {
return tmqWriteRawMetaDataImpl(taos, buf, len); return tmqWriteRawMetaDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ) {
return tmqWriteRawDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ_BATCH_META) { } else if (type == RES_TYPE__TMQ_BATCH_META) {
return tmqWriteBatchMetaDataImpl(taos, buf, len); return tmqWriteBatchMetaDataImpl(taos, buf, len);
} }
@ -2225,7 +2335,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
} }
int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
if (!taos) { if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) {
SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw);
return TSDB_CODE_INVALID_PARA; return TSDB_CODE_INVALID_PARA;
} }

File diff suppressed because it is too large.


@ -21,259 +21,10 @@
#define OTD_JSON_SUB_FIELDS_NUM 2 #define OTD_JSON_SUB_FIELDS_NUM 2
#define JUMP_JSON_SPACE(start) \
while (*(start)) { \
if (unlikely(*(start) > 32)) \
break; \
else \
(start)++; \
}
static int32_t smlJsonGetObj(char **payload) {
int leftBracketCnt = 0;
bool isInQuote = false;
while (**payload) {
if (**payload == '"' && *((*payload) - 1) != '\\') {
isInQuote = !isInQuote;
} else if (!isInQuote && unlikely(**payload == '{')) {
leftBracketCnt++;
(*payload)++;
continue;
} else if (!isInQuote && unlikely(**payload == '}')) {
leftBracketCnt--;
(*payload)++;
if (leftBracketCnt == 0) {
return 0;
} else if (leftBracketCnt < 0) {
return -1;
}
continue;
}
(*payload)++;
}
return -1;
}
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) {
int index = 0;
while (*(*start)) {
if ((*start)[0] != '"') {
(*start)++;
continue;
}
if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
return TSDB_CODE_TSC_INVALID_JSON;
}
char *sTmp = *start;
if ((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't' && (*start)[4] == 'r' && (*start)[5] == 'i' &&
(*start)[6] == 'c' && (*start)[7] == '"') {
(*start) += 8;
bool isInQuote = false;
while (*(*start)) {
if (unlikely(!isInQuote && *(*start) == '"')) {
(*start)++;
offset[index++] = *start - sTmp;
element->measure = (*start);
isInQuote = true;
continue;
}
if (unlikely(isInQuote && *(*start) == '"')) {
element->measureLen = (*start) - element->measure;
(*start)++;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm' && (*start)[4] == 'e' &&
(*start)[5] == 's' && (*start)[6] == 't' && (*start)[7] == 'a' && (*start)[8] == 'm' &&
(*start)[9] == 'p' && (*start)[10] == '"') {
(*start) += 11;
bool hasColon = false;
while (*(*start)) {
if (unlikely(!hasColon && *(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->timestamp = (*start);
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->timestampLen = tmp - (*start);
*start = tmp;
}
break;
}
hasColon = true;
continue;
}
if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) {
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
} else if ((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l' && (*start)[4] == 'u' &&
(*start)[5] == 'e' && (*start)[6] == '"') {
(*start) += 7;
bool hasColon = false;
while (*(*start)) {
if (unlikely(!hasColon && *(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->cols = (*start);
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->colsLen = tmp - (*start);
*start = tmp;
}
break;
}
hasColon = true;
continue;
}
if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) {
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g' && (*start)[4] == 's' &&
(*start)[5] == '"') {
(*start) += 6;
while (*(*start)) {
if (unlikely(*(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->tags = (*start);
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->tagsLen = tmp - (*start);
*start = tmp;
}
break;
}
(*start)++;
}
}
if (*(*start) == '\0') {
break;
}
if (*(*start) == '}') {
(*start)++;
break;
}
(*start)++;
}
if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL ||
element->measure == NULL || element->timestamp == NULL) {
uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM);
return TSDB_CODE_TSC_INVALID_JSON;
}
return 0;
}
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) {
int index = 0;
while (*(*start)) {
if ((*start)[0] != '"') {
(*start)++;
continue;
}
if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
return TSDB_CODE_TSC_INVALID_JSON;
}
if ((*start)[1] == 'm') {
(*start) += offset[index++];
element->measure = *start;
while (*(*start)) {
if (unlikely(*(*start) == '"')) {
element->measureLen = (*start) - element->measure;
(*start)++;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'i') {
(*start) += offset[index++];
element->timestamp = *start;
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->timestampLen = tmp - (*start);
*start = tmp;
}
} else {
while (*(*start)) {
if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) {
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
}
} else if ((*start)[1] == 'v') {
(*start) += offset[index++];
element->cols = *start;
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->colsLen = tmp - (*start);
*start = tmp;
}
} else {
while (*(*start)) {
if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) {
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
}
} else if ((*start)[1] == 't' && (*start)[2] == 'a') {
(*start) += offset[index++];
element->tags = (*start);
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->tagsLen = tmp - (*start);
*start = tmp;
}
}
if (*(*start) == '}') {
(*start)++;
break;
}
(*start)++;
}
if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) {
uError("elements != %d", OTD_JSON_FIELDS_NUM);
return TSDB_CODE_TSC_INVALID_JSON;
}
return TSDB_CODE_SUCCESS;
}
 static inline int32_t smlParseMetricFromJSON(SSmlHandle *info, cJSON *metric, SSmlLineInfo *elements) {
   elements->measureLen = strlen(metric->valuestring);
   if (IS_INVALID_TABLE_LEN(elements->measureLen)) {
-    uError("OTD:0x%" PRIx64 " Metric length is 0 or large than 192", info->id);
+    uError("SML:0x%" PRIx64 " Metric length is 0 or large than 192", info->id);
     return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
   }
@@ -293,7 +44,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) {
       child = child->next;
     }
     if (*marks[i] == NULL) {
-      uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]);
+      uError("SML %s error, not find mark:%d:%s", __FUNCTION__, i, jsonName[i]);
       return TSDB_CODE_TSC_INVALID_JSON;
     }
   }
 }
@ -302,7 +53,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) {
static int32_t smlConvertJSONBool(SSmlKv *pVal, char *typeStr, cJSON *value) { static int32_t smlConvertJSONBool(SSmlKv *pVal, char *typeStr, cJSON *value) {
if (strcasecmp(typeStr, "bool") != 0) { if (strcasecmp(typeStr, "bool") != 0) {
uError("OTD:invalid type(%s) for JSON Bool", typeStr); uError("SML:invalid type(%s) for JSON Bool", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE; return TSDB_CODE_TSC_INVALID_JSON_TYPE;
} }
pVal->type = TSDB_DATA_TYPE_BOOL; pVal->type = TSDB_DATA_TYPE_BOOL;
@ -316,7 +67,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// tinyint // tinyint
if (strcasecmp(typeStr, "i8") == 0 || strcasecmp(typeStr, "tinyint") == 0) { if (strcasecmp(typeStr, "i8") == 0 || strcasecmp(typeStr, "tinyint") == 0) {
if (!IS_VALID_TINYINT(value->valuedouble)) { if (!IS_VALID_TINYINT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble); uError("SML:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
} }
pVal->type = TSDB_DATA_TYPE_TINYINT; pVal->type = TSDB_DATA_TYPE_TINYINT;
@ -327,7 +78,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// smallint // smallint
if (strcasecmp(typeStr, "i16") == 0 || strcasecmp(typeStr, "smallint") == 0) { if (strcasecmp(typeStr, "i16") == 0 || strcasecmp(typeStr, "smallint") == 0) {
if (!IS_VALID_SMALLINT(value->valuedouble)) { if (!IS_VALID_SMALLINT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(smallint)", value->valuedouble); uError("SML:JSON value(%f) cannot fit in type(smallint)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
} }
pVal->type = TSDB_DATA_TYPE_SMALLINT; pVal->type = TSDB_DATA_TYPE_SMALLINT;
@ -338,7 +89,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// int // int
if (strcasecmp(typeStr, "i32") == 0 || strcasecmp(typeStr, "int") == 0) { if (strcasecmp(typeStr, "i32") == 0 || strcasecmp(typeStr, "int") == 0) {
if (!IS_VALID_INT(value->valuedouble)) { if (!IS_VALID_INT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(int)", value->valuedouble); uError("SML:JSON value(%f) cannot fit in type(int)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
} }
pVal->type = TSDB_DATA_TYPE_INT; pVal->type = TSDB_DATA_TYPE_INT;
@ -362,7 +113,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// float // float
if (strcasecmp(typeStr, "f32") == 0 || strcasecmp(typeStr, "float") == 0) { if (strcasecmp(typeStr, "f32") == 0 || strcasecmp(typeStr, "float") == 0) {
if (!IS_VALID_FLOAT(value->valuedouble)) { if (!IS_VALID_FLOAT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(float)", value->valuedouble); uError("SML:JSON value(%f) cannot fit in type(float)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
} }
pVal->type = TSDB_DATA_TYPE_FLOAT; pVal->type = TSDB_DATA_TYPE_FLOAT;
@ -379,7 +130,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
} }
// if reach here means type is unsupported // if reach here means type is unsupported
uError("OTD:invalid type(%s) for JSON Number", typeStr); uError("SML:invalid type(%s) for JSON Number", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE; return TSDB_CODE_TSC_INVALID_JSON_TYPE;
} }
@ -391,7 +142,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
} else if (strcasecmp(typeStr, "nchar") == 0) { } else if (strcasecmp(typeStr, "nchar") == 0) {
pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->type = TSDB_DATA_TYPE_NCHAR;
} else { } else {
uError("OTD:invalid type(%s) for JSON String", typeStr); uError("SML:invalid type(%s) for JSON String", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE; return TSDB_CODE_TSC_INVALID_JSON_TYPE;
} }
pVal->length = strlen(value->valuestring); pVal->length = strlen(value->valuestring);
@ -474,7 +225,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) {
case cJSON_String: { case cJSON_String: {
int32_t ret = smlConvertJSONString(kv, "binary", root); int32_t ret = smlConvertJSONString(kv, "binary", root);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
uError("OTD:Failed to parse binary value from JSON Obj"); uError("SML:Failed to parse binary value from JSON Obj");
return ret; return ret;
} }
break; break;
@ -482,7 +233,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) {
case cJSON_Object: { case cJSON_Object: {
int32_t ret = smlParseValueFromJSONObj(root, kv); int32_t ret = smlParseValueFromJSONObj(root, kv);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
uError("OTD:Failed to parse value from JSON Obj"); uError("SML:Failed to parse value from JSON Obj");
return ret; return ret;
} }
break; break;
@ -511,7 +262,7 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){
} }
size_t keyLen = strlen(tag->string); size_t keyLen = strlen(tag->string);
if (unlikely(IS_INVALID_COL_LEN(keyLen))) { if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
uError("OTD:Tag key length is 0 or too large than 64"); uError("SML:Tag key length is 0 or too large than 64");
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
} }
@ -539,28 +290,24 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){
} }
 static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo *elements) {
-  int32_t ret = 0;
+  if (is_same_child_table_telnet(elements, &info->preLine) == 0) {
+    elements->measureTag = info->preLine.measureTag;
+    return TSDB_CODE_SUCCESS;
+  }
+  int32_t code = 0;
+  int32_t lino = 0;
   if(info->dataFormat){
-    ret = smlProcessSuperTable(info, elements);
-    if(ret != 0){
-      if(info->reRun){
-        return TSDB_CODE_SUCCESS;
-      }
-      return ret;
-    }
+    SML_CHECK_CODE(smlProcessSuperTable(info, elements));
   }
-  ret = smlProcessTagJson(info, tags);
-  if(ret != 0){
-    if(info->reRun){
-      return TSDB_CODE_SUCCESS;
-    }
-    return ret;
-  }
-  ret = smlJoinMeasureTag(elements);
-  if(ret != 0){
-    return ret;
-  }
+  SML_CHECK_CODE(smlProcessTagJson(info, tags));
+  SML_CHECK_CODE(smlJoinMeasureTag(elements));
   return smlProcessChildTable(info, elements);
+
+END:
+  if(info->reRun){
+    return TSDB_CODE_SUCCESS;
+  }
+  RETURN
 }
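The SML_CHECK_CODE / SML_CHECK_NULL / RETURN macros this refactor leans on are not defined in this diff. A plausible shape, consistent with how the call sites here use them (a local int32_t code and lino, and an END label in the enclosing function), would be the following sketch; the real definitions live elsewhere in the SML sources and may differ, and the use of terrno in SML_CHECK_NULL is an assumption.

/* Assumed macro shapes only, mirroring the call sites in this patch. */
#define SML_CHECK_CODE(CMD)          \
  do {                               \
    code = (CMD);                    \
    if (code != TSDB_CODE_SUCCESS) { \
      lino = __LINE__;               \
      goto END;                      \
    }                                \
  } while (0)

#define SML_CHECK_NULL(PTR)          \
  do {                               \
    if ((PTR) == NULL) {             \
      code = terrno;                 \
      lino = __LINE__;               \
      goto END;                      \
    }                                \
  } while (0)

#define RETURN return code;  /* the real version may also log code and lino */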
static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) { static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) {
@ -678,7 +425,8 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) {
} }
static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo *elements) { static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
cJSON *metricJson = NULL; cJSON *metricJson = NULL;
cJSON *tsJson = NULL; cJSON *tsJson = NULL;
@ -688,57 +436,27 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
int32_t size = cJSON_GetArraySize(root); int32_t size = cJSON_GetArraySize(root);
// outmost json fields has to be exactly 4 // outmost json fields has to be exactly 4
if (size != OTD_JSON_FIELDS_NUM) { if (size != OTD_JSON_FIELDS_NUM) {
uError("OTD:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, size); uError("SML:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, size);
return TSDB_CODE_TSC_INVALID_JSON; return TSDB_CODE_TSC_INVALID_JSON;
} }
cJSON **marks[OTD_JSON_FIELDS_NUM] = {&metricJson, &tsJson, &valueJson, &tagsJson}; cJSON **marks[OTD_JSON_FIELDS_NUM] = {&metricJson, &tsJson, &valueJson, &tagsJson};
ret = smlGetJsonElements(root, marks); SML_CHECK_CODE(smlGetJsonElements(root, marks));
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
// Parse metric // Parse metric
ret = smlParseMetricFromJSON(info, metricJson, elements); SML_CHECK_CODE(smlParseMetricFromJSON(info, metricJson, elements));
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("OTD:0x%" PRIx64 " Unable to parse metric from JSON payload", info->id);
return ret;
}
// Parse metric value // Parse metric value
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN}; SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN};
ret = smlParseValueFromJSON(valueJson, &kv); SML_CHECK_CODE(smlParseValueFromJSON(valueJson, &kv));
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse metric value from JSON payload", info->id);
return ret;
}
// Parse tags // Parse tags
bool needFree = info->dataFormat;
elements->tags = cJSON_PrintUnformatted(tagsJson); elements->tags = cJSON_PrintUnformatted(tagsJson);
if (elements->tags == NULL){ SML_CHECK_NULL(elements->tags);
return TSDB_CODE_OUT_OF_MEMORY;
}
elements->tagsLen = strlen(elements->tags);
if (is_same_child_table_telnet(elements, &info->preLine) != 0) {
ret = smlParseTagsFromJSON(info, tagsJson, elements);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
taosMemoryFree(elements->tags);
elements->tags = NULL;
return ret;
}
} else {
elements->measureTag = info->preLine.measureTag;
}
if (needFree) { elements->tagsLen = strlen(elements->tags);
taosMemoryFree(elements->tags); SML_CHECK_CODE(smlParseTagsFromJSON(info, tagsJson, elements));
elements->tags = NULL;
}
if (unlikely(info->reRun)) { if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS; goto END;
} }
// Parse timestamp // Parse timestamp
@ -747,29 +465,34 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
if (unlikely(ts < 0)) { if (unlikely(ts < 0)) {
char* tmp = cJSON_PrintUnformatted(tsJson); char* tmp = cJSON_PrintUnformatted(tsJson);
if (tmp == NULL) { if (tmp == NULL) {
uError("cJSON_PrintUnformatted failed since %s", tstrerror(TSDB_CODE_OUT_OF_MEMORY)); uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts);
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts);
} else { } else {
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts); uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts);
taosMemoryFree(tmp); taosMemoryFree(tmp);
} }
return TSDB_CODE_INVALID_TIMESTAMP; SML_CHECK_CODE(TSDB_CODE_INVALID_TIMESTAMP);
} }
SSmlKv kvTs = {0}; SSmlKv kvTs = {0};
smlBuildTsKv(&kvTs, ts); smlBuildTsKv(&kvTs, ts);
if (info->dataFormat){
code = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv);
} else {
code = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv);
}
SML_CHECK_CODE(code);
taosMemoryFreeClear(info->preLine.tags);
info->preLine = *elements;
elements->tags = NULL;
return smlParseEndTelnetJson(info, elements, &kvTs, &kv); END:
taosMemoryFree(elements->tags);
RETURN
} }
static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
int32_t payloadNum = 0; int32_t payloadNum = 0;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
if (unlikely(payload == NULL)) {
uError("SML:0x%" PRIx64 " empty JSON Payload", info->id);
return TSDB_CODE_TSC_INVALID_JSON;
}
info->root = cJSON_Parse(payload); info->root = cJSON_Parse(payload);
if (unlikely(info->root == NULL)) { if (unlikely(info->root == NULL)) {
uError("SML:0x%" PRIx64 " parse json failed:%s", info->id, payload); uError("SML:0x%" PRIx64 " parse json failed:%s", info->id, payload);
@ -782,27 +505,11 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
} else if (cJSON_IsObject(info->root)) { } else if (cJSON_IsObject(info->root)) {
payloadNum = 1; payloadNum = 1;
} else { } else {
uError("SML:0x%" PRIx64 " Invalid JSON Payload 3:%s", info->id, payload); uError("SML:0x%" PRIx64 " Invalid JSON type:%s", info->id, payload);
return TSDB_CODE_TSC_INVALID_JSON; return TSDB_CODE_TSC_INVALID_JSON;
} }
if (unlikely(info->lines != NULL)) {
for (int i = 0; i < info->lineNum; i++) {
taosArrayDestroyEx(info->lines[i].colArray, freeSSmlKv);
if (info->lines[i].measureTagsLen != 0) taosMemoryFree(info->lines[i].measureTag);
}
taosMemoryFree(info->lines);
info->lines = NULL;
}
info->lineNum = payloadNum; info->lineNum = payloadNum;
info->dataFormat = true;
ret = smlClearForRerun(info);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
info->parseJsonByLib = true;
cJSON *head = (payloadNum == 1 && cJSON_IsObject(info->root)) ? info->root : info->root->child; cJSON *head = (payloadNum == 1 && cJSON_IsObject(info->root)) ? info->root : info->root->child;
int cnt = 0; int cnt = 0;
@ -811,6 +518,7 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
if (info->dataFormat) { if (info->dataFormat) {
SSmlLineInfo element = {0}; SSmlLineInfo element = {0};
ret = smlParseJSONStringExt(info, dataPoint, &element); ret = smlParseJSONStringExt(info, dataPoint, &element);
if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag);
} else { } else {
ret = smlParseJSONStringExt(info, dataPoint, info->lines + cnt); ret = smlParseJSONStringExt(info, dataPoint, info->lines + cnt);
} }
@ -836,164 +544,3 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS;
if (info->offset[0] == 0) {
ret = smlJsonParseObjFirst(start, elements, info->offset);
} else {
ret = smlJsonParseObj(start, elements, info->offset);
}
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
if (unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS;
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) {
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (unlikely(elements->colsLen == 0)) {
uError("SML:colsLen == 0");
return TSDB_CODE_TSC_INVALID_VALUE;
} else if (unlikely(elements->cols[0] == '{')) {
char tmp = elements->cols[elements->colsLen];
elements->cols[elements->colsLen] = '\0';
cJSON *valueJson = cJSON_Parse(elements->cols);
if (unlikely(valueJson == NULL)) {
uError("SML:0x%" PRIx64 " parse json cols failed:%s", info->id, elements->cols);
elements->cols[elements->colsLen] = tmp;
return TSDB_CODE_TSC_INVALID_JSON;
}
if (taosArrayPush(info->tagJsonArray, &valueJson) == NULL){
cJSON_Delete(valueJson);
elements->cols[elements->colsLen] = tmp;
return terrno;
}
ret = smlParseValueFromJSONObj(valueJson, &kv);
if (ret != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " Failed to parse value from JSON Obj:%s", info->id, elements->cols);
elements->cols[elements->colsLen] = tmp;
return TSDB_CODE_TSC_INVALID_VALUE;
}
elements->cols[elements->colsLen] = tmp;
} else if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " cols invalidate:%s", info->id, elements->cols);
return TSDB_CODE_TSC_INVALID_VALUE;
}
// Parse tags
if (is_same_child_table_telnet(elements, &info->preLine) != 0) {
char tmp = *(elements->tags + elements->tagsLen);
*(elements->tags + elements->tagsLen) = 0;
cJSON *tagsJson = cJSON_Parse(elements->tags);
*(elements->tags + elements->tagsLen) = tmp;
if (unlikely(tagsJson == NULL)) {
uError("SML:0x%" PRIx64 " parse json tag failed:%s", info->id, elements->tags);
return TSDB_CODE_TSC_INVALID_JSON;
}
if (taosArrayPush(info->tagJsonArray, &tagsJson) == NULL){
cJSON_Delete(tagsJson);
uError("SML:0x%" PRIx64 " taosArrayPush failed", info->id);
return terrno;
}
ret = smlParseTagsFromJSON(info, tagsJson, elements);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
return ret;
}
} else {
elements->measureTag = info->preLine.measureTag;
}
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
}
// Parse timestamp
// notice!!! put ts back to tag to ensure get meta->precision
int64_t ts = 0;
if (unlikely(elements->timestampLen == 0)) {
uError("OTD:0x%" PRIx64 " elements->timestampLen == 0", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
} else if (elements->timestamp[0] == '{') {
char tmp = elements->timestamp[elements->timestampLen];
elements->timestamp[elements->timestampLen] = '\0';
cJSON *tsJson = cJSON_Parse(elements->timestamp);
ts = smlParseTSFromJSON(info, tsJson);
if (unlikely(ts < 0)) {
uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload:%s", info->id, elements->timestamp);
elements->timestamp[elements->timestampLen] = tmp;
cJSON_Delete(tsJson);
return TSDB_CODE_INVALID_TIMESTAMP;
}
elements->timestamp[elements->timestampLen] = tmp;
cJSON_Delete(tsJson);
} else {
ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) {
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
}
}
SSmlKv kvTs = {0};
smlBuildTsKv(&kvTs, ts);
return smlParseEndTelnetJson(info, elements, &kvTs, &kv);
}
int32_t smlParseJSON(SSmlHandle *info, char *payload) {
int32_t payloadNum = 1 << 15;
int32_t ret = TSDB_CODE_SUCCESS;
uDebug("SML:0x%" PRIx64 "json:%s", info->id, payload);
int cnt = 0;
char *dataPointStart = payload;
while (1) {
if (info->dataFormat) {
SSmlLineInfo element = {0};
ret = smlParseJSONString(info, &dataPointStart, &element);
if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag);
} else {
if (cnt >= payloadNum) {
payloadNum = payloadNum << 1;
void *tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo));
if (tmp == NULL) {
ret = terrno;
return ret;
}
info->lines = (SSmlLineInfo *)tmp;
(void)memset(info->lines + cnt, 0, (payloadNum - cnt) * sizeof(SSmlLineInfo));
}
ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt);
if ((info->lines + cnt)->measure == NULL) break;
}
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("SML:0x%" PRIx64 " Invalid JSON Payload 1:%s", info->id, payload);
return smlParseJSONExt(info, payload);
}
if (unlikely(info->reRun)) {
cnt = 0;
dataPointStart = payload;
info->lineNum = payloadNum;
ret = smlClearForRerun(info);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
continue;
}
cnt++;
if (*dataPointStart == '\0') break;
}
info->lineNum = cnt;
return TSDB_CODE_SUCCESS;
}
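The non-dataFormat branch of the removed smlParseJSON grows info->lines geometrically (payloadNum <<= 1, realloc, zero the new tail). The same doubling pattern in isolation, with generic names invented for the sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Generic doubling growth, mirroring the payloadNum <<= 1 / realloc /
 * memset-the-tail sequence in the removed function; names are invented. */
static void *growArray(void *arr, int *cap, int need, size_t elemSize) {
  if (need < *cap) return arr;
  int newCap = (*cap > 0) ? *cap : 1;
  while (newCap <= need) newCap <<= 1;            /* double until it fits */
  void *tmp = realloc(arr, (size_t)newCap * elemSize);
  if (tmp == NULL) return NULL;                   /* caller keeps old arr */
  memset((char *)tmp + (size_t)(*cap) * elemSize, 0,
         (size_t)(newCap - *cap) * elemSize);     /* zero the new tail    */
  *cap = newCap;
  return tmp;
}

int main(void) {
  int  cap = 0;
  int *lines = NULL;
  for (int i = 0; i < 100; i++) {
    void *tmp = growArray(lines, &cap, i, sizeof(int));
    if (tmp == NULL) break;
    lines = (int *)tmp;
    lines[i] = i;
  }
  printf("capacity grew to %d\n", cap);  /* 128 for 100 elements */
  free(lines);
  return 0;
}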

View File

@ -63,7 +63,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision); int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
if (unlikely(ts == -1)) { if (unlikely(ts == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data); smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid timestamp", data);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
return ts; return ts;
@ -84,7 +84,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
} }
if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) { if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->type = TSDB_DATA_TYPE_NCHAR;
pVal->length -= NCHAR_ADD_LEN; pVal->length -= NCHAR_ADD_LEN;
if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
@ -97,7 +97,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
} }
if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
int32_t code = initCtxGeomFromText(); int32_t code = initCtxGeomFromText();
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
@ -124,7 +124,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
} }
if (pVal->value[0] == 'b' || pVal->value[0] == 'B') { // varbinary if (pVal->value[0] == 'b' || pVal->value[0] == 'B') { // varbinary
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) { if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_VARBINARY; pVal->type = TSDB_DATA_TYPE_VARBINARY;
if(isHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){ if(isHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
if(!isValidateHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){ if(!isValidateHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
@ -298,7 +298,7 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
} }
if (info->dataFormat && !isSmlTagAligned(info, cnt, &kv)) { if (info->dataFormat && !isSmlTagAligned(info, cnt, &kv)) {
return TSDB_CODE_TSC_INVALID_JSON; return TSDB_CODE_SML_INVALID_DATA;
} }
cnt++; cnt++;
@ -311,31 +311,24 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
} }
static int32_t smlParseTagLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *elements) { static int32_t smlParseTagLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *elements) {
int32_t code = 0;
int32_t lino = 0;
bool isSameCTable = IS_SAME_CHILD_TABLE; bool isSameCTable = IS_SAME_CHILD_TABLE;
if(isSameCTable){ if(isSameCTable){
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t ret = 0;
if(info->dataFormat){ if(info->dataFormat){
ret = smlProcessSuperTable(info, elements); SML_CHECK_CODE(smlProcessSuperTable(info, elements));
if(ret != 0){
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
return ret;
}
} }
SML_CHECK_CODE(smlProcessTagLine(info, sql, sqlEnd));
ret = smlProcessTagLine(info, sql, sqlEnd);
if(ret != 0){
if (info->reRun){
return TSDB_CODE_SUCCESS;
}
return ret;
}
return smlProcessChildTable(info, elements); return smlProcessChildTable(info, elements);
END:
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
RETURN
} }
static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement) { static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement) {
@ -353,7 +346,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
const char *escapeChar = NULL; const char *escapeChar = NULL;
while (*sql < sqlEnd) { while (*sql < sqlEnd) {
if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) { if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
if (unlikely(IS_EQUAL(*sql,escapeChar))) { if (unlikely(IS_EQUAL(*sql,escapeChar))) {
@ -370,7 +363,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
} }
if (unlikely(IS_INVALID_COL_LEN(keyLen - keyLenEscaped))) { if (unlikely(IS_INVALID_COL_LEN(keyLen - keyLenEscaped))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key); smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
} }
@ -404,18 +397,18 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
valueLen = *sql - value; valueLen = *sql - value;
if (unlikely(quoteNum != 0 && quoteNum != 2)) { if (unlikely(quoteNum != 0 && quoteNum != 2)) {
smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value); smlBuildInvalidDataMsg(&info->msgBuf, "SML line unbalanced quotes", value);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
if (unlikely(valueLen == 0)) { if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value); smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid value", value);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen}; SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
int32_t ret = smlParseValue(&kv, &info->msgBuf); int32_t ret = smlParseValue(&kv, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value); uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret);
return ret; return ret;
} }
@ -437,11 +430,6 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
} }
(void)memcpy(tmp, kv.value, kv.length); (void)memcpy(tmp, kv.value, kv.length);
PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length); PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length);
if(kv.type == TSDB_DATA_TYPE_GEOMETRY) {
uError("SML:0x%" PRIx64 " smlParseColLine error, invalid GEOMETRY type.", info->id);
taosMemoryFree((void*)kv.value);
return TSDB_CODE_TSC_INVALID_VALUE;
}
if(kv.type == TSDB_DATA_TYPE_VARBINARY){ if(kv.type == TSDB_DATA_TYPE_VARBINARY){
taosMemoryFree((void*)kv.value); taosMemoryFree((void*)kv.value);
} }
@ -510,7 +498,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
} }
elements->measureLen = sql - elements->measure; elements->measureLen = sql - elements->measure;
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen - measureLenEscaped))) { if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen - measureLenEscaped))) {
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL); smlBuildInvalidDataMsg(&info->msgBuf, "SML line measure is empty or too large than 192", NULL);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
} }
@ -557,7 +545,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
elements->colsLen = sql - elements->cols; elements->colsLen = sql - elements->cols;
if (unlikely(elements->colsLen == 0)) { if (unlikely(elements->colsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "cols is empty", NULL); smlBuildInvalidDataMsg(&info->msgBuf, "SML line cols is empty", NULL);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
@ -574,7 +562,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen); int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts <= 0)) { if (unlikely(ts <= 0)) {
uError("SML:0x%" PRIx64 " smlParseTS error:%" PRId64, info->id, ts); uError("SML:0x%" PRIx64 " %s error:%" PRId64, info->id, __FUNCTION__, ts);
return TSDB_CODE_INVALID_TIMESTAMP; return TSDB_CODE_INVALID_TIMESTAMP;
} }
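The value-prefix checks above (L"..." for nchar, G"..." for geometry, B"..." for varbinary, all now guarded by NCHAR_ADD_LEN) and the numeric suffixes handled by smlParseValue correspond to InfluxDB line-protocol records of the following shape. These samples are illustrative, not fixtures taken from this patch.

/* Illustrative line-protocol records for the value forms handled above;
 * G"..." and B"..." follow the same quoting rule checked here. */
static const char *kInfluxSamples[] = {
    "st,t1=3,t2=4 c1=3i64,c2=false,c4=4f64 1626006833639000000",  /* numeric suffixes */
    "st,t1=3 c3=L\"passit\" 1626006833639000000",                 /* nchar value      */
};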

View File

@ -148,31 +148,21 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
     return TSDB_CODE_SUCCESS;
   }

-  int32_t ret = 0;
+  int32_t code = 0;
+  int32_t lino = 0;
   if(info->dataFormat){
-    ret = smlProcessSuperTable(info, elements);
-    if(ret != 0){
-      if(info->reRun){
-        return TSDB_CODE_SUCCESS;
-      }
-      return ret;
-    }
+    SML_CHECK_CODE(smlProcessSuperTable(info, elements));
   }
+  SML_CHECK_CODE(smlProcessTagTelnet(info, data, sqlEnd));
+  SML_CHECK_CODE(smlJoinMeasureTag(elements));
+  code = smlProcessChildTable(info, elements);

-  ret = smlProcessTagTelnet(info, data, sqlEnd);
-  if(ret != 0){
-    if (info->reRun){
-      return TSDB_CODE_SUCCESS;
-    }
-    return ret;
-  }
-
-  ret = smlJoinMeasureTag(elements);
-  if(ret != 0){
-    return ret;
-  }
-  return smlProcessChildTable(info, elements);
+END:
+  if(info->reRun){
+    return TSDB_CODE_SUCCESS;
+  }
+  RETURN
 }
// format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] // format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
@ -182,14 +172,14 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
// parse metric // parse metric
smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen); smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen);
if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) { if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid measure", sql);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
} }
// parse timestamp // parse timestamp
smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen); smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen);
if (unlikely(!elements->timestamp || elements->timestampLen == 0)) { if (unlikely(!elements->timestamp || elements->timestampLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid timestamp", sql);
return TSDB_CODE_SML_INVALID_DATA; return TSDB_CODE_SML_INVALID_DATA;
} }
@ -199,19 +189,21 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
} }
int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) { if (unlikely(ts < 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet parse timestamp failed", sql);
return TSDB_CODE_INVALID_TIMESTAMP; return TSDB_CODE_INVALID_TIMESTAMP;
} }
// parse value // parse value
smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen); smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen);
if (unlikely(!elements->cols || elements->colsLen == 0)) { if (unlikely(!elements->cols || elements->colsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid value", sql);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen}; SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) { int ret = smlParseValue(&kv, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
@ -220,11 +212,11 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
elements->tags = sql; elements->tags = sql;
elements->tagsLen = sqlEnd - sql; elements->tagsLen = sqlEnd - sql;
if (unlikely(!elements->tags || elements->tagsLen == 0)) { if (unlikely(!elements->tags || elements->tagsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql); smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid tag value", sql);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
int ret = smlParseTelnetTags(info, sql, sqlEnd, elements); ret = smlParseTelnetTags(info, sql, sqlEnd, elements);
if (unlikely(ret != TSDB_CODE_SUCCESS)) { if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret; return ret;
} }
@ -239,5 +231,12 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision); kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision);
} }
return smlParseEndTelnetJson(info, elements, &kvTs, &kv); if (info->dataFormat){
ret = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv);
} else {
ret = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv);
}
info->preLine = *elements;
return ret;
} }
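Per the format comment above, smlParseTelnetString consumes one record of the form <metric> <timestamp> <value> <tagk>=<tagv>[ ...], with the timestamp converted from nanosecond precision to the table's precision at the end. A representative line (host and interface values are made up):

/* One telnet-style record matching the format comment above. */
static const char *kTelnetSample =
    "sys.if.bytes.out 1479496100 1.3e3 host=web01 interface=eth0";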

View File

@ -1068,6 +1068,34 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_STB** fields) {
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
return pStmt->errCode;
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
tscError("invalid operation to get query column fileds");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STableDataCxt** pDataBlock = NULL;
if (pStmt->sql.stbInterlaceMode) {
pDataBlock = &pStmt->sql.siInfo.pDataCtx;
} else {
pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
}
}
STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, fieldNum, fields));
return TSDB_CODE_SUCCESS;
}
/* /*
SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) {
while (true) { while (true) {
@ -1808,7 +1836,7 @@ _return:
return code; return code;
} }
int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { int stmtParseColFields2(TAOS_STMT2* stmt) {
int32_t code = 0; int32_t code = 0;
STscStmt2* pStmt = (STscStmt2*)stmt; STscStmt2* pStmt = (STscStmt2*)stmt;
int32_t preCode = pStmt->errCode; int32_t preCode = pStmt->errCode;
@ -1842,8 +1870,6 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) {
STMT_ERRI_JRET(stmtParseSql(pStmt)); STMT_ERRI_JRET(stmtParseSql(pStmt));
} }
STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, fields));
_return: _return:
pStmt->errCode = preCode; pStmt->errCode = preCode;
@ -1851,6 +1877,24 @@ _return:
return code; return code;
} }
int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) {
int32_t code = stmtParseColFields2(stmt);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return stmtFetchColFields2(stmt, nums, fields);
}
int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_STB** fields) {
int32_t code = stmtParseColFields2(stmt);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return stmtFetchStbColFields2(stmt, nums, fields);
}
int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
STscStmt2* pStmt = (STscStmt2*)stmt; STscStmt2* pStmt = (STscStmt2*)stmt;

View File

@@ -24,12 +24,9 @@
 #include "tref.h"
 #include "ttimer.h"

-#define tqFatalC(...) do { if (cDebugFlag & DEBUG_FATAL || tqClientDebug) { taosPrintLog("TQ FATAL ", DEBUG_FATAL, tqDebugFlag, __VA_ARGS__); }} while(0)
-#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebug) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqDebugFlag, __VA_ARGS__); }} while(0)
-#define tqWarnC(...) do { if (cDebugFlag & DEBUG_WARN || tqClientDebug) { taosPrintLog("TQ WARN ", DEBUG_WARN, tqDebugFlag, __VA_ARGS__); }} while(0)
-#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebug) { taosPrintLog("TQ ", DEBUG_INFO, tqDebugFlag, __VA_ARGS__); }} while(0)
-#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebug) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
-#define tqTraceC(...) do { if (cDebugFlag & DEBUG_TRACE || tqClientDebug) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
+#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
+#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
+#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)

 #define EMPTY_BLOCK_POLL_IDLE_DURATION 10
 #define DEFAULT_AUTO_COMMIT_INTERVAL 5000
@ -831,8 +828,8 @@ static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){
} }
code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet); code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet);
if (code != 0){ if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){
tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", tqErrorC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d",
tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups); tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups);
} }
} }
@ -857,7 +854,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
return; return;
} }
code = innerCommitAll(tmq, pParamSet); code = innerCommitAll(tmq, pParamSet);
if (code != 0){ if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){
tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code)); tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code));
} }
@ -957,7 +954,8 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
} }
} }
tqClientDebug = rsp.debugFlag; tqClientDebugFlag = rsp.debugFlag;
tDestroySMqHbRsp(&rsp); tDestroySMqHbRsp(&rsp);
END: END:
@ -978,6 +976,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
req.consumerId = tmq->consumerId; req.consumerId = tmq->consumerId;
req.epoch = tmq->epoch; req.epoch = tmq->epoch;
req.pollFlag = atomic_load_8(&tmq->pollFlag); req.pollFlag = atomic_load_8(&tmq->pollFlag);
tqDebugC("consumer:0x%" PRIx64 " send heartbeat, pollFlag:%d", tmq->consumerId, req.pollFlag);
req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows)); req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows));
if (req.topics == NULL) { if (req.topics == NULL) {
goto END; goto END;
@ -1063,7 +1062,7 @@ END:
tDestroySMqHbReq(&req); tDestroySMqHbReq(&req);
if (tmrId != NULL) { if (tmrId != NULL) {
bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer); bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer);
tqDebugC("reset timer fo tmq hb:%d", ret); tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag);
} }
int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId); int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId);
if (ret != 0){ if (ret != 0){
@ -1269,7 +1268,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) {
} }
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); if (code != TSDB_CODE_MND_CONSUMER_NOT_READY){
tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code));
}
goto END; goto END;
} }
@ -1422,7 +1423,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) {
tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId); tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId);
bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer, bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer,
&pTmq->epTimer); &pTmq->epTimer);
tqDebugC("reset timer fo tmq ask ep:%d", ret); tqDebugC("reset timer for tmq ask ep:%d", ret);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? pTmq->commitCb : defaultCommitCbFn; tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? pTmq->commitCb : defaultCommitCbFn;
asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam); asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam);
@ -1430,7 +1431,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) {
pTmq->autoCommitInterval / 1000.0); pTmq->autoCommitInterval / 1000.0);
bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer, bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer,
&pTmq->commitTimer); &pTmq->commitTimer);
tqDebugC("reset timer fo commit:%d", ret); tqDebugC("reset timer for commit:%d", ret);
} else { } else {
tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType); tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType);
} }
@ -2868,8 +2869,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes
pRspObj->resInfo.precision = precision; pRspObj->resInfo.precision = precision;
pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows; pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows;
int32_t code = setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols, int32_t code = setResultDataPtr(&pRspObj->resInfo, convertUcs4);
pRspObj->resInfo.numOfRows, convertUcs4);
if (code != 0) { if (code != 0) {
return code; return code;
} }
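The switch from the old boolean tqClientDebug to tqClientDebugFlag means the value pushed back in the heartbeat response (rsp.debugFlag) is now treated as a per-level bitmask and tested with a bitwise AND in the logging macros. A tiny standalone illustration of that gate; the DEBUG_* bit values here are assumptions, only the bitwise test itself matters.

#include <stdint.h>
#include <stdio.h>

enum { DEBUG_ERROR = 1u << 0, DEBUG_INFO = 1u << 1, DEBUG_DEBUG = 1u << 2 };

int main(void) {
  uint32_t tqClientDebugFlag = DEBUG_ERROR | DEBUG_INFO;  /* e.g. pushed from the heartbeat */
  if (tqClientDebugFlag & DEBUG_DEBUG) {
    printf("debug logging on\n");
  } else {
    printf("debug logging off\n");   /* this branch runs: DEBUG bit not set */
  }
  return 0;
}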

View File

@ -68,6 +68,15 @@ TEST(testCase, smlParseInfluxString_Test) {
taosArrayDestroy(elements.colArray); taosArrayDestroy(elements.colArray);
elements.colArray = nullptr; elements.colArray = nullptr;
// case 0 false
tmp = "st,t1=3 c3=\"";
(void)memcpy(sql, tmp, strlen(tmp) + 1);
(void)memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(info, sql, sql + strlen(sql), &elements);
ASSERT_NE(ret, 0);
taosArrayDestroy(elements.colArray);
elements.colArray = nullptr;
// case 2 false // case 2 false
tmp = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000"; tmp = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
(void)memcpy(sql, tmp, strlen(tmp) + 1); (void)memcpy(sql, tmp, strlen(tmp) + 1);
@ -591,6 +600,104 @@ TEST(testCase, smlParseTelnetLine_Test) {
// smlDestroyInfo(info); // smlDestroyInfo(info);
//} //}
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg) {
const char *pVal = kvVal->value;
int32_t len = kvVal->length;
char *endptr = NULL;
double result = taosStr2Double(pVal, &endptr);
if (pVal == endptr) {
smlBuildInvalidDataMsg(msg, "invalid data", pVal);
return false;
}
int32_t left = len - (endptr - pVal);
if (left == 0 || (left == 3 && strncasecmp(endptr, "f64", left) == 0)) {
kvVal->type = TSDB_DATA_TYPE_DOUBLE;
kvVal->d = result;
} else if ((left == 3 && strncasecmp(endptr, "f32", left) == 0)) {
if (!IS_VALID_FLOAT(result)) {
smlBuildInvalidDataMsg(msg, "float out of range[-3.402823466e+38,3.402823466e+38]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_FLOAT;
kvVal->f = (float)result;
} else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)) {
if (smlDoubleToInt64OverFlow(result)) {
errno = 0;
int64_t tmp = taosStr2Int64(pVal, &endptr, 10);
if (errno == ERANGE) {
smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = (int64_t)result;
} else if ((left == 1 && *endptr == 'u') || (left == 3 && strncasecmp(endptr, "u64", left) == 0)) {
if (result >= (double)UINT64_MAX || result < 0) {
errno = 0;
uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10);
if (errno == ERANGE || result < 0) {
smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = result;
} else if (left == 3 && strncasecmp(endptr, "i32", left) == 0) {
if (!IS_VALID_INT(result)) {
smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_INT;
kvVal->i = result;
} else if (left == 3 && strncasecmp(endptr, "u32", left) == 0) {
if (!IS_VALID_UINT(result)) {
smlBuildInvalidDataMsg(msg, "unsigned int out of range[0,4294967295]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_UINT;
kvVal->u = result;
} else if (left == 3 && strncasecmp(endptr, "i16", left) == 0) {
if (!IS_VALID_SMALLINT(result)) {
smlBuildInvalidDataMsg(msg, "small int our of range[-32768,32767]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_SMALLINT;
kvVal->i = result;
} else if (left == 3 && strncasecmp(endptr, "u16", left) == 0) {
if (!IS_VALID_USMALLINT(result)) {
smlBuildInvalidDataMsg(msg, "unsigned small int out of rang[0,65535]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_USMALLINT;
kvVal->u = result;
} else if (left == 2 && strncasecmp(endptr, "i8", left) == 0) {
if (!IS_VALID_TINYINT(result)) {
smlBuildInvalidDataMsg(msg, "tiny int out of range[-128,127]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_TINYINT;
kvVal->i = result;
} else if (left == 2 && strncasecmp(endptr, "u8", left) == 0) {
if (!IS_VALID_UTINYINT(result)) {
smlBuildInvalidDataMsg(msg, "unsigned tiny int out of range[0,255]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_UTINYINT;
kvVal->u = result;
} else {
smlBuildInvalidDataMsg(msg, "invalid data", pVal);
return false;
}
return true;
}
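A sketch of how smlParseNumberOld can be driven inside the performance test; the SSmlKv and SSmlMsgBuf setup mirrors the patterns used elsewhere in these tests, the msgBuf.len field name is an assumption, and the literal is only an example.

  char buf[256] = {0};
  SSmlMsgBuf msgBuf;
  msgBuf.buf = buf;          /* follows the info->msgBuf.buf usage above */
  msgBuf.len = 256;          /* assumed field name for the buffer size   */
  SSmlKv kv;
  (void)memset(&kv, 0, sizeof(kv));
  kv.value = "123i32";
  kv.length = 6;
  if (smlParseNumberOld(&kv, &msgBuf)) {
    /* expect kv.type == TSDB_DATA_TYPE_INT and kv.i == 123 */
  }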
TEST(testCase, smlParseNumber_performance_Test) { TEST(testCase, smlParseNumber_performance_Test) {
char msg[256] = {0}; char msg[256] = {0};
SSmlMsgBuf msgBuf; SSmlMsgBuf msgBuf;

View File

@ -118,7 +118,7 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, {.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
{.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "keep_time_offset", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "keep_time_offset", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "s3_chunksize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "s3_chunkpages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "s3_keeplocal", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "s3_keeplocal", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "s3_compact", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, {.name = "s3_compact", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
{.name = "with_arbitrator", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, {.name = "with_arbitrator", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
@ -165,8 +165,8 @@ static const SSysDbTableSchema userStbsSchema[] = {
static const SSysDbTableSchema streamSchema[] = { static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "stream_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "stream_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "history_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@ -190,9 +190,9 @@ static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "process_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "process_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "out_total", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "out_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "out_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},

View File

@ -18,6 +18,7 @@
#include "tcompare.h" #include "tcompare.h"
#include "tlog.h" #include "tlog.h"
#include "tname.h" #include "tname.h"
#include "tglobal.h"
#define MALLOC_ALIGN_BYTES 32 #define MALLOC_ALIGN_BYTES 32
@ -86,8 +87,18 @@ int32_t getJsonValueLen(const char* data) {
return dataLen; return dataLen;
} }
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { static int32_t getDataLen(int32_t type, const char* pData) {
if (isNull || pData == NULL) { int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
}
return dataLen;
}
static int32_t colDataSetValHelp(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (isNull || pData == NULL) {
// There is a placehold for each NULL value of binary or nchar type. // There is a placehold for each NULL value of binary or nchar type.
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
pColumnInfoData->varmeta.offset[rowIndex] = -1; // it is a null value of VAR type. pColumnInfoData->varmeta.offset[rowIndex] = -1; // it is a null value of VAR type.
@ -101,11 +112,9 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
int32_t type = pColumnInfoData->info.type; int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) { if (IS_VAR_DATA_TYPE(type)) {
int32_t dataLen = 0; int32_t dataLen = getDataLen(type, pData);
if (type == TSDB_DATA_TYPE_JSON) { if (pColumnInfoData->varmeta.offset[rowIndex] > 0) {
dataLen = getJsonValueLen(pData); pColumnInfoData->varmeta.length = pColumnInfoData->varmeta.offset[rowIndex];
} else {
dataLen = varDataTLen(pData);
} }
SVarColAttr* pAttr = &pColumnInfoData->varmeta; SVarColAttr* pAttr = &pColumnInfoData->varmeta;
@ -134,7 +143,7 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
uint32_t len = pColumnInfoData->varmeta.length; uint32_t len = pColumnInfoData->varmeta.length;
pColumnInfoData->varmeta.offset[rowIndex] = len; pColumnInfoData->varmeta.offset[rowIndex] = len;
(void) memmove(pColumnInfoData->pData + len, pData, dataLen); (void)memmove(pColumnInfoData->pData + len, pData, dataLen);
pColumnInfoData->varmeta.length += dataLen; pColumnInfoData->varmeta.length += dataLen;
} else { } else {
memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * rowIndex, pData, pColumnInfoData->info.bytes); memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * rowIndex, pData, pColumnInfoData->info.bytes);
@ -144,6 +153,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0; return 0;
} }
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
pColumnInfoData->varmeta.offset[rowIndex] = -1;
}
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) {
return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull);
}
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx,
const char* pData) { const char* pData) {
int32_t type = pColumnInfoData->info.type; int32_t type = pColumnInfoData->info.type;
@ -3041,8 +3062,12 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
} }
// return length of encoded data, return -1 if failed // return length of encoded data, return -1 if failed
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) {
blockDataCheck(pBlock, false); int32_t code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
int32_t dataLen = 0;
@ -3106,9 +3131,11 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
size_t metaSize = 0;
if (IS_VAR_DATA_TYPE(pColRes->info.type)) {
metaSize = numOfRows * sizeof(int32_t);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->varmeta.offset, metaSize);
} else {
metaSize = BitmapLen(numOfRows);
if(dataLen + metaSize > dataBuflen) goto _exit;
memcpy(data, pColRes->nullbitmap, metaSize);
}
@ -3127,12 +3154,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
}
colSizes[col] += colSize;
dataLen += colSize;
if(dataLen > dataBuflen) goto _exit;
(void) memmove(data, pColData, colSize);
data += colSize;
}
} else {
colSizes[col] = colDataGetLength(pColRes, numOfRows);
dataLen += colSizes[col];
if(dataLen > dataBuflen) goto _exit;
if (pColRes->pData != NULL) {
(void) memmove(data, pColRes->pData, colSizes[col]);
}
@ -3156,7 +3185,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
*actualLen = dataLen;
*groupId = pBlock->info.id.groupId;
if (dataLen > dataBuflen) goto _exit;
return dataLen;
_exit:
uError("blockEncode dataLen:%d, dataBuflen:%zu", dataLen, dataBuflen);
terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
return -1;
}
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos) {
@ -3286,9 +3322,13 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos
*pEndPos = pStart;
code = blockDataCheck(pBlock);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return code;
}
return TSDB_CODE_SUCCESS;
}
int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) {
@ -3498,20 +3538,19 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
return nextRowIdx;
}
#define BLOCK_DATA_CHECK_TRESSA(o) \
if (!(o)) { \
uError("blockDataCheck failed! line:%d", __LINE__); \
return TSDB_CODE_INTERNAL_ERROR; \
}
int32_t blockDataCheck(const SSDataBlock* pDataBlock) {
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER || NULL == pDataBlock || pDataBlock->info.rows == 0) {
return TSDB_CODE_SUCCESS;
}
BLOCK_DATA_CHECK_TRESSA(pDataBlock->info.rows > 0);
if (!pDataBlock->info.dataLoad) {
return TSDB_CODE_SUCCESS;
}
bool isVarType = false;
@ -3522,8 +3561,10 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < colNum; ++i) {
SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i);
BLOCK_DATA_CHECK_TRESSA(pCol != NULL);
isVarType = IS_VAR_DATA_TYPE(pCol->info.type);
checkRows = pDataBlock->info.rows;
if (pCol->info.noData == true) continue;
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset);
@ -3531,25 +3572,37 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
BLOCK_DATA_CHECK_TRESSA(pCol->nullbitmap);
}
nextPos = -1;
for (int64_t r = 0; r < checkRows; ++r) {
if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break;
if (!colDataIsNull_s(pCol, r)) {
BLOCK_DATA_CHECK_TRESSA(pCol->pData);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen);
if (isVarType) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.allocLen > 0);
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length);
if (pCol->reassigned) {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0);
} else if (0 == r || nextPos == -1) {
nextPos = pCol->varmeta.offset[r];
} else {
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos);
}
char* pColData = pCol->pData + pCol->varmeta.offset[r];
int32_t colSize = 0;
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
colLen = getJsonValueLen(pColData);
} else {
colLen = varDataTLen(pColData);
}
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
BLOCK_DATA_CHECK_TRESSA(colLen >= CHAR_BYTES);
} else {
BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE);
}
BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes);
if (pCol->reassigned) {
@ -3561,13 +3614,21 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) {
typeValue = *(char*)(pCol->pData + pCol->varmeta.offset[r] + colLen - 1);
} else {
if (TSDB_DATA_TYPE_FLOAT == pCol->info.type) {
float v = 0;
GET_TYPED_DATA(v, float, pCol->info.type, colDataGetNumData(pCol, r));
} else if (TSDB_DATA_TYPE_DOUBLE == pCol->info.type) {
double v = 0;
GET_TYPED_DATA(v, double, pCol->info.type, colDataGetNumData(pCol, r));
} else {
GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r));
}
}
}
}
}
}
return TSDB_CODE_SUCCESS;
}

@ -119,6 +119,7 @@ bool tsMonitorForceV2 = true;
// audit // audit
bool tsEnableAudit = true; bool tsEnableAudit = true;
bool tsEnableAuditCreateTable = true; bool tsEnableAuditCreateTable = true;
bool tsEnableAuditDelete = true;
int32_t tsAuditInterval = 5000; int32_t tsAuditInterval = 5000;
// telem // telem
@ -137,8 +138,9 @@ bool tsEnableCrashReport = false;
#else #else
bool tsEnableCrashReport = true; bool tsEnableCrashReport = true;
#endif #endif
char *tsClientCrashReportUri = "/ccrashreport"; char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport"; char *tsSvrCrashReportUri = "/dcrashreport";
int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NORMAL;
// schemaless // schemaless
bool tsSmlDot2Underline = true; bool tsSmlDot2Underline = true;
@ -287,7 +289,7 @@ int32_t tsTtlUnit = 86400;
int32_t tsTtlPushIntervalSec = 10; int32_t tsTtlPushIntervalSec = 10;
int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups
int32_t tsS3MigrateIntervalSec = 60 * 60; // interval of s3migrate db in all vgroups int32_t tsS3MigrateIntervalSec = 60 * 60; // interval of s3migrate db in all vgroups
bool tsS3MigrateEnabled = 1; bool tsS3MigrateEnabled = 0;
int32_t tsGrantHBInterval = 60; int32_t tsGrantHBInterval = 60;
int32_t tsUptimeInterval = 300; // seconds int32_t tsUptimeInterval = 300; // seconds
char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits
@ -548,7 +550,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebug", tqClientDebug, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@ -610,6 +612,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN( TAOS_CHECK_RETURN(
cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
@ -675,10 +678,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_RETURN(TSDB_CODE_SUCCESS); TAOS_RETURN(TSDB_CODE_SUCCESS);
} }
@ -777,6 +780,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableAuditDelete", tsEnableAuditDelete, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
@ -1305,6 +1309,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark"); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark");
tsmaDataDeleteMark = pItem->i32; tsmaDataDeleteMark = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel");
tsSafetyCheckLevel = pItem->i32;
TAOS_RETURN(TSDB_CODE_SUCCESS); TAOS_RETURN(TSDB_CODE_SUCCESS);
} }
@ -1490,6 +1497,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable"); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable");
tsEnableAuditCreateTable = pItem->bval; tsEnableAuditCreateTable = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableAuditDelete");
tsEnableAuditDelete = pItem->bval;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval"); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval");
tsAuditInterval = pItem->i32; tsAuditInterval = pItem->i32;
@ -1646,6 +1656,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval"); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval");
tsStreamCheckpointInterval = pItem->i32; tsStreamCheckpointInterval = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint");
tsMaxConcurrentCheckpoint = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate"); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate");
tsSinkDataRate = pItem->fval; tsSinkDataRate = pItem->fval;
@ -2000,7 +2013,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag},
{"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag},
{"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag},
{"tqClientDebug", &tqClientDebug}, {"tqClientDebugFlag", &tqClientDebugFlag},
}; };
static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, static OptionNameAndVar options[] = {{"audit", &tsEnableAudit},
@ -2046,7 +2059,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"s3UploadDelaySec", &tsS3UploadDelaySec}, {"s3UploadDelaySec", &tsS3UploadDelaySec},
{"supportVnodes", &tsNumOfSupportVnodes}, {"supportVnodes", &tsNumOfSupportVnodes},
{"experimental", &tsExperimental}, {"experimental", &tsExperimental},
{"maxTsmaNum", &tsMaxTsmaNum}}; {"maxTsmaNum", &tsMaxTsmaNum},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false); code = taosCfgSetOption(options, tListLen(options), pItem, false);
@ -2302,7 +2316,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"experimental", &tsExperimental}, {"experimental", &tsExperimental},
{"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags}, {"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags},
{"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay},
{"tsmaDataDeleteMark", &tsmaDataDeleteMark}}; {"tsmaDataDeleteMark", &tsmaDataDeleteMark},
{"safetyCheckLevel", &tsSafetyCheckLevel}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false); code = taosCfgSetOption(options, tListLen(options), pItem, false);

@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t
}
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit);
TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit);

@ -567,6 +567,7 @@ int32_t tSerializeSClientHbBatchRsp(void *buf, int32_t bufLen, const SClientHbBa
TAOS_CHECK_EXIT(tSerializeSClientHbRsp(&encoder, pRsp));
}
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pBatchRsp->monitorParas));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pBatchRsp->enableAuditDelete));
tEndEncode(&encoder);
_exit:
@ -609,6 +610,12 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pBatchRsp->monitorParas));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pBatchRsp->enableAuditDelete));
} else {
pBatchRsp->enableAuditDelete = 0;
}
tEndDecode(&decoder);
_exit:
@ -1813,6 +1820,60 @@ _exit:
void tFreeSDropUserReq(SDropUserReq *pReq) { FREESQL(); }
int32_t tSerializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
SEncoder encoder = {0};
int32_t code = 0;
int32_t lino;
int32_t tlen;
tEncoderInit(&encoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartEncode(&encoder));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->operation));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->db));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->table));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->sqlLen));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->pSql));
tEndEncode(&encoder);
_exit:
if (code) {
tlen = code;
} else {
tlen = encoder.pos;
}
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) {
SDecoder decoder = {0};
int32_t code = 0;
int32_t lino;
tDecoderInit(&decoder, buf, bufLen);
TAOS_CHECK_EXIT(tStartDecode(&decoder));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->operation));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->db));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->table));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->sqlLen));
if (pReq->sqlLen > 0) {
pReq->pSql = taosMemoryMalloc(pReq->sqlLen + 1);
if (pReq->pSql == NULL) {
TAOS_CHECK_EXIT(terrno);
}
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->pSql));
}
tEndDecode(&decoder);
_exit:
tDecoderClear(&decoder);
return code;
}
void tFreeSAuditReq(SAuditReq *pReq) { taosMemoryFreeClear(pReq->pSql); }
SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) { SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) {
if (pIpWhiteList == NULL) return NULL; if (pIpWhiteList == NULL) return NULL;
@ -3874,6 +3935,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3ChunkSize)); TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3ChunkSize));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3KeepLocal)); TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3KeepLocal));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->s3Compact)); TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->s3Compact));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->dnodeListStr));
tEndEncode(&encoder); tEndEncode(&encoder);
@ -3962,6 +4024,10 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->s3Compact)); TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->s3Compact));
} }
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->dnodeListStr));
}
tEndDecode(&decoder); tEndDecode(&decoder);
_exit: _exit:
@ -6289,6 +6355,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->authVer)); TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->authVer));
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer)); TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer));
TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas)); TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pRsp->enableAuditDelete));
tEndEncode(&encoder); tEndEncode(&encoder);
_exit: _exit:
@ -6340,6 +6407,11 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (!tDecodeIsEnd(&decoder)) { if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas)); TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas));
} }
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pRsp->enableAuditDelete));
} else {
pRsp->enableAuditDelete = 0;
}
tEndDecode(&decoder); tEndDecode(&decoder);
_exit: _exit:
@ -10983,6 +11055,7 @@ _exit:
int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp));
TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime));
return 0; return 0;
} }
@ -11094,6 +11167,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen));
} }
} }
_exit: _exit:
return code; return code;
} }

@ -297,12 +297,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) {
static void dmGenerateGrant() { mndGenerateMachineCode(); }
static void dmPrintVersion() {
printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version,
td_compatible_version);
printf("git: %s\n", td_gitinfo);
#ifdef TD_ENTERPRISE
printf("gitOfInternal: %s\n", td_gitinfoOfInternal);
#endif
printf("build: %s\n", td_buildinfo);
}
static void dmPrintHelp() {

@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeBufSize;
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
if (pRsp == NULL) {
@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
pStart += sizeof(SSysTableSchema);
}
int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, numOfCols);
if (len < 0) {
dError("failed to retrieve data since %s", tstrerror(code));
blockDataDestroy(pBlock);

@ -212,6 +212,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_AUDIT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;

@ -1,5 +1,10 @@
aux_source_directory(src MGMT_SNODE)
add_library(mgmt_snode STATIC ${MGMT_SNODE})
if(${TD_DARWIN})
target_compile_options(mgmt_snode PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
mgmt_snode
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"

@ -1,5 +1,10 @@
aux_source_directory(src MGMT_VNODE)
add_library(mgmt_vnode STATIC ${MGMT_VNODE})
if(${TD_DARWIN})
target_compile_options(mgmt_vnode PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
mgmt_vnode
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"

@ -741,7 +741,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
int32_t vgId = alterReq.vgId;
dInfo(
"vgId:%d, vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d leanerReplica:%d "
"learnerSelfIndex:%d strict:%d changeVersion:%d",
vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict, alterReq.changeVersion);

@ -30,9 +30,18 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
if (epSet.numOfEps <= 1) {
if (epSet.numOfEps == 0) {
pMsg->pCont = NULL;
pMsg->code = TSDB_CODE_MNODE_NOT_FOUND;
return;
}
// if the only endpoint in the epSet is this dnode itself, it cannot serve as a redirect target;
// report TSDB_CODE_MNODE_NOT_FOUND instead of returning an incorrect or misleading epSet.
if (strcmp(epSet.eps[0].fqdn, tsLocalFqdn) == 0 && epSet.eps[0].port == tsServerPort) {
pMsg->pCont = NULL;
pMsg->code = TSDB_CODE_MNODE_NOT_FOUND;
return;
}
}
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
@ -129,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId);
int32_t svrVer = 0;
code = taosVersionStrToInt(td_version, &svrVer);
if (code != 0) {
dError("failed to convert version string:%s to int, code:%d", td_version, code);
goto _OVER;
}
if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) {
@ -425,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->clientRpc = rpcOpen(&rpcInit);
@ -474,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) {
rpcInit.startReadTimer = 0;
rpcInit.readTimeout = 0;
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->statusRpc = rpcOpen(&rpcInit);
@ -524,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) {
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->syncRpc = rpcOpen(&rpcInit);
@ -579,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) {
rpcInit.compressSize = tsCompressMsgSize;
rpcInit.shareConnLimit = tsShareConnLimit * 16;
if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
dError("failed to convert version string:%s to int", td_version);
}
pTrans->serverRpc = rpcOpen(&rpcInit);

@ -54,7 +54,7 @@ void TestClient::DoInit() {
rpcInit.parent = this;
// rpcInit.secret = (char*)secretEncrypt;
// rpcInit.spi = 1;
taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
clientRpc = rpcOpen(&rpcInit);
ASSERT(clientRpc);

@ -47,6 +47,15 @@ bool mndUpdateArbGroupBySetAssignedLeader(SArbGroup *pGroup, int32_t vgId, char
int32_t mndGetArbGroupSize(SMnode *pMnode);
typedef enum {
CHECK_SYNC_NONE = 0,
CHECK_SYNC_SET_ASSIGNED_LEADER = 1,
CHECK_SYNC_CHECK_SYNC = 2,
CHECK_SYNC_UPDATE = 3
} ECheckSyncOp;
void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup);
#ifdef __cplusplus
}
#endif

@ -37,6 +37,7 @@ const char *mndGetDbStr(const char *src);
const char *mndGetStableStr(const char *src);
int32_t mndProcessCompactDbReq(SRpcMsg *pReq);
int32_t mndCheckDbDnodeList(SMnode *pMnode, char *db, char *dnodeListStr, SArray *dnodeList);
#ifdef __cplusplus
}

@ -133,6 +133,7 @@ int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgr
int32_t mndGetStreamObj(SMnode *pMnode, int64_t streamId, SStreamObj** pStream);
bool mndStreamNodeIsUpdated(SMnode *pMnode);
int32_t mndCheckForSnode(SMnode *pMnode, SDbObj *pSrcDb);
int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId);
int32_t mndProcessStreamHb(SRpcMsg *pReq);

@ -35,9 +35,9 @@ void mndSortVnodeGid(SVgObj *pVgroup);
int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId);
int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId, SArray *dnodeList);
int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups, SArray *dnodeList);
int32_t mndAddNewVgPrepareAction(SMnode *, STrans *pTrans, SVgObj *pVg);
int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid);
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);

@ -250,6 +250,12 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
pOld->assignedLeader.acked = pNew->assignedLeader.acked;
pOld->version++;
mInfo(
"arbgroup:%d, perform update action. members[0].token:%s, members[1].token:%s, isSync:%d, as-dnodeid:%d, "
"as-token:%s, as-acked:%d, version:%" PRId64,
pOld->vgId, pOld->members[0].state.token, pOld->members[1].state.token, pOld->isSync,
pOld->assignedLeader.dnodeId, pOld->assignedLeader.token, pOld->assignedLeader.acked, pOld->version);
_OVER:
(void)taosThreadMutexUnlock(&pOld->mutex);
@ -577,19 +583,77 @@ static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, i
return code;
}
void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup) {
*pOp = CHECK_SYNC_NONE;
int32_t code = 0;
int32_t vgId = pArbGroup->vgId;
bool member0IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 0, nowMs);
bool member1IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 1, nowMs);
SArbAssignedLeader *pAssignedLeader = &pArbGroup->assignedLeader;
int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId;
// 1. has assigned && no response => send req
if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) {
*pOp = CHECK_SYNC_SET_ASSIGNED_LEADER;
return;
}
// 2. both of the two members are timeout => skip
if (member0IsTimeout && member1IsTimeout) {
return;
}
// 3. no member is timeout => check sync
if (member0IsTimeout == false && member1IsTimeout == false) {
// no assigned leader and not sync
if (currentAssignedDnodeId == 0 && !pArbGroup->isSync) {
*pOp = CHECK_SYNC_CHECK_SYNC;
}
return;
}
// 4. one of the members is timeout => set assigned leader
int32_t candidateIndex = member0IsTimeout ? 1 : 0;
SArbGroupMember *pMember = &pArbGroup->members[candidateIndex];
// has assigned leader and dnodeId not match => skip
if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) {
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId,
pMember->info.dnodeId, currentAssignedDnodeId);
return;
}
// not sync => skip
if (pArbGroup->isSync == false) {
if (currentAssignedDnodeId == pMember->info.dnodeId) {
mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
pMember->info.dnodeId);
} else {
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
pMember->info.dnodeId);
}
return;
}
// is sync && no assigned leader => write to sdb
mndArbGroupDupObj(pArbGroup, pNewGroup);
mndArbGroupSetAssignedLeader(pNewGroup, candidateIndex);
*pOp = CHECK_SYNC_UPDATE;
}
static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
int32_t code = 0, lino = 0;
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SArbGroup *pArbGroup = NULL;
SArbGroup arbGroupDup = {0};
void *pIter = NULL;
SArray *pUpdateArray = NULL;
char arbToken[TSDB_ARB_TOKEN_SIZE];
TAOS_CHECK_EXIT(mndGetArbToken(pMnode, arbToken));
int64_t term = mndGetTerm(pMnode);
if (term < 0) {
mError("arb failed to get term since %s", terrstr());
@ -606,87 +670,63 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) {
return 0;
}
while (1) {
pIter = sdbFetch(pSdb, SDB_ARBGROUP, pIter, (void **)&pArbGroup);
if (pIter == NULL) break;
(void)taosThreadMutexLock(&pArbGroup->mutex);
mndArbGroupDupObj(pArbGroup, &arbGroupDup);
(void)taosThreadMutexUnlock(&pArbGroup->mutex);
int32_t vgId = arbGroupDup.vgId;
bool member0IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 0, nowMs);
bool member1IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 1, nowMs);
SArbAssignedLeader *pAssignedLeader = &arbGroupDup.assignedLeader;
int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId;
// 1. has assigned && is sync && no response => send req
if (currentAssignedDnodeId != 0 && arbGroupDup.isSync == true && pAssignedLeader->acked == false) {
(void)mndSendArbSetAssignedLeaderReq(pMnode, currentAssignedDnodeId, vgId, arbToken, term,
pAssignedLeader->token);
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, currentAssignedDnodeId);
sdbRelease(pSdb, pArbGroup);
continue;
}
// 2. both of the two members are timeout => skip
if (member0IsTimeout && member1IsTimeout) {
sdbRelease(pSdb, pArbGroup);
continue;
}
// 3. no member is timeout => check sync
if (member0IsTimeout == false && member1IsTimeout == false) {
// no assigned leader and not sync
if (currentAssignedDnodeId == 0 && !arbGroupDup.isSync) {
(void)mndSendArbCheckSyncReq(pMnode, arbGroupDup.vgId, arbToken, term, arbGroupDup.members[0].state.token,
arbGroupDup.members[1].state.token);
}
sdbRelease(pSdb, pArbGroup);
continue;
}
// 4. one of the members is timeout => set assigned leader
int32_t candidateIndex = member0IsTimeout ? 1 : 0;
SArbGroupMember *pMember = &arbGroupDup.members[candidateIndex];
// has assigned leader and dnodeId not match => skip
if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) {
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId,
pMember->info.dnodeId, currentAssignedDnodeId);
sdbRelease(pSdb, pArbGroup);
continue;
}
// not sync => skip
if (arbGroupDup.isSync == false) {
if (currentAssignedDnodeId == pMember->info.dnodeId) {
mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
pMember->info.dnodeId);
} else {
mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId,
pMember->info.dnodeId);
}
sdbRelease(pSdb, pArbGroup);
continue;
}
// is sync && no assigned leader => write to sdb
SArbGroup newGroup = {0};
mndArbGroupDupObj(&arbGroupDup, &newGroup);
mndArbGroupSetAssignedLeader(&newGroup, candidateIndex);
if (taosArrayPush(pUpdateArray, &newGroup) == NULL) {
taosArrayDestroy(pUpdateArray);
return terrno;
}
sdbRelease(pSdb, pArbGroup);
ECheckSyncOp op = CHECK_SYNC_NONE;
SArbGroup newGroup = {0};
mndArbCheckSync(&arbGroupDup, nowMs, &op, &newGroup);
int32_t vgId = arbGroupDup.vgId;
SArbAssignedLeader *pAssgndLeader = &arbGroupDup.assignedLeader;
int32_t assgndDnodeId = pAssgndLeader->dnodeId;
switch (op) {
case CHECK_SYNC_NONE:
mTrace("vgId:%d, arb skip to send msg by check sync", vgId);
break;
case CHECK_SYNC_SET_ASSIGNED_LEADER:
(void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token);
mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, assgndDnodeId);
break;
case CHECK_SYNC_CHECK_SYNC:
(void)mndSendArbCheckSyncReq(pMnode, vgId, arbToken, term, arbGroupDup.members[0].state.token,
arbGroupDup.members[1].state.token);
mInfo("vgId:%d, arb send check sync request", vgId);
break;
case CHECK_SYNC_UPDATE:
if (!pUpdateArray) {
pUpdateArray = taosArrayInit(16, sizeof(SArbGroup));
if (!pUpdateArray) {
TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY);
}
}
if (taosArrayPush(pUpdateArray, &newGroup) == NULL) {
TAOS_CHECK_EXIT(terrno);
}
break;
default:
mError("vgId:%d, arb unknown check sync op:%d", vgId, op);
break;
}
}
TAOS_CHECK_EXIT(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray));
_exit:
if (code != 0) {
mError("failed to check sync at line %d since %s", lino, terrstr());
}
taosArrayDestroy(pUpdateArray);
return 0;

@ -239,12 +239,13 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
MND_TMQ_RETURN_CHECK(mndAcquireConsumer(pMnode, consumerId, &pConsumer));
MND_TMQ_RETURN_CHECK(checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user));
atomic_store_32(&pConsumer->hbStatus, 0);
mDebug("consumer:0x%" PRIx64 " receive hb pollFlag:%d %d", consumerId, req.pollFlag, pConsumer->pollStatus);
if (req.pollFlag == 1){
atomic_store_32(&pConsumer->pollStatus, 0);
}
storeOffsetRows(pMnode, &req, pConsumer);
rsp.debugFlag = tqClientDebugFlag;
code = buildMqHbRsp(pMsg, &rsp);
END:

@ -462,8 +462,8 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
if (pCfg->cacheLast < TSDB_CACHE_MODEL_NONE || pCfg->cacheLast > TSDB_CACHE_MODEL_BOTH) return code;
if (pCfg->hashMethod != 1) return code;
if (pCfg->replications > mndGetDnodeSize(pMnode)) {
code = TSDB_CODE_MND_NO_ENOUGH_DNODES;
TAOS_RETURN(code);
}
if (pCfg->walRetentionPeriod < TSDB_DB_MIN_WAL_RETENTION_PERIOD) return code;
if (pCfg->walRetentionSize < TSDB_DB_MIN_WAL_RETENTION_SIZE) return code;
@ -583,7 +583,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
if (pCfg->s3ChunkSize <= 0) pCfg->s3ChunkSize = TSDB_DEFAULT_S3_CHUNK_SIZE;
if (pCfg->s3KeepLocal <= 0) pCfg->s3KeepLocal = TSDB_DEFAULT_S3_KEEP_LOCAL;
if (pCfg->s3Compact < 0) pCfg->s3Compact = TSDB_DEFAULT_S3_COMPACT;
if (pCfg->withArbitrator < 0) pCfg->withArbitrator = TSDB_DEFAULT_DB_WITH_ARBITRATOR;
if (pCfg->encryptAlgorithm < 0) pCfg->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO;
}
@ -746,7 +746,7 @@ static int32_t mndSetCreateDbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj
TAOS_RETURN(code);
}
static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, SUserObj *pUser, SArray *dnodeList) {
int32_t code = 0;
SUserObj newUserObj = {0};
SDbObj dbObj = {0};
@ -823,7 +823,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
}
SVgObj *pVgroups = NULL;
if ((code = mndAllocVgroup(pMnode, &dbObj, &pVgroups, dnodeList)) != 0) {
mError("db:%s, failed to create, alloc vgroup failed, since %s", pCreate->db, terrstr());
TAOS_RETURN(code);
}
@ -925,6 +925,17 @@ _exit:
TAOS_RETURN(code);
}
#ifndef TD_ENTERPRISE
int32_t mndCheckDbDnodeList(SMnode *pMnode, char *db, char *dnodeListStr, SArray *dnodeList) {
if (dnodeListStr[0] != 0) {
terrno = TSDB_CODE_OPS_NOT_SUPPORT;
return terrno;
} else {
return 0;
}
}
#endif
static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@ -932,6 +943,10 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
SDbObj *pDb = NULL;
SUserObj *pUser = NULL;
SCreateDbReq createReq = {0};
SArray *dnodeList = NULL;
dnodeList = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(int32_t));
TSDB_CHECK_NULL(dnodeList, code, lino, _OVER, TSDB_CODE_OUT_OF_MEMORY);
TAOS_CHECK_GOTO(tDeserializeSCreateDbReq(pReq->pCont, pReq->contLen, &createReq), NULL, _OVER);
#ifdef WINDOWS
@ -975,9 +990,11 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
TAOS_CHECK_GOTO(mndCheckDbEncryptKey(pMnode, &createReq), &lino, _OVER);
TAOS_CHECK_GOTO(mndCheckDbDnodeList(pMnode, createReq.db, createReq.dnodeListStr, dnodeList), &lino, _OVER);
TAOS_CHECK_GOTO(mndAcquireUser(pMnode, pReq->info.conn.user, &pUser), &lino, _OVER);
TAOS_CHECK_GOTO(mndCreateDb(pMnode, pReq, &createReq, pUser, dnodeList), &lino, _OVER);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
SName name = {0};
@ -994,6 +1011,7 @@ _OVER:
mndReleaseDb(pMnode, pDb);
mndReleaseUser(pMnode, pUser);
tFreeSCreateDbReq(&createReq);
taosArrayDestroy(dnodeList);
TAOS_RETURN(code);
}
@ -1168,7 +1186,9 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
SVgObj *pVgroup = NULL;
SArray *pArray = mndBuildDnodesArray(pMnode, 0, NULL);
TSDB_CHECK_NULL(pArray, code, lino, _err, TSDB_CODE_OUT_OF_MEMORY);
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);

@ -86,6 +86,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq);
static int32_t mndProcessNotifyReq(SRpcMsg *pReq);
static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessStatisReq(SRpcMsg *pReq);
static int32_t mndProcessAuditReq(SRpcMsg *pReq);
static int32_t mndProcessUpdateDnodeInfoReq(SRpcMsg *pReq);
static int32_t mndProcessCreateEncryptKeyReq(SRpcMsg *pRsp);
static int32_t mndProcessCreateEncryptKeyRsp(SRpcMsg *pRsp);
@ -125,6 +126,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq);
mndSetMsgHandle(pMnode, TDMT_MND_STATIS, mndProcessStatisReq);
mndSetMsgHandle(pMnode, TDMT_MND_AUDIT, mndProcessAuditReq);
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ENCRYPT_KEY, mndProcessCreateEncryptKeyReq);
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_ENCRYPT_KEY_RSP, mndProcessCreateEncryptKeyRsp);
mndSetMsgHandle(pMnode, TDMT_MND_UPDATE_DNODE_INFO, mndProcessUpdateDnodeInfoReq);
@ -604,6 +606,24 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
return 0;
}
static int32_t mndProcessAuditReq(SRpcMsg *pReq) {
mTrace("process audit req:%p", pReq);
if (tsEnableAudit && tsEnableAuditDelete) {
SMnode *pMnode = pReq->info.node;
SAuditReq auditReq = {0};
TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq));
mDebug("received audit req:%s, %s, %s, %s", auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql);
auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql,
auditReq.sqlLen);
tFreeSAuditReq(&auditReq);
}
return 0;
}
static int32_t mndUpdateDnodeObj(SMnode *pMnode, SDnodeObj *pDnode) {
int32_t code = 0, lino = 0;
SDnodeInfoReq infoReq = {0};

@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
}
// cluster info
tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version));
pClusterInfo->monitor_interval = tsMonitorInterval;
pClusterInfo->connections_total = mndGetNumOfConnections(pMnode);
pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB);

@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
goto _OVER;
}
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) {
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version);
goto _OVER;
}
@ -305,12 +305,13 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
connectRsp.enableAuditDelete = tsEnableAuditDelete;
tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
connectRsp.whiteListVer = pUser->ipWhiteListVer;
tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer));
(void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version,
td_buildinfo, td_gitinfo);
mndGetMnodeEpSet(pMnode, &connectRsp.epSet);
int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp);
@ -709,6 +710,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
batchRsp.enableAuditDelete = tsEnableAuditDelete;
int32_t sz = taosArrayGetSize(batchReq.reqs);
for (int i = 0; i < sz; i++) {
@ -813,7 +815,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) {
int32_t code = 0;
int32_t lino = 0;
SServerVerRsp rsp = {0};
tstrncpy(rsp.ver, td_version, sizeof(rsp.ver));
int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);
if (contLen < 0) {


@@ -242,13 +242,13 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou
   SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
   SStreamTask* pTask = NULL;
-  int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, *pTaskList, pStream->conf.fillHistory,
+  int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, 0, *pTaskList, pStream->conf.fillHistory,
                                 pStream->subTableWithoutMd5, &pTask);
   if (code != 0) {
     return code;
   }
-  mDebug("doAddSinkTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+  mDebug("doAddSinkTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, isFillhistory);
   pTask->info.nodeId = pVgroup->vgId;
   pTask->info.epSet = mndGetVgroupEpset(pMnode, pVgroup);
@@ -356,20 +356,22 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh
   uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
   SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
-  int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
-                                *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pTask);
+  int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, pStream->conf.trigger,
+                                useTriggerParam ? pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory,
+                                pStream->subTableWithoutMd5, pTask);
   return code;
 }
 static void addNewTaskList(SStreamObj* pStream) {
   SArray* pTaskList = taosArrayInit(0, POINTER_BYTES);
   if (taosArrayPush(pStream->tasks, &pTaskList) == NULL) {
-    mError("failed to put array");
+    mError("failed to put into array");
   }
   if (pStream->conf.fillHistory) {
     pTaskList = taosArrayInit(0, POINTER_BYTES);
     if (taosArrayPush(pStream->pHTasksList, &pTaskList) == NULL) {
-      mError("failed to put array");
+      mError("failed to put into array");
     }
   }
 }
@@ -395,17 +397,18 @@ static void setHTasksId(SStreamObj* pStream) {
 }
 static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset, int64_t skey,
-                               SArray* pVerList, SVgObj* pVgroup, bool isFillhistory, bool useTriggerParam) {
+                               SArray* pVerList, SVgObj* pVgroup, bool isHistoryTask, bool useTriggerParam) {
   SStreamTask* pTask = NULL;
-  int32_t code = buildSourceTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask);
+  int32_t code = buildSourceTask(pStream, pEpset, isHistoryTask, useTriggerParam, &pTask);
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }
-  mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+  mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId,
+         isHistoryTask);
   if (pStream->conf.fillHistory) {
-    haltInitialTaskStatus(pTask, plan, isFillhistory);
+    haltInitialTaskStatus(pTask, plan, isHistoryTask);
   }
   streamTaskSetDataRange(pTask, skey, pVerList, pVgroup->vgId);
@@ -451,10 +454,12 @@ static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index) {
 static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset,
                              int64_t nextWindowSkey, SArray* pVerList, bool useTriggerParam) {
+  void* pIter = NULL;
+  int32_t code = 0;
+  SSdb* pSdb = pMnode->pSdb;
   addNewTaskList(pStream);
-  void* pIter = NULL;
-  SSdb* pSdb = pMnode->pSdb;
   while (1) {
     SVgObj* pVgroup;
     pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
@@ -467,10 +472,9 @@ static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream
       continue;
     }
-    int code =
-        doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
+    code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
     if (code != 0) {
-      mError("create stream task, code:%s", tstrerror(code));
+      mError("failed to create stream task, code:%s", tstrerror(code));
       // todo drop the added source tasks.
       sdbRelease(pSdb, pVgroup);
@@ -502,9 +506,9 @@ static int32_t buildAggTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillhist
   uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
   SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
-  int32_t code =
-      tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
-                     *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pAggTask);
+  int32_t code = tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, pStream->conf.trigger,
+                                useTriggerParam ? pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory,
+                                pStream->subTableWithoutMd5, pAggTask);
   return code;
 }
@@ -512,19 +516,20 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan,
                             SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) {
   int32_t code = 0;
   SStreamTask* pTask = NULL;
+  const char* id = NULL;
   code = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask);
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }
+  id = pTask->id.idStr;
   if (pSnode != NULL) {
     code = mndAssignStreamTaskToSnode(pMnode, pTask, plan, pSnode);
-    mDebug("doAddAggTask taskId:%s, snode id:%d, isFillHistory:%d", pTask->id.idStr, pSnode->id, isFillhistory);
+    mDebug("doAddAggTask taskId:%s, %p snode id:%d, isFillHistory:%d", id, pTask, pSnode->id, isFillhistory);
   } else {
     code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup);
-    mDebug("doAddAggTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+    mDebug("doAddAggTask taskId:%s, %p vgId:%d, isFillHistory:%d", id, pTask, pVgroup->vgId, isFillhistory);
   }
   return code;
 }
@@ -678,7 +683,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
   if (numOfPlanLevel > 1 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) {
     // add extra sink
     hasExtraSink = true;
-    int32_t code = addSinkTask(pMnode, pStream, pEpset);
+    code = addSinkTask(pMnode, pStream, pEpset);
     if (code != TSDB_CODE_SUCCESS) {
       return code;
     }
@@ -692,7 +697,8 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
     if (terrno != 0) code = terrno;
     TAOS_RETURN(code);
   }
-  code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, numOfPlanLevel == 1);
+  code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, (numOfPlanLevel == 1));
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }

Some files were not shown because too many files have changed in this diff.