other: merge main

Haojun Liao 2024-11-11 15:27:53 +08:00
commit 06c02c14c2
507 changed files with 47862 additions and 14096 deletions

.github/workflows/taoskeeper-ci.yml (new file, +58 lines)

@ -0,0 +1,58 @@
name: TaosKeeper CI
on:
push:
paths:
- tools/keeper/**
jobs:
build:
runs-on: ubuntu-latest
name: Run unit tests
steps:
- name: Checkout the repository
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.18
- name: Install system dependencies
run: |
sudo apt update -y
sudo apt install -y build-essential cmake libgeos-dev
- name: Install TDengine
run: |
mkdir debug
cd debug
cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
make -j 4
sudo make install
which taosd
which taosadapter
which taoskeeper
- name: Start taosd
run: |
cp /etc/taos/taos.cfg ./
sudo echo "supportVnodes 256" >> taos.cfg
nohup sudo taosd -c taos.cfg &
- name: Start taosadapter
run: nohup sudo taosadapter &
- name: Run tests with coverage
working-directory: tools/keeper
run: |
go mod tidy
sudo go test -v -ldflags="-X 'github.com/taosdata/taoskeeper/version.IsEnterprise=true'" -coverpkg=./... -coverprofile=coverage.out ./...
go tool cover -func=coverage.out
- name: Clean up
if: always()
run: |
if pgrep taosd; then sudo pkill taosd; fi
if pgrep taosadapter; then sudo pkill taosadapter; fi

.gitignore (+4 lines)

@ -159,4 +159,6 @@ pcre2.h
zconf.h
version.h
geos_c.h
source/libs/parser/src/sql.c
include/common/ttokenauto.h
!packaging/smokeTest/pytest_require.txt


@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
set(TD_BUILD_KEEPER_INTERNAL FALSE)
# set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF()
IF("${BUILD_KEEPER}" STREQUAL "")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "false")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "true")
SET(TD_BUILD_KEEPER TRUE)
ELSEIF(${BUILD_KEEPER} MATCHES "internal")
SET(TD_BUILD_KEEPER FALSE)
SET(TD_BUILD_KEEPER_INTERNAL TRUE)
ELSE()
SET(TD_BUILD_KEEPER FALSE)
ENDIF()
IF("${BUILD_TOOLS}" STREQUAL "")
IF(TD_LINUX)
IF(TD_ARM_32)


@ -0,0 +1,11 @@
# lemon
ExternalProject_Add(
lemon
SOURCE_DIR ${TD_CONTRIB_DIR}/lemon
CONFIGURE_COMMAND ""
BUILD_COMMAND "${C_COMPILER_LEMON}" -o ${TD_CONTRIB_DIR}/lemon/lemon ${TD_CONTRIB_DIR}/lemon/lemon.c
INSTALL_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_ALWAYS 1
)


@ -184,6 +184,17 @@ if(${BUILD_PCRE2})
cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
find_program(C_COMPILER_LEMON NAMES gcc)
if(C_COMPILER_LEMON)
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
else()
set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
endif()
# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
@ -261,11 +272,19 @@ if(${TD_DARWIN})
endif(${TD_DARWIN})
add_subdirectory(zlib EXCLUDE_FROM_ALL)
if(${TD_DARWIN})
target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlibstatic
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib
)
if(${TD_DARWIN})
target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlib
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib


@ -1149,7 +1149,7 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword. For a table with a composite primary key, only the data with the smallest primary key value is returned.
**Description**: Return the unique values of this column. The effect is similar to the `distinct` keyword. For duplicate data, return the row with the earliest timestamp. For a table with a composite primary key, only the data with the smallest primary key value is returned.
**Return value type**: Same as the data type of the column being operated upon
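A hedged usage sketch of `UNIQUE` (the table and column names are hypothetical):
```sql
SELECT UNIQUE(voltage) FROM power_db.meters;
```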


@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
```
If the error Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path occurs,
check whether the TDengine client package is installed or TDengine was compiled and installed. If you are sure it is installed and the error still occurs, add -Djava.library.path=/usr/lib after the java command to specify the path for locating the shared library.
If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.


@ -4,6 +4,6 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
"@tdengine/websocket": "^3.1.0"
"@tdengine/websocket": "^3.1.1"
}
}


@ -3,7 +3,6 @@ const taos = require("@tdengine/websocket");
let dsn = 'ws://localhost:6041';
async function createConnect() {
try {
let conf = new taos.WSConfig(dsn);
conf.setUser('root');


@ -10,7 +10,6 @@ const groupId = "group1";
const clientId = "client1";
async function createConsumer() {
let groupId = "group1";
let clientId = "client1";
let configMap = new Map([


@ -116,10 +116,11 @@ create stream if not exists count_history_s fill_history 1 into count_history as
### Stream Computing Trigger Modes
When creating a stream, the trigger mode of stream computing can be specified with the TRIGGER directive. For non-window computations, stream computing is triggered in real time; for window computations, three trigger modes are currently provided, with WINDOW_CLOSE as the default.
When creating a stream, the trigger mode of stream computing can be specified with the TRIGGER directive. For non-window computations, stream computing is triggered in real time; for window computations, four trigger modes are currently provided, with WINDOW_CLOSE as the default.
1. AT_ONCE: triggered immediately on write.
2. WINDOW_CLOSE: triggered when the window closes (window closure is determined by the event time and can be used together with watermark).
3. MAX_DELAY time: computation is triggered when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, computation is triggered.
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closing window is computed and pushed out. The window is computed exactly once, at the moment it closes, and is not recomputed later. This mode currently supports only INTERVAL windows (without sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1, and FILL supports only PREV, NULL, NONE, and VALUE.
Window closure is determined by the event time; if the event stream is interrupted or persistently delayed, the event time cannot advance, which may make it impossible to obtain the latest computation results.
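A hedged sketch of a stream using the FORCE_WINDOW_CLOSE trigger under the constraints above; the stream, database, table, and column names are hypothetical:
```sql
CREATE STREAM power_stream
  TRIGGER FORCE_WINDOW_CLOSE
  IGNORE EXPIRED 1
  IGNORE UPDATE 1
  INTO power_db.avg_current AS
SELECT _wstart, AVG(current) FROM power_db.meters INTERVAL(1m);
```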
@ -228,3 +229,34 @@ RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
```
Without IF EXISTS, an error is reported if the stream does not exist, and stream computing is resumed if it does. With IF EXISTS, success is returned if the stream does not exist, and stream computing is resumed if it does. If IGNORE UNTREATED is specified, data written while the stream was paused is ignored when computing resumes.
### Stream Recovery After an Upgrade
After upgrading TDengine, if stream computing is incompatible, the streams must be dropped and recreated. The steps are as follows:
1. Modify taos.cfg and add `disableStream 1`.
2. Restart taosd. If startup fails, rename the stream directories so that taosd does not try to load stream-computing state from them at startup (renaming instead of deleting avoids the risk of accidental damage). The folders to modify are $dataDir/vnode/vnode*/tq/stream, where $dataDir is the directory in which TDengine stores data; under $dataDir/vnode/ there are multiple directories named vnode1, vnode2 ... vnode*, and in every one of them the tq/stream directory must be renamed to tq/stream.bk.
3. Start taos and run:
```sql
drop stream xxxx; ---- xxxx is the stream name
flush database stream_source_db; ---- the database holding the super table the stream reads from
flush database stream_dest_db; ---- the database holding the super table the stream writes to
```
For example:
```sql
create stream streams1 into test1.streamst as select _wstart, count(a) c1 from test.st interval(1s) ;
drop stream streams1;
flush database test;
flush database test1;
```
4. Stop taosd.
5. Modify taos.cfg: remove `disableStream 1`, or set disableStream to 0.
6. Start taosd.


@ -14,18 +14,26 @@ TDengine Enterprise is equipped with a powerful visual data-management tool, taosExplorer
## Supported Data Sources
The data sources currently supported by TDengine are as follows:
The data sources currently supported by TDengine are as follows
1. Aveva PI System: an industrial data management and analytics platform, formerly OSIsoft PI System, that collects, integrates, analyzes, and visualizes industrial data in real time, helping enterprises achieve intelligent decision-making and fine-grained management.
2. Aveva Historian: industrial big-data analytics software, formerly Wonderware Historian, designed for industrial environments to store, manage, and analyze real-time and historical data from all kinds of industrial devices and sensors.
3. OPC DA/UA: OPC stands for Open Platform Communications, an open, standardized communication protocol for data exchange between automation devices from different vendors. Originally developed by Microsoft to address the poor interoperability between devices in industrial control, the protocol was first released in 1996 as OPC DA (Data Access), mainly for real-time data collection and control; in 2006 the OPC Foundation released the OPC UA (Unified Architecture) standard, a service-based, object-oriented protocol with greater flexibility and scalability that has become the mainstream version of OPC.
4. MQTT: short for Message Queuing Telemetry Transport, a lightweight publish/subscribe messaging protocol designed for low-overhead, low-bandwidth instant messaging, widely used in IoT, small devices, mobile applications, and similar fields.
5. Kafka: an open-source stream-processing platform developed by the Apache Software Foundation, mainly used to process real-time data and to provide a unified, high-throughput, low-latency messaging system. Its speed, scalability, durability, and distributed design enable it to handle hundreds of thousands of reads and writes per second and support thousands of clients while keeping data reliable and available.
6. OpenTSDB: a distributed, scalable time-series database built on HBase, mainly used to store, index, and serve metrics collected from large-scale clusters (network devices, operating systems, applications, etc.), making such data easier to access and visualize.
7. CSV: short for Comma Separated Values, a comma-delimited plain-text file format commonly used by spreadsheet and database software.
8. TDengine 2: any TDengine instance running TDengine 2.x.
9. TDengine 3: any TDengine instance running TDengine 3.x.
10. Relational databases such as MySQL, PostgreSQL, and Oracle.
| Data Source | Supported Versions | Description |
| --- | --- | --- |
| Aveva PI System | PI AF Server Version 2.10.9.593 or above | An industrial data management and analytics platform, formerly OSIsoft PI System, that collects, integrates, analyzes, and visualizes industrial data in real time, helping enterprises achieve intelligent decision-making and fine-grained management |
| Aveva Historian | AVEVA Historian 2020 RS SP1 | Industrial big-data analytics software, formerly Wonderware Historian, designed for industrial environments to store, manage, and analyze real-time and historical data from all kinds of industrial devices and sensors |
| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC stands for Open Platform Communications, an open, standardized communication protocol for data exchange between automation devices from different vendors. Originally developed by Microsoft to address the poor interoperability between devices in industrial control, the protocol was first released in 1996 as OPC DA (Data Access), mainly for real-time data collection and control. |
| OPC UA | KeepWare KEPServerEx 6.5 | In 2006 the OPC Foundation released the OPC UA (Unified Architecture) standard, a service-based, object-oriented protocol with greater flexibility and scalability that has become the mainstream version of OPC |
| MQTT | emqx: 3.0.0 to 5.7.1<br/> hivemq: 4.0.0 to 4.31.0<br/> mosquitto: 1.4.4 to 2.0.18 | Short for Message Queuing Telemetry Transport, a lightweight publish/subscribe messaging protocol designed for low-overhead, low-bandwidth instant messaging, widely used in IoT, small devices, mobile applications, and similar fields. |
| Kafka | 2.11 ~ 3.8.0 | An open-source stream-processing platform developed by the Apache Software Foundation, mainly used to process real-time data and to provide a unified, high-throughput, low-latency messaging system. Its speed, scalability, durability, and distributed design enable it to handle hundreds of thousands of reads and writes per second and support thousands of clients while keeping data reliable and available. |
| InfluxDB | 1.7, 1.8, 2.0-2.7 | InfluxDB is a popular open-source time-series database optimized for handling large volumes of time-series data. |
| OpenTSDB | 2.4.1 | A distributed, scalable time-series database built on HBase, mainly used to store, index, and serve metrics collected from large-scale clusters (network devices, operating systems, applications, etc.), making such data easier to access and visualize. |
| MySQL | 5.6, 5.7, 8.0+ | MySQL is one of the most popular relational database management systems. Thanks to its small size, speed, low total cost of ownership, and above all its open source code, it is the database of choice for most small, medium, and large website deployments. |
| Oracle | 11G/12c/19c | Oracle Database is a popular relational database management system with good portability, ease of use, and strong functionality, suitable for all kinds of large, medium, and small computing environments. It is an efficient, reliable database solution built for high throughput. |
| PostgreSQL | v15.0+ | PostgreSQL is a very powerful open-source client/server relational database management system with many of the features found in large commercial RDBMSs, including transactions, subselects, triggers, views, foreign-key referential integrity, and sophisticated locking. |
| SQL Server | 2012/2022 | Microsoft SQL Server is a relational database management system developed by Microsoft, noted for its ease of use, good scalability, and tight integration with related software. |
| MongoDB | 3.6+ | MongoDB is a product positioned between relational and non-relational databases, widely used in content management systems, mobile applications, the Internet of Things, and many other fields. |
| CSV | - | Short for Comma Separated Values, a comma-delimited plain-text file format commonly used by spreadsheet and database software. |
| TDengine 2.x | 2.4 or 2.6+ | Legacy TDengine versions, no longer maintained; upgrading to the latest 3.0 release is recommended. |
| TDengine 3.x | source version+ | Subscription from a specified database or super table on the source TDengine via TMQ. |
## Data Extraction, Filtering, and Transformation


@ -0,0 +1,54 @@
---
title: "ARIMA"
sidebar_label: "ARIMA"
---
This section describes how to use the ARIMA algorithm model.
## Overview
ARIMA, the Autoregressive Integrated Moving Average model, also written ARIMA(p,d,q), is the most common statistical model for time-series forecasting.
ARIMA is an autoregressive model that needs only the target variable itself to predict subsequent values. It requires the time series to be **stationary**, or stationary after differencing; on non-stationary data it **cannot** produce correct results.
> A stationary time series is one whose properties do not change with the time of observation. A time series with trend or seasonality is not stationary: the trend and the seasonality give the series different properties at different times.
The following parameters can be supplied dynamically to control the ARIMA model generated during forecasting.
- p: order of the autoregressive model
- d: order of differencing
- q: order of the moving-average model
### Parameters
The analysis platform uses an automated ARIMA procedure, so each computation automatically fits the best model to the input data and forecasts from that model.

|Parameter|Description|Required|
|---|---|-----|
|period|number of data points per cycle in the input series; if unset or set to 0, a non-seasonal/non-periodic ARIMA model is used|Optional|
|start_p|starting order of the autoregressive model, an integer from 0; values greater than 10 are not recommended|Optional|
|max_p|maximum order of the autoregressive model, an integer from 0; values greater than 10 are not recommended|Optional|
|start_q|starting order of the moving-average model, an integer from 0; values greater than 10 are not recommended|Optional|
|max_q|maximum order of the moving-average model, an integer from 0; values greater than 10 are not recommended|Optional|
|d|order of differencing|Optional|
The four parameters `start_p`, `max_p`, `start_q`, and `max_q` bound the range within which the model searches for the optimal fit. For the same input data, the larger the range, the more resources are consumed and the longer the system takes to respond.
### Example and Results
Forecast the column i32, where every 10 points of the input form one cycle; start_p starts at 1 with a maximum of 5, start_q starts at 1 with a maximum of 5; the forecast returns the bounds of the 95% confidence interval.
```
FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5")
```
```json5
{
"rows": fc_rows, // 返回结果的行数
"period": period, // 返回结果的周期性,同输入
"alpha": alpha, // 返回结果的置信区间,同输入
"algo": "arima", // 返回结果使用的算法
"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
"res": res // 列模式的结果
}
```
### References
- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model
- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415


@ -0,0 +1,43 @@
---
title: "HoltWinters"
sidebar_label: "HoltWinters"
---
This section describes how to use the HoltWinters algorithm model.
## Overview
HoltWinters, also known as multiple exponential smoothing, is suited to non-stationary series with a linear trend and periodic fluctuations. It uses exponential smoothing to keep the model parameters adapting to the changes in the non-stationary series and produces **short-term** forecasts of future trends.
HoltWinters comes with two kinds of seasonal components: the **additive** model is usually chosen when the seasonal variation stays roughly constant across the series, and the **multiplicative** model when the seasonal variation changes in proportion to the level of the series.
The model does not compute confidence-interval bounds for its output; the upper and lower bounds of the 95% confidence interval are identical to the forecast values.
### Parameters
The analysis platform uses an automated HoltWinters procedure, so each computation automatically fits the best model to the input data and forecasts from that model.

|Parameter|Description|Required|
|---|---|---|
|period|number of data points per cycle in the input series; if unset or set to 0, simple exponential smoothing is used to fit the data and forecast from it|Optional|
|trend|whether the trend uses the additive or the multiplicative model|Optional|
|seasonal|whether the seasonality uses the additive or the multiplicative model|Optional|
The parameters `trend` and `seasonal` can each be set to `add` (additive) or `mul` (multiplicative).
### Example and Results
Forecast the column i32, where every 10 points of the input form one cycle, with a multiplicative trend and multiplicative seasonality:
```
FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")
```
```json5
{
"rows": rows, // 返回结果的行数
"period": period, // 返回结果的周期性,该结果与输入的周期性相同,如果没有周期性,该值为 0
"algo": 'holtwinters' // 返回结果使用的计算模型
"mse": mse, // 最小均方误差minmum square error
"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了两列 [timestamp][fc_results]。
}
```
### References
- https://en.wikipedia.org/wiki/Exponential_smoothing
- https://orangematter.solarwinds.com/2019/12/15/holt-winters-forecasting-simplified/


@ -0,0 +1,46 @@
---
title: "Anomaly-detection"
sidebar_label: "Anomaly-detection"
---
This section describes how to use the anomaly-detection algorithm models.
## Overview
The analysis platform provides six anomaly-detection models in three categories: statistics-based, data-density-based, and deep-learning-based. When no detection method is specified, the iqr method is called by default.
### Statistical Methods
- k-sigma<sup>[1]</sup>: the ***68–95–99.7 rule***. ***k*** defaults to 3, i.e. the boundary is 3 standard deviations around the series mean, and values beyond it are anomalies. KSigma assumes the data as a whole follow a normal distribution; a point deviating from the mean by more than k standard deviations is treated as an anomaly.

|Parameter|Description|Required|Default|
|---|---|---|---|
|k|multiple of the standard deviation|Optional|3|

- IQR<sup>[2]</sup>: the interquartile range (IQR) is a measure of variability. Quartiles divide a rank-ordered data set into four equal parts: Q1 (the first quartile), Q2 (the second quartile), and Q3 (the third quartile). The IQR is defined as Q3 − Q1, and values outside [Q1 − 1.5 × IQR, Q3 + 1.5 × IQR] are anomalies. No input parameters.
- Grubbs<sup>[3]</sup>: also known as Grubbs' test, the maximum standardized residual test. Grubbs is typically used to check whether the maximum or minimum deviates anomalously from the mean; the univariate data set is assumed to follow an approximately standard normal distribution. The method cannot be used on non-normally distributed data. No input parameters.
- SHESD<sup>[4]</sup>: seasonal ESD detection. ESD can detect multiple anomalies in time-series data. The upper bound ***k*** on the proportion of anomalies must be specified; the worst case is at most 49.9%, and data sets generally contain no more than 5% anomalies.

|Parameter|Description|Required|Default|
|---|---|---|---|
|k|proportion of anomalies in the input data, in the range $`1\le K \le 49.9`$ |Optional|5|

### Density-Based Methods
LOF<sup>[5]</sup>: the Local Outlier Factor, a density-based local-outlier detection algorithm proposed by Breunig in 2000, is suited to data whose clusters vary widely in density. Based on how densely data points surround each point, it first computes a local reachability density for every point and from that derives an outlier factor indicating how much of an outlier the point is: the larger the factor, the more anomalous the point; the smaller, the less anomalous. Finally it outputs the top(n) most anomalous points.
### Deep-Learning Methods
An anomaly-detection model based on an autoencoder. It can give good detection results on periodic data, but the model must be trained on the input time series, and the trained model must be deployed to the service directory before it can be run and used.
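As a hedged illustration, these detectors are invoked through the ANOMALY_WINDOW SQL clause described in the platform documentation; the table and column names are hypothetical, and the k parameter follows the SHESD table above:
```sql
SELECT _wstart, _wend, COUNT(*)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=shesd,k=5");
```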
### References
1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
2. https://en.wikipedia.org/wiki/Interquartile_range
3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 19. doi:10.1155/2015/708948.
4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.


@ -0,0 +1,170 @@
---
title: "addins"
sidebar_label: "addins"
---
This section explains how to integrate your own forecasting and anomaly-detection algorithms into the TDengine analysis platform so that they can be invoked through SQL statements.
## Directory Layout
![Data-analysis architecture diagram](./pic/dir.png)

|Directory|Description|
|---|---|
|taos|Python source directory; it contains the algorithm directory algo, the miscellaneous directory misc, and the unit/integration-test directory test. Under algo, ad holds the anomaly-detection algorithm code and fc holds the forecasting algorithm code|
|script|directory for the installation and release scripts|
|model|holds the models trained on specific data sets|
|cfg|configuration-file directory|
## Conventions and Restrictions
Python files defining anomaly-detection algorithms must be placed in the /taos/algo/ad directory, and forecasting-algorithm Python files in the /taos/algo/fc directory, so that the system can properly load the Python files in those directories at startup.
### Class Naming Convention
Algorithm class names must start with an underscore and end with Service. For example, _KsigmaService is the implementation class of the KSigma anomaly-detection algorithm.
### Class Inheritance Convention
- Anomaly-detection algorithms must inherit from `AbstractAnomalyDetectionService` and implement its core abstract method `execute`.
- Forecasting algorithms must inherit from `AbstractForecastService` and likewise implement its core abstract method `execute`.
### Class Attribute Initialization
Each algorithm implementation class must statically initialize two class attributes:
- `name`: the keyword that triggers invocation, in lowercase letters
- `desc`: a description of the algorithm
### Input and Output Conventions of the Core Method
`execute` is the core processing method of an algorithm. When it is called, `self.list` has already been populated with the input array.
Anomaly-detection output:
The return value of `execute` is an array of the same length as `self.list`; positions holding -1 are the anomalous points. For example, for the input array [2, 2, 2, 2, 100], if 100 is an anomaly, the return value is [1, 1, 1, 1, -1].
Forecasting output:
For forecasting algorithms, the object attributes of `AbstractForecastService` are as follows:

|Attribute|Description|Default|
|---|---|---|
|period|periodicity of the input time series, i.e. how many data points make up one complete cycle; set it to 0 if the series has no periodicity|0|
|start_ts|start time of the forecast results|0|
|time_step|time interval between two consecutive forecast points|0|
|fc_rows|number of forecast values|0|
|return_conf|whether the forecast includes confidence-interval bounds; if not, the upper and lower bounds equal the forecast itself|1|
|conf|confidence-interval quantile|0.05|
The forecast result is returned as follows:
```python
return {
    "rows": self.fc_rows,   # number of forecast rows
    "period": self.period,  # periodicity of the data, same as the input
    "algo": "holtwinters",  # algorithm used for the forecast
    "mse": mse,             # MSE of the forecasting algorithm
    "res": res              # result arrays [timestamps, forecasts, lower confidence bounds, upper confidence bounds]
}
```
## Example Code
```python
import numpy as np
from service import AbstractAnomalyDetectionService

# The name of an algorithm implementation class must start with an underscore "_"
# and end with Service; _IqrService below is the implementation class of the IQR
# anomaly-detection algorithm.
class _IqrService(AbstractAnomalyDetectionService):
    """IQR algorithm definition class; inherits from AbstractAnomalyDetectionService and implements its abstract methods."""

    # Keyword that triggers the algorithm, lowercase ASCII (required)
    name = 'iqr'

    # Description of the algorithm (recommended)
    desc = """found the anomaly data according to the inter-quartile range"""

    def __init__(self):
        super().__init__()

    def execute(self):
        """execute holds the core algorithm logic; modify this implementation directly."""
        # self.list is the input value list, e.g. [1, 2, 3, 4, 5]; it is populated by the
        # parent class. To implement your own algorithm, replace the code below.
        lower = np.quantile(self.list, 0.25)
        upper = np.quantile(self.list, 0.75)
        min_val = lower - 1.5 * (upper - lower)
        max_val = upper + 1.5 * (upper - lower)
        threshold = [min_val, max_val]
        # The return value is a list of the same length as the input, with -1 at the
        # anomalous positions; e.g. the input above yields [1, 1, 1, 1, -1], marking [5] as an anomaly.
        return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list]

    def set_params(self, params):
        """This algorithm needs no input parameters; override the parent method and skip parameter handling."""
        pass
```
## Unit Tests
Add unit-test cases to anomaly_test.py in the test directory.
```python
def test_iqr(self):
    """Test the _IqrService class"""
    s = loader.get_service("iqr")

    # Set the input data to be checked
    s.set_input_list(AnomalyDetectionTest.input_list)

    # Exercise the set_params handling logic
    try:
        s.set_params({"k": 2})
    except ValueError as e:
        self.assertEqual(1, 0)

    r = s.execute()

    # Plot the anomaly-detection results
    draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr")

    # Check the results
    self.assertEqual(r[-1], -1)
    self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
```
## Algorithms That Require a Model
For algorithms that train a model on a specific data set, the trained model must be saved in the model directory once training completes. Note that every algorithm needs its own folder: for example, the auto_encoder training algorithm creates an autoencoder directory under model, and all models trained with that algorithm on different data sets must be placed in that directory.
Trained models are saved with joblib.
Create the corresponding folder under model to hold each model.
For how a saved model is invoked, see encoder.py: by calling the set_params method with the parameter `{"model": "ad_encoder_keras"}`, the user selects that model for the computation.
A concrete invocation looks like this:
```python
def test_autoencoder_ad(self):
    # Obtain the specific algorithm service
    s = loader.get_service("ac")
    data = self.__load_remote_data_for_ad()

    # Set the input data for anomaly detection
    s.set_input_list(data)

    # Select the model, previously trained on this data set
    s.set_params({"model": "ad_encoder_keras"})

    # Run the detection and collect the result
    r = s.execute()

    num_of_error = -(sum(filter(lambda x: x == -1, r)))
    self.assertEqual(num_of_error, 109)
```


@ -0,0 +1,322 @@
---
sidebar_label: Data Analysis
title: Data Analysis
---
## Overview
ANode (Analysis Node) is an extension component through which TDengine provides data-analysis functionality. It offers analysis services over a RESTful interface, extending TDengine with advanced time-series analysis.
ANode is a stateless data-analysis node; a cluster may contain multiple ANodes that are independent of one another. Once an ANode is registered with a TDengine cluster, time-series analysis tasks can be invoked and completed through SQL statements.
The figure below sketches the architecture of the data-analysis feature.
![Data-analysis architecture diagram](./pic/data-analysis.png)
## Installation and Deployment
### Prerequisites
An ANode requires Python 3.10 or later on the node, together with pip, the tool that installs Python packages automatically; also make sure the node can reach the Internet.
### Installing and Uninstalling
ANode is installed and deployed from a dedicated package, TDengine-enterprise-anode-1.x.x.tar.gz; the procedure is the same as installing TDengine.
```bash
tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
cd TDengine-enterprise-anode-1.0.0
sudo ./install.sh
```
To uninstall an ANode, run the command `rmtaosanode`.
### Miscellaneous
To avoid affecting the Python libraries already present on the target node, ANode runs in a Python virtual environment, installed by default under `/var/lib/taos/taosanode/venv/`. To avoid the cost of repeatedly reinstalling the virtual environment, uninstalling an ANode does not automatically delete it; if you are sure you no longer need the Python virtual environment, you can delete it manually.
## Starting and Stopping the Service
After installing an ANode, use `systemctl` to manage its service; the following commands start, stop, and check the status of the service.
```bash
systemctl start taosanoded
systemctl stop taosanoded
systemctl status taosanoded
```
## Directories and Configuration

|Directory/File|Description|
|---------------|------|
|/usr/local/taos/taosanode/bin|executable directory|
|/usr/local/taos/taosanode/resource|resource directory, linked to /var/lib/taos/taosanode/resource/|
|/usr/local/taos/taosanode/lib|library directory|
|/var/lib/taos/taosanode/model/|model directory, linked to /var/lib/taos/taosanode/model|
|/var/log/taos/taosanode/|log directory|
|/etc/taos/taosanode.ini|configuration file|
### Configuration
The RESTful service of the ANode is driven by uWSGI, so the configuration for the ANode and for uWSGI lives in the same file, as follows:
```ini
[uwsgi]
# charset
env = LC_ALL = en_US.UTF-8
# ip:port
http = 127.0.0.1:6050
# the local unix socket file that communicates with Nginx
#socket = 127.0.0.1:8001
#socket-timeout = 10
# base directory
chdir = /usr/local/taos/taosanode/lib
# initialize python file
wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
# call module of uWSGI
callable = app
# auto remove unix Socket and pid file when stopping
vacuum = true
# socket exec model
#chmod-socket = 664
# uWSGI pid
uid = root
# uWSGI gid
gid = root
# main process
master = true
# the number of worker processes
processes = 2
# pid file
pidfile = /usr/local/taos/taosanode/taosanode.pid
# enable threads
enable-threads = true
# the number of threads for each process
threads = 4
# memory usage report
memory-report = true
# smooth restart
reload-mercy = 10
# conflict with systemctl, so do NOT uncomment this
# daemonize = /var/log/taos/taosanode/taosanode.log
# log directory
logto = /var/log/taos/taosanode/taosanode.log
# uWSGI monitor port
stats = 127.0.0.1:8387
# python virtual environment directory
virtualenv = /usr/local/taos/taosanode/venv/
[taosanode]
# default app log file
app-log = /var/log/taos/taosanode/taosanode.app.log
# model storage directory
model-dir = /usr/local/taos/taosanode/model/
# default log level
log-level = DEBUG
# draw the query results
draw-result = 0
```
**Note**
Do not set the `daemonize` parameter; it conflicts with systemctl and prevents the service from starting properly.
## Basic ANode Operations
### Managing ANodes
#### Creating an ANode
```sql
CREATE ANODE {node_url}
```
node_url is the IP and port of the ANode providing the service, e.g. `create anode 'http://localhost:6050'`. An ANode that has been started but not registered with a TDengine cluster cannot provide service. Registering an ANode with two or more clusters is not recommended.
#### Listing ANodes
List all data-analysis nodes in the cluster, including their `FQDN`, `PORT`, and `STATUS`:
```sql
SHOW ANODES;
```
#### Listing the Available Time-Series Analysis Services
```SQL
SHOW ANODES FULL;
```
#### Forcing a Refresh of the Cluster's Algorithm Cache
```SQL
UPDATE ANODE {node_id}
UPDATE ALL ANODES
```
#### Dropping an ANode
```sql
DROP ANODE {anode_id}
```
Dropping an ANode only removes it from the TDengine cluster; starting and stopping the ANode still requires the `systemctl` command.
### Time-Series Analysis Features
#### White-Noise Check
The RESTful services of the analysis platform require that the input time series not be white noise (White Noise Data, WND) or a random sequence, so all data undergoes a white-noise check by default. The current check uses the standard `Ljung-Box` test, whose statistic requires a pass over the entire input series.
If you can be certain that the input series is not white noise, an input parameter lets you skip the check before forecasting, saving CPU resources during analysis.
Independent white-noise detection on an input series is also supported (this detection capability is not yet exposed separately).
#### Resampling and Timestamp Alignment
The analysis platform can resample and preprocess the input data so that the output is processed at the regular interval the user specifies. Two cases are handled:
- Timestamp alignment. Real data may not arrive exactly on the timestamps the query specifies; the platform automatically aligns the data's time interval to the specified interval. For example, the input series [11, 22, 29, 41] with a specified interval of 10 is realigned to the series [10, 20, 30, 40].
- Resampling. The sampling frequency of the input exceeds that of the output: for example, if the input is sampled every 5 and the output every 10, the input series [0, 5, 10, 15, 20, 25, 30] is resampled to the interval-10 series [0, 10, 20, 30]; the data at [5, 15, 25] is discarded.
Note that the platform does not fill in missing data before forecasting: if the input series is [11, 22, 29, 49] and the requested interval is 10, the realigned series is [10, 20, 30, 50], and forecasting on that series returns an error.
#### Anomaly Detection
Anomaly detection applies a preset or user-specified algorithm to the input time-series data to determine the points that **may** be anomalous; several consecutive anomalous points are automatically merged into one contiguous (closed-interval) anomaly window. When there is only a single point, the anomaly window degenerates to a point whose start and end times coincide.
The anomaly window produced depends jointly on the detection algorithm and its parameters; the aggregate and scalar functions provided by TDengine can be applied to the data inside the window for querying or transformation.
For the input time series (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19), if the system detects (3, 91) and (4, 120) as anomalies, the returned anomaly window is the closed interval [3, 4].
##### Syntax
```SQL
ANOMALY_WINDOW(column_name, option_expr)
option_expr: {"
algo=expr1
[,wncheck=1|0]
[,expr2]
"}
```
1. `column`: the input column for anomaly detection. Currently only a single column is supported, and it must be numeric; character types (e.g. `NCHAR`, `VARCHAR`, `VARBINARY`) are not supported, and **function expressions are not supported**.
2. `options`: a string. K=V pairs select the detection algorithm and its parameters, expressed as a comma-separated K=V string; the strings need no single quotes, double quotes, or escapes, and must not contain Chinese or other wide characters. For example, `algo=ksigma,k=2` selects the ksigma detection algorithm with input parameter 2.
3. The detection result can serve as the subquery input of an outer query; aggregate and scalar functions in the `SELECT` clause are used as in other window queries.
4. The input data undergoes the white-noise check by default; if the input is white noise, no (anomaly) window information is returned.
**Parameters**

|Parameter|Meaning|Default|
|---|---|---|
|algo|the detection algorithm invoked|iqr|
|wncheck|whether to run the white-noise check on the input column|0 or 1; the default is 1, meaning the check is performed|
The detection result is presented as windows, so the window-related pseudocolumns remain usable in this scenario:
1. `_WSTART`: anomaly-window start timestamp
2. `_WEND`: anomaly-window end timestamp
3. `_WDURATION`: anomaly-window duration
**Examples**
```SQL
--- detect anomalies in column i32 with the iqr algorithm.
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=iqr");
--- detect anomalies in column i32 with the ksigma algorithm, input parameter k = 2
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
```
```
taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
_wstart | _wend | count(*) |
====================================================================
2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
Query OK, 1 row(s) in set (0.028946s)
```
**Available anomaly-detection algorithms**
- iqr
- ksigma
- grubbs
- lof
- shesd
- tac
#### Forecasting
Forecasting takes a stretch of training data as input and predicts the trend of the time series over the next contiguous time interval.
##### Syntax
```SQL
FORECAST(column_expr, option_expr)
option_expr: {"
algo=expr1
[,wncheck=1|0]
[,conf=conf_val]
[,every=every_val]
[,rows=rows_val]
[,start=start_ts_val]
[,expr2]
"}
```
1. `column_expr`: the time-series column to forecast. As with anomaly detection, only numeric input is supported.
2. `options`: the parameters of the forecast function, used by the same rules as for anomaly_window. Forecasting additionally supports the parameters `conf`, `every`, `rows`, and `start`, with the following meanings:
**Parameters**

|Parameter|Meaning|Default|
|---|---|---|
|algo|the forecasting algorithm used|holtwinters|
|wncheck|white-noise (white noise data) check; 0 disables the check|1|
|conf|confidence level of the forecast, in the range [0, 100]|95|
|every|sampling interval of the forecast|the sampling interval of the input data|
|start|start timestamp of the forecast results|the last timestamp of the input data plus one sampling interval|
|rows|number of forecast records|10|
1. Three pseudocolumns are added to forecast query results: `_FROWTS`, the forecast timestamp; `_FLOW`, the lower confidence bound; and `_FHIGH`, the upper confidence bound. For algorithms without confidence intervals, the bounds equal the forecast values.
2. Changing the parameter `START` changes the start time of the returned forecast; it does not affect the forecast values themselves, only the start time.
3. `EVERY` may differ from the input sampling frequency, but it can only be lower than or equal to it; it cannot be **higher** than the input's sampling frequency.
4. For algorithms that do not compute confidence intervals, the upper and lower bounds in the result degenerate to a single point even if a confidence level is specified.
**Examples**
```SQL
--- forecast with the arima algorithm; the result is 10 records (the default), the data undergoes the white-noise check, and the default confidence level is 95%.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
FROM ai.ftb;
--- forecast with the arima algorithm on periodic input with 10 samples per cycle; the returned confidence interval is 95%.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
FROM ai.ftb;
```
```
taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
_flow | _fhigh | _frowts | forecast(i32) |
========================================================================================
10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
-21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 |
-78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 |
-154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 |
-253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 |
-375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 |
-514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 |
-680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 |
-868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 |
-1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 |
```
**Available forecasting algorithms**
- arima
- holtwinters

Binary image file added (49 KiB; not shown).
Binary image file added (7.1 KiB; not shown).


@ -147,7 +147,7 @@ TDengine's multi-tier storage also offers the following advantages in use.
The table below lists the commonly used ports of some TDengine interfaces and components; all of them can be changed via parameters in the configuration file.
| Interface or Component | Port | Protocol |
|:------------------------------------:|:----------:|:-------:|
|:-----------------------------------------:|:----------:|:--------:|
| Native interface (taosc) | 6030 | TCP |
| RESTful interface | 6041 | TCP |
| WebSocket interface | 6041 | TCP |


@ -76,7 +76,7 @@ dataDir /mnt/data6 2 0
|s3UploadDelaySec | how long a data file must remain unmodified before being uploaded to S3, in seconds. Minimum 1; maximum 2592000 (30 days); default 60 seconds |
|s3PageCacheSize |number of S3 page-cache pages, in pages. Minimum 4; maximum 1024*1024\*1024; default 4096|
|s3MigrateIntervalSec | trigger period for automatic upload of local data files to S3, in seconds. Minimum 600; maximum 100000; default 3600 |
|s3MigrateEnabled | whether to migrate to S3 automatically; default 1, meaning automatic migration is enabled; may be set to 0 |
|s3MigrateEnabled | whether to migrate to S3 automatically; default 0, meaning automatic migration is disabled; may be set to 1 |
### Checking Parameter Validity
@ -108,9 +108,37 @@ s3migrate database <db_name>;
| # | Parameter | Default | Minimum | Maximum | Description |
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
| 1 | s3_keeplocal | 3650 | 1 | 365000 | number of days the data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunksize | 262144 | 131072 | 1048576 | size threshold for uploaded objects; like the TSDB_PAGESIZE parameter it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 0 | 0 | 1 | whether a TSDB file group is automatically compacted when first uploaded to S3 |
| 1 | s3_keeplocal | 365 | 1 | 365000 | number of days the data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | size threshold for uploaded objects; like the tsdb_pagesize parameter it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 1 | 0 | 1 | whether a TSDB file group is automatically compacted when first uploaded to S3 |
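A minimal sketch of setting these options at database creation (the database name is hypothetical; since s3_chunkpages cannot be modified later, it must be chosen here):
```sql
CREATE DATABASE sensor_db S3_KEEPLOCAL 365 S3_CHUNKPAGES 131072 S3_COMPACT 1;
```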
### Estimating Object-Storage Reads and Writes
The cost of using an object-storage service is tied to the amount of data stored and the number of requests; upload and download are described separately below.
#### Uploading Data
When TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the data files involved are split into chunks, each 512 MB by default (`s3_chunkpages * tsdb_pagesize`). Except for the last chunk, which remains on the local file system, the chunks are uploaded to the object-storage service.
```math
uploads = data file size / (s3_chunkpages × tsdb_pagesize) - 1
```
When creating a database, the size of each chunk, and hence the number of uploads per data file, can be adjusted via the `s3_chunkpages` parameter.
Files of other types, such as head, stt, and sma files, remain on the local file system to speed up queries that rely on precomputation.
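As a worked example of the upload count, assume a hypothetical 2048 MB data file with the default 512 MB chunk size: the file is split into four chunks, the last of which stays on the local file system while the other three are uploaded.
```math
uploads = 2048 MB / 512 MB - 1 = 3
```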
#### Downloading Data
When a query needs to access data in object storage, TSDB does not download the whole data file; it computes the position of the required data within the file and downloads only that data into the TSDB page cache before returning it to the query engine. Subsequent queries check the page cache first; if the data is already cached it is used directly, without downloading it from object storage again, which effectively reduces the number of downloads.
Several adjacent data pages are downloaded from object storage together as one data block to reduce the number of downloads. The size of each data page is set at database creation via the `tsdb_pagesize` parameter; the default is 4 KB.
```math
downloads = number of data blocks the query needs - number of data blocks already cached
```
The page cache is an in-memory cache, so after a node restart the data must be downloaded again for queries. The cache uses an LRU (Least Recently Used) policy: when cache space runs out, the least recently used data is evicted. The cache size can be adjusted with the `s3PageCacheSize` parameter; generally, the larger the cache, the fewer the downloads.
## Azure Blob Storage
This section describes how to use Microsoft Azure Blob object storage with TDengine Enterprise. This feature extends the 'object storage' feature of the previous section and additionally depends on the S3 gateway provided by the Flexify service. With appropriate parameter configuration, most of the colder time-series data can be stored in the Azure Blob service.
@ -135,3 +163,15 @@ s3BucketName td-test
- all S3 services are assumed to point to the same data source, and operations on the individual S3 services are completely equivalent
- if an operation fails on one S3 service, it switches to another service; if all services fail, the last error code produced is returned
- at most 10 S3 service configurations are supported
### Without the Flexify Service
The user interface is the same as for S3; the difference lies in the following three parameters:

| # | Parameter | Example | Description |
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | colon-separated accountId:accountKey |
| 3 | s3BucketName | test-container | Container name |
Here fd2d01c73 is the account ID; the Microsoft Blob storage service supports only HTTPS, not HTTP.


@ -26,6 +26,22 @@ SHOW USERS;
```sql
ALTER USER TEST DROP HOST HOST_NAME1
```
Notes
- Both the open-source and enterprise editions accept the additions and can query them, but the open-source edition does not enforce any IP restrictions.
- `create user u_write pass 'taosdata1' host 'iprange1','iprange2'` can add multiple IP ranges at once; the server deduplicates them, and deduplication requires the IP ranges to be exactly identical (see the sketch after this list).
- 127.0.0.1 is added to the whitelist by default and can be queried in the whitelist.
- The IP set of the cluster's nodes is automatically added to the whitelist but cannot be queried.
- When taosAdapter and taosd are not on the same machine, the taosAdapter IP must be added to the taosd whitelist manually.
- In a cluster, enableWhiteList must be set identically on every node, either all false or all true; otherwise the cluster cannot start.
- Whitelist changes take effect within 1 s, and never more than 2 s. Each change has a slight impact on send/receive performance (one extra check, negligible), and after the change the impact is negligible; during the change there is no impact on the cluster or on clients currently connected (assuming their IPs are within the whitelist).
- If you add two IP ranges, 192.168.1.1/16 (call it A) and 192.168.1.1/24 (call it B), then strictly speaking A contains B, but because the cases would get too complex, A and B are not merged.
- Deletions must match exactly: if 192.168.1.1/24 was added, then 192.168.1.1/24 must be used to delete it.
- Only root has permission to add or remove IP whitelists for other users.
- Compatible with earlier versions, but rolling back from the current version to an earlier one is not supported.
- x.x.x.x/32 and x.x.x.x belong to the same IP range and are displayed as x.x.x.x.
- If a client receives 0.0.0.0/0, the whitelist is not enabled.
- If the whitelist changes, clients detect it via the heartbeat.
- At most 2048 IPs can be added per user.
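A hedged sketch of whitelist management, following the CREATE USER ... HOST form above and assuming an ADD HOST counterpart to the DROP HOST statement shown earlier in this section; the user name and IP ranges are hypothetical:
```sql
CREATE USER u_write PASS 'taosdata1' HOST '192.168.1.0/24','10.1.0.0/16';
ALTER USER u_write ADD HOST '172.16.5.0/24';
ALTER USER u_write DROP HOST '10.1.0.0/16';
```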
## Audit Logs


@ -22,34 +22,45 @@ taosKeeper can be installed in two ways:
taosKeeper runs in the operating-system terminal and supports three configuration methods: command-line arguments, environment variables, and a configuration file, with precedence in that order. In general we recommend the configuration file.
### Command-Line Arguments and Environment Variables
The command-line arguments and environment variables are described in the output of the command `taoskeeper --help`. Here is an example:
```shell
Usage of taosKeeper v3.3.2.0:
--debug enable debug mode. Env "TAOS_KEEPER_DEBUG"
-P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000)
Usage of taoskeeper v3.3.3.0:
-R, --RotationInterval string interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL" (default "15s")
-c, --config string config path default /etc/taos/taoskeeper.toml
--drop string run taoskeeper in command mode, only support old_taosd_metric_stables.
--environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"
--fromTime string parameter of transfer, example: 2020-01-01T00:00:00+08:00 (default "2020-01-01T00:00:00+08:00")
--gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000)
-h, --help Print this help message and exit
--instanceId int instance ID. Env "TAOS_KEEPER_INSTANCE_ID" (default 64)
--log.compress whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"
--log.keepDays uint log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS" (default 30)
--log.level string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos")
--log.reservedDiskSize string reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE" (default "1GB")
--log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info")
--metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log")
--metrics.database.options.buffer int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER" (default 64)
--metrics.database.options.cachemodel string database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL" (default "both")
--metrics.database.options.keep int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP" (default 90)
--metrics.database.options.vgroups int database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS" (default 1)
--metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"
--metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"
-P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043)
--tdengine.host string TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST" (default "127.0.0.1")
--tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata")
--tdengine.port int TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT" (default 6041)
--tdengine.username string TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME" (default "root")
--tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata")
--tdengine.usessl TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"
--metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"
--metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log")
--metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"
--environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"
--log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5)
--log.rotationTime duration log rotation time. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "100000000")
-c, --config string config path default /etc/taos/taoskeeper.toml
--transfer string run taoskeeper in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit
-V, --version Print the version and exit
-h, --help Print this help message and exit
```
### Configuration File
taosKeeper supports specifying a configuration file with the command `taoskeeper -c <keeper config file>`.
@ -57,20 +68,18 @@ taosKeeper supports specifying a configuration file with the command `taoskeeper
If no taosKeeper configuration file is specified and `/etc/taos/taoskeeper.toml` does not exist either, the default configuration is used.
**Below is an example configuration file:**
```toml
# Start with debug middleware for gin
debug = false
# Listen port, default is 6043
```toml
# The ID of the currently running taoskeeper instance, default is 64.
instanceId = 64
# Listening port, default is 6043.
port = 6043
# log level
loglevel = "info"
# go pool size
# Go pool size
gopoolsize = 50000
# interval for metrics
# Interval for metrics
RotationInterval = "15s"
[tdengine]
@ -81,20 +90,21 @@ password = "taosdata"
usessl = false
[metrics]
# metrics prefix in metrics names.
# Metrics prefix in metrics names.
prefix = "taos"
# export some tables that are not super table
# Export some tables that are not super table.
tables = []
# database for storing metrics data
# Database for storing metrics data.
[metrics.database]
name = "log"
# database options for db storing metrics data
# Database options for db storing metrics data.
[metrics.database.options]
vgroups = 1
buffer = 64
KEEP = 90
keep = 90
cachemodel = "both"
[environment]
@ -102,9 +112,19 @@ cachemodel = "both"
incgroup = false
[log]
rotationCount = 5
rotationTime = "24h"
rotationSize = 100000000
# The directory where log files are stored.
# path = "/var/log/taos"
level = "info"
# Number of log file rotations before deletion.
rotationCount = 30
# The number of days to retain log files.
keepDays = 30
# The maximum size of a log file before rotation.
rotationSize = "1GB"
# If set to true, log files will be compressed.
compress = false
# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit.
reservedDiskSize = "1GB"
```
## 启动
@ -118,7 +138,6 @@ monitorFqdn localhost # FQDN of the taoskeeper service
For TDengine monitoring configuration, see: [TDengine monitoring configuration](../../../operation/monitor).
<Tabs>
<TabItem label="Linux" value="linux">
@ -188,7 +207,6 @@ Active: inactive (dead)
</TabItem>
</Tabs>
## Health Check
You can access taosKeeper's `check_health` endpoint to determine whether the service is alive; if the service is healthy it returns an HTTP 200 status code:
@ -208,7 +226,6 @@ Content-Length: 21
{"version":"3.3.2.3"}
```
## Data Collection and Monitoring
taosKeeper, as the exporter of TDengine monitoring metrics, records the monitoring data TDengine produces in a specified database (the default monitoring database is `log`); this monitoring data can be used to configure TDengine monitoring.
@ -216,6 +233,7 @@ taosKeeper, as the exporter of TDengine monitoring metrics, records the monitoring data TDengine produces
### Viewing Monitoring Data
You can inspect the super tables under the `log` database; each super table corresponds to a set of monitoring metrics, which are not detailed further here.
```shell
taos> use log;
Database changed.
@ -251,17 +269,14 @@ taos> select last_row(*) from taosd_dnodes_info;
Query OK, 1 row(s) in set (0.003168s)
```
### Configuring Monitoring with TDInsight
Once the monitoring data has been collected, you can use TDInsight to configure monitoring for TDengine; see the [TDinsight reference manual](../tdinsight/)
Once the monitoring data has been collected, you can use TDInsight to configure monitoring for TDengine; see the [TDinsight reference manual](../tdinsight/).
## Integrating Prometheus
taoskeeper provides a `/metrics` endpoint that returns monitoring data in Prometheus format; Prometheus can scrape monitoring data from taoskeeper to monitor TDengine.
### Exporting Metrics
The following `curl` command shows the data format returned by the `/metrics` endpoint:
@ -298,9 +313,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taosd Cluster
##### Labels Supported by the Monitoring Data
- `cluster_id`: cluster ID
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| ----------------------------------- | ------- | ------------------------------------- |
| taos_cluster_info_connections_total | counter | total number of connections |
@ -328,11 +345,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### dnode
##### Labels Supported by the Monitoring Data
- `cluster_id`: cluster ID
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode ID
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| ------------------------------ | ------- | ---------------------------------------------------------------------------------------- |
| taos_d_info_status | gauge | dnode status; the label value holds the status: ready means normal, offline means offline, unknown means unknown. |
@ -361,6 +380,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Data Directory
##### Labels Supported by the Monitoring Data
- `cluster_id`: cluster ID
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode ID
@ -368,6 +388,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- `data_dir_level`: data directory tier
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| --------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_data_dirs_avail | gauge | available space (in bytes) |
@ -377,12 +398,14 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Log Directory
##### Labels Supported by the Monitoring Data
- `cluster_id`: cluster ID
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode ID
- `log_dir_name`: log directory name
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| -------------------------------- | ----- | -------------------- |
| taos_taosd_dnodes_log_dirs_avail | gauge | available space (in bytes) |
@ -392,11 +415,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Log Counts
##### Labels Supported by the Monitoring Data
- `cluster_id`: cluster ID
- `dnode_ep`: dnode endpoint
- `dnode_id`: dnode ID
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| ---------------------- | ------- | ------------ |
| taos_log_summary_debug | counter | number of debug logs |
@ -404,14 +429,15 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
| taos_log_summary_info | counter | number of info logs |
| taos_log_summary_trace | counter | number of trace logs |
#### taosadapter
##### Labels Supported by the Monitoring Data
- `endpoint`: endpoint
- `req_type`: request type; 0 means REST, 1 means WebSocket
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| -------------------------------------- | ------- | -------------------- |
| taos_adapter_requests_fail | counter | number of failed requests |
@ -433,9 +459,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### taoskeeper
##### Labels Supported by the Monitoring Data
- `identify`: node endpoint
##### Metrics and Their Meanings

| Metric | Type | Meaning |
| ----------------------- | ----- | ------------------------------------- |
| taos_keeper_monitor_cpu | gauge | taoskeeper CPU usage (range 0~1) |
@ -444,6 +472,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
#### Other taosd Cluster Metrics
##### taos_m_info_role
- **Labels**:
- `cluster_id`: cluster ID
- `mnode_ep`: mnode endpoint
@ -453,6 +482,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: mnode role
##### taos_taos_sql_req_count
- **Labels**:
- `cluster_id`: cluster ID
- `result`: request result (possible values: Success, Failed)
@ -462,6 +492,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: number of SQL requests
##### taos_taosd_sql_req_count
- **Labels**:
- `cluster_id`: cluster ID
- `dnode_ep`: dnode endpoint
@ -474,6 +505,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: number of SQL requests
##### taos_taosd_vgroups_info_status
- **Labels**:
- `cluster_id`: cluster ID
- `database_name`: database name
@ -482,6 +514,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: virtual-group status; 0 means unsynced (no leader elected), 1 means ready.
##### taos_taosd_vgroups_info_tables_num
- **Labels**:
- `cluster_id`: cluster ID
- `database_name`: database name
@ -490,6 +523,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Meaning**: number of tables in the virtual group
##### taos_taosd_vnodes_info_role
- **Labels**:
- `cluster_id`: cluster ID
- `database_name`: database name
@ -499,7 +533,6 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
- **Type**: gauge
- **Meaning**: virtual-node role
### Scrape Configuration
Prometheus provides `scrape_configs` to configure how monitoring data is scraped from an endpoint; usually you only need to set the targets in `static_configs` to the endpoint address of taoskeeper. For more configuration options, see the [Prometheus configuration documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
@ -521,8 +554,6 @@ scrape_configs:
In the Grafana Dashboard menu, click `import`, enter `18587` as the dashboard ID, and click the `Load` button to import the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.
## taosKeeper Metrics
taosKeeper also writes the monitoring data it collects itself into the monitoring database, `log` by default, which can be changed in the taoskeeper configuration file.


@ -30,6 +30,7 @@ database_option: {
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
| TABLE_SUFFIX value
| DNODES value
| TSDB_PAGESIZE value
| WAL_LEVEL {1 | 2}
| WAL_FSYNC_PERIOD value
@ -70,6 +71,7 @@ database_option: {
- TABLE_PREFIX: when positive, a prefix of the specified length in the table name is ignored when deciding which vgroup to assign a table to; when negative, only a prefix of the specified length in the table name is used. For example, for a table named "v30001", with TSDB_PREFIX = 2 the assignment uses "0001", and with TSDB_PREFIX = -2 it uses "v3".
- TABLE_SUFFIX: when positive, a suffix of the specified length in the table name is ignored when deciding which vgroup to assign a table to; when negative, only a suffix of the specified length in the table name is used. For example, for a table named "v30001", with TSDB_SUFFIX = 2 the assignment uses "v300", and with TSDB_SUFFIX = -2 it uses "01".
- TSDB_PAGESIZE: the page size of a VNODE's time-series storage engine, in KB; the default is 4 KB. The range is 1 to 16384, i.e. 1 KB to 16 MB.
- DNODES: the list of DNODEs on which the VNODEs reside, e.g. '1,2,3', comma-separated with no spaces between characters; Enterprise Edition only (see the sketch after this list).
- WAL_LEVEL: WAL level; the default is 1.
- 1: write the WAL but do not execute fsync.
- 2: write the WAL and execute fsync.
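A minimal sketch of the DNODES option at database creation (Enterprise Edition only; the database name and dnode IDs are hypothetical):
```sql
CREATE DATABASE power_db VGROUPS 4 DNODES '1,2,3';
```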
@ -120,6 +122,7 @@ alter_database_option: {
| KEEP value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| MINROWS value
}
```
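With MINROWS now accepted as an alter option, the minimum rows per file block of an existing database can be adjusted; a hedged sketch with a hypothetical database name:
```sql
ALTER DATABASE power_db MINROWS 200;
```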


@ -1569,7 +1569,7 @@ COUNT({* | expr})
ELAPSED(ts_primary_key [, time_unit])
```
**Description**: the elapsed function expresses the continuous span of time covered within the statistics period; used together with the twa function it can compute the area under the statistical curve. When a window is specified with an INTERVAL clause, it computes the time range covered by data within each window in the given time range; without an INTERVAL clause, it returns the time range covered by data across the whole given time range. Note that ELAPSED returns not the absolute value of the time range but the number of units obtained by dividing the absolute value by time_unit.
**Description**: the elapsed function expresses the continuous span of time covered within the statistics period; used together with the twa function it can compute the area under the statistical curve. When a window is specified with an INTERVAL clause, it computes the time range covered by data within each window in the given time range; without an INTERVAL clause, it returns the time range covered by data across the whole given time range. Note that ELAPSED returns not the absolute value of the time range but the number of units obtained by dividing the absolute value by time_unit. In stream computing, this function is supported only in FORCE_WINDOW_CLOSE mode.
**Return type**: DOUBLE.
@ -1829,7 +1829,7 @@ ignore_null_values: {
- INTERP is used to obtain the record value of a specified column at a specified time slice; if no row satisfies the conditions at that time slice, interpolation is performed according to the FILL parameter settings.
- The input of INTERP is the data of the specified column; a condition statement (WHERE clause) can filter the original column data, and without a filter the input is all of the data.
- INTERP must be used together with the RANGE, EVERY, and FILL keywords.
- In SQL queries, INTERP must be used together with the RANGE, EVERY, and FILL keywords; in stream computing, RANGE cannot be used, and the EVERY and FILL keywords are required (see the sketch after this list).
- The output time range of INTERP is specified by the RANGE(timestamp1, timestamp2) field, which must satisfy timestamp1 \<= timestamp2. timestamp1 is the start of the output time range: if the interpolation conditions are met at timestamp1, it is the first output record. timestamp2 is the end of the output time range: the timestamp of the last output record cannot exceed timestamp2.
- INTERP determines the number of results within the output time range from the EVERY(time_unit) field, i.e. it interpolates once per fixed time interval (the time_unit value) starting from timestamp1. time_unit accepts the time units 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), and 1w (week). For example, EVERY(500a) interpolates the specified data every 500 milliseconds.
- INTERP determines how to interpolate at each qualifying moment from the FILL field; for how to use the FILL clause, see the [FILL clause](../distinguished/#fill-子句).
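A hedged sketch of an INTERP query combining the RANGE, EVERY, and FILL keywords described above; the table, column, and time range are hypothetical:
```sql
SELECT INTERP(current) FROM power_db.meters
  RANGE('2024-11-01 00:00:00', '2024-11-01 00:01:00')
  EVERY(1s)
  FILL(PREV);
```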
@ -1988,7 +1988,7 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: returns the value that first appears in the column. The function is similar to distinct. For queries on tables with a composite primary key, if multiple rows share the minimum timestamp, only the row with the smallest composite primary key is returned.
**Description**: returns the deduplicated values of the column. The function is similar to distinct. For identical data it returns the row with the smallest timestamp; for queries on tables with a composite primary key, if multiple rows share the minimum timestamp, only the row with the smallest composite primary key is returned.
**Return type**: same as the field it is applied to.
@ -2180,7 +2180,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```
**Description**: time-weighted average function. Computes the time-weighted average of a column in a table over a period of time. For queries on tables with a composite primary key, if multiple rows share a timestamp, only the row with the smallest composite primary key participates in the computation.
**Description**: time-weighted average function. Computes the time-weighted average of a column in a table over a period of time. For queries on tables with a composite primary key, if multiple rows share a timestamp, only the row with the smallest composite primary key participates in the computation. In stream computing, this function is supported only in FORCE_WINDOW_CLOSE mode.
**Return type**: DOUBLE.


@ -143,13 +143,14 @@ SELECT * from information_schema.`ins_streams`;
When creating a stream, the trigger mode of stream computing can be specified with the TRIGGER directive.
For non-window computations, stream computing is triggered in real time; for window computations, three trigger modes are currently provided, with WINDOW_CLOSE as the default:
For non-window computations, stream computing is triggered in real time; for window computations, four trigger modes are currently provided, with WINDOW_CLOSE as the default:
1. AT_ONCE: triggered immediately on write
2. WINDOW_CLOSE: triggered when the window closes (window closure is determined by the event time and can be used together with watermark)
3. MAX_DELAY time: computation is triggered when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, computation is triggered.
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closing window is computed and pushed out. The window is computed exactly once, at the moment it closes, and is not recomputed later. This mode currently supports only INTERVAL windows (without sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1, and FILL supports only PREV, NULL, NONE, and VALUE.
Because window closure is determined by the event time, if the event stream is interrupted or persistently delayed the event time cannot advance, which may make it impossible to obtain the latest computation results.
@ -291,3 +292,4 @@ RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
CREATE SNODE ON DNODE [id]
```
Here id is the sequence number of a dnode in the cluster. Choose the dnode with care: the intermediate state of stream computing will automatically be backed up on it.
Starting with version 3.3.4.0, creating a stream in a multi-replica environment performs an **existence check** for an snode, which requires the snode to be created first; if no snode exists, the stream cannot be created.
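Given the existence check described above, in a multi-replica environment the snode would be created before any stream; a minimal sketch with a hypothetical dnode id:
```sql
CREATE SNODE ON DNODE 1;
```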


@ -11,337 +11,470 @@ description: Detailed list of TDengine reserved keywords
The keywords are listed as follows:
### A
- ABORT
- ACCOUNT
- ACCOUNTS
- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE
- AND
- APPS
- AS
- ASC
- AT_ONCE
- ATTACH
|Keyword|Description|
|----------------------|-|
| ABORT | |
| ACCOUNT | |
| ACCOUNTS | |
| ADD | |
| AFTER | |
| AGGREGATE | |
| ALIAS | |
| ALIVE | |
| ALL | |
| ALTER | |
| ANALYZE | 3.3.4.3 and later |
| AND | |
| ANODE | 3.3.4.3 and later |
| ANODES | 3.3.4.3 and later |
| ANOMALY_WINDOW | 3.3.4.3 and later |
| ANTI | |
| APPS | |
| ARBGROUPS | |
| ARROW | |
| AS | |
| ASC | |
| ASOF | |
| AT_ONCE | |
| ATTACH | |
### B
- BALANCE
- BEFORE
- BEGIN
- BETWEEN
- BIGINT
- BINARY
- BITAND
- BITNOT
- BITOR
- BLOCKS
- BNODE
- BNODES
- BOOL
- BUFFER
- BUFSIZE
- BY
|Keyword|Description|
|----------------------|-|
| BALANCE | |
| BEFORE | |
| BEGIN | |
| BETWEEN | |
| BIGINT | |
| BIN | |
| BINARY | |
| BITAND | |
| BITNOT | |
| BITOR | |
| BLOB | |
| BLOCKS | |
| BNODE | |
| BNODES | |
| BOOL | |
| BOTH | |
| BUFFER | |
| BUFSIZE | |
| BWLIMIT | |
| BY | |
### C
- CACHE
- CACHEMODEL
- CACHESIZE
- CASCADE
- CAST
- CHANGE
- CLIENT_VERSION
- CLUSTER
- COLON
- COLUMN
- COMMA
- COMMENT
- COMP
- COMPACT
- CONCAT
- CONFLICT
- CONNECTION
- CONNECTIONS
- CONNS
- CONSUMER
- CONSUMERS
- CONTAINS
- COPY
- COUNT
- CREATE
- CURRENT_USER
|Keyword|Description|
|----------------------|-|
| CACHE | |
| CACHEMODEL | |
| CACHESIZE | |
| CASE | |
| CAST | |
| CHANGE | |
| CHILD | |
| CLIENT_VERSION | |
| CLUSTER | |
| COLON | |
| COLUMN | |
| COMMA | |
| COMMENT | |
| COMP | |
| COMPACT | |
| COMPACTS | |
| CONCAT | |
| CONFLICT | |
| CONNECTION | |
| CONNECTIONS | |
| CONNS | |
| CONSUMER | |
| CONSUMERS | |
| CONTAINS | |
| COPY | |
| COUNT | |
| COUNT_WINDOW | |
| CREATE | |
| CREATEDB | |
| CURRENT_USER | |
### D
- DATABASE
- DATABASES
- DBS
- DEFERRED
- DELETE
- DELIMITERS
- DESC
- DESCRIBE
- DETACH
- DISTINCT
- DISTRIBUTED
- DIVIDE
- DNODE
- DNODES
- DOT
- DOUBLE
- DROP
- DURATION
|Keyword|Description|
|----------------------|-|
| DATABASE | |
| DATABASES | |
| DBS | |
| DECIMAL | |
| DEFERRED | |
| DELETE | |
| DELETE_MARK | |
| DELIMITERS | |
| DESC | |
| DESCRIBE | |
| DETACH | |
| DISTINCT | |
| DISTRIBUTED | |
| DIVIDE | |
| DNODE | |
| DNODES | |
| DOT | |
| DOUBLE | |
| DROP | |
| DURATION | |
### E
- EACH
- ENABLE
- END
- EVERY
- EXISTS
- EXPIRED
- EXPLAIN
|Keyword|Description|
|----------------------|-|
| EACH | |
| ELSE | |
| ENABLE | |
| ENCRYPT_ALGORITHM | |
| ENCRYPT_KEY | |
| ENCRYPTIONS | |
| END | |
| EQ | |
| EVENT_WINDOW | |
| EVERY | |
| EXCEPT | |
| EXISTS | |
| EXPIRED | |
| EXPLAIN | |
### F
- FAIL
- FILE
- FILL
- FIRST
- FLOAT
- FLUSH
- FOR
- FROM
- FUNCTION
- FUNCTIONS
|Keyword|Description|
|----------------------|-|
| FAIL | |
| FHIGH | 3.3.4.3 and later |
| FILE | |
| FILL | |
| FILL_HISTORY | |
| FIRST | |
| FLOAT | |
| FLOW | 3.3.4.3 and later |
| FLUSH | |
| FOR | |
| FORCE | |
| FORCE_WINDOW_CLOSE | 3.3.4.3 and later |
| FROM | |
| FROWTS | 3.3.4.3 and later |
| FULL | |
| FUNCTION | |
| FUNCTIONS | |
### G
- GLOB
- GRANT
- GRANTS
- GROUP
|Keyword|Description|
|----------------------|-|
| GE | |
| GEOMETRY | |
| GLOB | |
| GRANT | |
| GRANTS | |
| GROUP | |
| GT | |
### H
- HAVING
- HOST
|Keyword|Description|
|----------------------|-|
| HAVING | |
| HEX | |
| HOST | |
### I
- ID
- IF
- IGNORE
- IMMEDIATE
- IMPORT
- IN
- INDEX
- INDEXES
- INITIALLY
- INNER
- INSERT
- INSTEAD
- INT
- INTEGER
- INTERVAL
- INTO
- IS
- IS NULL
|Keyword|Description|
|----------------------|-|
| ID | |
| IF | |
| IGNORE | |
| ILLEGAL | |
| IMMEDIATE | |
| IMPORT | |
| IN | |
| INDEX | |
| INDEXES | |
| INITIALLY | |
| INNER | |
| INSERT | |
| INSTEAD | |
| INT | |
| INTEGER | |
| INTERSECT | |
| INTERVAL | |
| INTO | |
| IPTOKEN | |
| IROWTS | |
| IS | |
| IS_IMPORT | |
| ISFILLED | |
| ISNULL | |
### J
- JOIN
- JSON
|Keyword|Description|
|----------------------|-|
| JLIMIT | |
| JOIN | |
| JSON | |
### K
- KEEP
- KEY
- KILL
|Keyword|Description|
|----------------------|-|
| KEEP | |
| KEEP_TIME_OFFSET | |
| KEY | |
| KILL | |
### L
- LAST
- LAST_ROW
- LICENCES
- LIKE
- LIMIT
- LINEAR
- LOCAL
|Keyword|Description|
|----------------------|-|
| LANGUAGE | |
| LAST | |
| LAST_ROW | |
| LE | |
| LEADER | |
| LEADING | |
| LEFT | |
| LICENCES | |
| LIKE | |
| LIMIT | |
| LINEAR | |
| LOCAL | |
| LOGS | |
| LP | |
| LSHIFT | |
| LT | |
### M
- MATCH
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS
- MINUS
- MNODE
- MNODES
- MODIFY
- MODULES
|Keyword|Description|
|----------------------|-|
| MACHINES | |
| MATCH | |
| MAX_DELAY | |
| MAXROWS | |
| MEDIUMBLOB | |
| MERGE | |
| META | |
| MINROWS | |
| MINUS | |
| MNODE | |
| MNODES | |
| MODIFY | |
| MODULES | |
### N
- NCHAR
- NEXT
- NMATCH
- NONE
- NOT
- NOT NULL
- NOW
- NULL
- NULLS
|Keyword|Description|
|----------------------|-|
| NCHAR | |
| NE | |
| NEXT | |
| NMATCH | |
| NONE | |
| NORMAL | |
| NOT | |
| NOTNULL | |
| NOW | |
| NULL | |
| NULL_F | |
| NULLS | |
### O
- OF
- OFFSET
- ON
- OR
- ORDER
- OUTPUTTYPE
|Keyword|Description|
|----------------------|-|
| OF | |
| OFFSET | |
| ON | |
| ONLY | |
| OR | |
| ORDER | |
| OUTER | |
| OUTPUTTYPE | |
### P
- PAGES
- PAGESIZE
- PARTITIONS
- PASS
- PLUS
- PORT
- PPS
- PRECISION
- PREV
- PRIVILEGE
|Keyword|Description|
|----------------------|-|
| PAGES | |
| PAGESIZE | |
| PARTITION | |
| PASS | |
| PAUSE | |
| PI | |
| PLUS | |
| PORT | |
| POSITION | |
| PPS | |
| PRECISION | |
| PREV | |
| PRIMARY | |
| PRIVILEGE | |
| PRIVILEGES | |
### Q
- QNODE
- QNODES
- QTIME
- QUERIES
- QUERY
|Keyword|Description|
|----------------------|-|
| QDURATION | |
| QEND | |
| QNODE | |
| QNODES | |
| QSTART | |
| QTAGS | |
| QTIME | |
| QUERIES | |
| QUERY | |
| QUESTION | |
### R
- RAISE
- RANGE
- RATIO
- READ
- REDISTRIBUTE
- RENAME
- REPLACE
- REPLICA
- RESET
- RESTRICT
- RETENTIONS
- REVOKE
- ROLLUP
- ROW
|Keyword|Description|
|----------------------|-|
| RAISE | |
| RAND | |
| RANGE | |
| RATIO | |
| READ | |
| RECURSIVE | |
| REDISTRIBUTE | |
| REM | |
| REPLACE | |
| REPLICA | |
| RESET | |
| RESTORE | |
| RESTRICT | |
| RESUME | |
| RETENTIONS | |
| REVOKE | |
| RIGHT | |
| ROLLUP | |
| ROW | |
| ROWTS | |
| RP | |
| RSHIFT | |
### S
- SCHEMALESS
- SCORES
- SELECT
- SEMI
- SERVER_STATUS
- SERVER_VERSION
- SESSION
- SET
- SHOW
- SINGLE_STABLE
- SLIDING
- SLIMIT
- SMA
- SMALLINT
- SNODE
- SNODES
- SOFFSET
- SPLIT
- STABLE
- STABLES
- START
- STATE
- STATE_WINDOW
- STATEMENT
- STORAGE
- STREAM
- STREAMS
- STRICT
- STRING
- SUBSCRIPTIONS
- SYNCDB
- SYSINFO
|Keyword|Description|
|----------------------|-|
| S3_CHUNKPAGES | |
| S3_COMPACT | |
| S3_KEEPLOCAL | |
| SCHEMALESS | |
| SCORES | |
| SELECT | |
| SEMI | |
| SERVER_STATUS | |
| SERVER_VERSION | |
| SESSION | |
| SET | |
| SHOW | |
| SINGLE_STABLE | |
| SLASH | |
| SLIDING | |
| SLIMIT | |
| SMA | |
| SMALLINT | |
| SMIGRATE | |
| SNODE | |
| SNODES | |
| SOFFSET | |
| SPLIT | |
| STABLE | |
| STABLES | |
| STAR | |
| START | |
| STATE | |
| STATE_WINDOW | |
| STATEMENT | |
| STORAGE | |
| STREAM | |
| STREAMS | |
| STRICT | |
| STRING | |
| STT_TRIGGER | |
| SUBSCRIBE | |
| SUBSCRIPTIONS | |
| SUBSTR | |
| SUBSTRING | |
| SUBTABLE | |
| SYSINFO | |
| SYSTEM | |
### T
- TABLE
- TABLES
- TAG
- TAGS
- TBNAME
- TIMES
- TIMESTAMP
- TIMEZONE
- TINYINT
- TO
- TODAY
- TOPIC
- TOPICS
- TRANSACTION
- TRANSACTIONS
- TRIGGER
- TRIM
- TSERIES
- TTL
|Keyword|Description|
|----------------------|-|
| TABLE | |
| TABLE_PREFIX | |
| TABLE_SUFFIX | |
| TABLES | |
| TAG | |
| TAGS | |
| TBNAME | |
| THEN | |
| TIMES | |
| TIMESTAMP | |
| TIMEZONE | |
| TINYINT | |
| TO | |
| TODAY | |
| TOPIC | |
| TOPICS | |
| TRAILING | |
| TRANSACTION | |
| TRANSACTIONS | |
| TRIGGER | |
| TRIM | |
| TSDB_PAGESIZE | |
| TSERIES | |
| TSMA | |
| TSMAS | |
| TTL | |
### U
- UNION
- UNSIGNED
- UPDATE
- USE
- USER
- USERS
- USING
|Keyword|Description|
|----------------------|-|
| UNION | |
| UNSAFE | |
| UNSIGNED | |
| UNTREATED | |
| UPDATE | |
| USE | |
| USER | |
| USERS | |
| USING | |
### V
|Keyword|Description|
|----------------------|-|
| VALUE | |
| VALUE_F | |
| VALUES | |
| VARBINARY | |
| VARCHAR | |
| VARIABLE | |
| VARIABLES | |
| VERBOSE | |
| VGROUP | |
| VGROUPS | |
| VIEW | |
| VIEWS | |
| VNODE | |
| VNODES | |
- VALUE
- VALUES
- VARCHAR
- VARIABLE
- VARIABLES
- VERBOSE
- VGROUP
- VGROUPS
- VIEW
- VNODES
### W
- WAL
- WAL_FSYNC_PERIOD
- WAL_LEVEL
- WAL_RETENTION_PERIOD
- WAL_RETENTION_SIZE
- WATERMARK
- WHERE
- WINDOW_CLOSE
- WITH
- WRITE
|Keyword|Description|
|----------------------|-|
| WAL | |
| WAL_FSYNC_PERIOD | |
| WAL_LEVEL | |
| WAL_RETENTION_PERIOD | |
| WAL_RETENTION_SIZE | |
| WAL_ROLL_PERIOD | |
| WAL_SEGMENT_SIZE | |
| WATERMARK | |
| WDURATION | |
| WEND | |
| WHEN | |
| WHERE | |
| WINDOW | |
| WINDOW_CLOSE | |
| WINDOW_OFFSET | |
| WITH | |
| WRITE | |
| WSTART | |
### \_


@ -26,6 +26,7 @@ The Node.js connector currently supports only WebSocket connections, made through taosAdapter
| Node.js Connector Version | Major Changes | TDengine Version |
| :------------------: | :----------------------: | :----------------: |
| 3.1.1 | optimized data-transfer performance | 3.3.2.0 and later |
| 3.1.0 | new release with WebSocket connection support | 3.2.0.0 and later |
## Handling Exceptions


@ -3,11 +3,14 @@ sidebar_label: ODBC
title: TDengine ODBC
---
TDengine ODBC is an ODBC driver implemented for TDengine. It supports Windows applications (such as [PowerBI](https://powerbi.microsoft.com/zh-cn/)) accessing the tables/views of local, remote, and cloud-service TDengine databases through the standard ODBC interface
TDengine ODBC is an ODBC driver implemented for TDengine. It supports Windows applications (such as [PowerBI](https://powerbi.microsoft.com/zh-cn/)) as well as user-developed applications accessing local, remote, and cloud-service TDengine databases through the standard ODBC interface.
TDengine ODBC offers two ways to connect to a TDengine database, WebSocket (recommended) and native; the connection type can be set per TDengine data source. WebSocket must be used when accessing cloud services.
Note: TDengine ODBC supports 32-/64-bit Windows and must be called through an ODBC driver manager of the matching bitness. On 32-bit Windows, or from 32-bit applications on 64-bit Windows, only the WebSocket connection type is supported for accessing TDengine databases.
TDengine ODBC provides both 64-bit and 32-bit drivers; the 32-bit driver is supported only by the Enterprise Edition and supports only the WebSocket connection type.
**Note:**
- Driver manager: make sure to use an ODBC driver manager that matches the application's architecture. 32-bit applications need a 32-bit ODBC driver manager, and 64-bit applications need a 64-bit one.
- Data source name (DSN): both the 32-bit and the 64-bit ODBC driver managers can see all DSNs; DSNs with the same name under the User DSN tab are shared, so distinguish them by DSN name.
To learn more about using the TDengine time-series database, visit the [official TDengine documentation](https://docs.taosdata.com/intro/).
@ -24,17 +27,17 @@ TDengine ODBC offers two ways to connect to a TDengine database, WebSocket (recommended) and native
### Data Source Connection Types and Differences
TDengine ODBC supports two ways of connecting to a TDengine database, Websocket and Native, which differ as follows:
TDengine ODBC supports two ways of connecting to a TDengine database, WebSocket and Native, which differ as follows:
1. Only the Websocket connection type can be used to access cloud services.
1. Only the WebSocket connection type can be used to access cloud services.
2. 32-bit applications support only the WebSocket connection type.
3. Websocket connections offer better compatibility; the client library generally does not need to be upgraded along with the TDengine server.
3. WebSocket connections offer better compatibility; the client library generally does not need to be upgraded along with the TDengine server.
4. Native connections usually perform slightly better, but their version must match that of the TDengine server.
5. For ordinary users, the **Websocket** connection type is recommended: its performance differs little from Native, and its compatibility is better.
5. For ordinary users, the **WebSocket** connection type is recommended: its performance differs little from Native, and its compatibility is better (a minimal connection sketch follows this list).
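A minimal connection sketch using the standard ODBC C API, assuming a user DSN named `TDengineWS` that was configured with the WebSocket connection type (the DSN name and credentials are placeholders, not part of the product documentation):

```c
// Minimal ODBC connection sketch. The DSN "TDengineWS" and the credentials
// below are placeholders; substitute your own configuration.
#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#endif
#include <sql.h>
#include <sqlext.h>

int main(void) {
  SQLHENV env = SQL_NULL_HENV;
  SQLHDBC dbc = SQL_NULL_HDBC;

  SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env);
  SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
  SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc);

  SQLRETURN rc = SQLConnect(dbc, (SQLCHAR *)"TDengineWS", SQL_NTS,
                            (SQLCHAR *)"root", SQL_NTS,
                            (SQLCHAR *)"taosdata", SQL_NTS);
  printf(SQL_SUCCEEDED(rc) ? "connected\n" : "connect failed\n");

  if (SQL_SUCCEEDED(rc)) SQLDisconnect(dbc);
  SQLFreeHandle(SQL_HANDLE_DBC, dbc);
  SQLFreeHandle(SQL_HANDLE_ENV, env);
  return 0;
}
```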
### WebSocket Connection
@ -46,11 +49,11 @@ TDengine ODBC supports two ways of connecting to a TDengine database, WebSocket
4. Click Finish to open the TDengine ODBC data source configuration page and fill in the following required information
![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp)
![ODBC WebSocket connection config](./assets/odbc-ws-config-zh.webp)
4.1 [DSN]: Data Source Name, required; names the newly added ODBC data source
4.2 [Connection Type]: required; select the connection type, here choose [Websocket]
4.2 [Connection Type]: required; select the connection type, here choose [WebSocket]
4.3 [URL]: required, the ODBC data source URL, e.g. `http://localhost:6041`; example URL for a cloud service: `https://gw.cloud.taosdata.com?token=your_token`
@ -111,7 +114,7 @@ The WebSocket connection type additionally supports 32-bit applications running on Windows x64
| taos_odbc Version | Major Changes | TDengine Version |
| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
| v1.1.0 | 1. Support for views;<br/>2. Support for the VARBINARY/GEOMETRY data types; | 3.3.3.0 and later |
| v1.1.0 | 1. Support for views;<br/>2. Support for the VARBINARY/GEOMETRY data types;<br/>3. Support for the 32-bit ODBC WebSocket connection type (enterprise edition only);<br/>4. Support for compatibility options in the ODBC data source configuration dialog for industrial software such as KingSCADA and Kepware (enterprise edition only); | 3.3.3.0 and later |
| v1.0.2 | Support for the CP1252 character encoding; | 3.2.3.0 and later |
| v1.0.1 | 1. Support for the BI mode DSN setting; in BI mode the TDengine database does not return system databases or supertable child-table information;<br/>2. Refactored the character-set conversion module to improve read/write performance;<br/>3. Changed the default connection type in the ODBC data source configuration dialog to "WebSocket";<br/>4. Added a "Test Connection" control to the ODBC data source configuration dialog;<br/>5. ODBC data source configuration supports Chinese/English interfaces; | - |
| v1.0.0.0 | Initial release, supporting read/write interaction with a TDengine database; see the "API Reference" section for details | 3.2.2.0 and later |
View File
@ -39,10 +39,10 @@ description: "List of platforms supported by the TDengine server, client, and connectors"
| ----------- | ------------- | ------------- | ------------- | --------- | --------- |
| **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** |
| **C/C++** | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ○ | ● | ○ |
| **JDBC** | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ● | ● |
| **Go** | ● | ● | ● | ● | ● |
| **NodeJs** | ● | ● | ○ | ● | ○ |
| **NodeJs** | ● | ● | ● | ● | ● |
| **C#** | ● | ● | ○ | ● | ○ |
| **Rust** | ● | ● | ● | ○ | ● |
| **RESTful** | ● | ● | ● | ● | ● |
View File
@ -81,6 +81,13 @@ typedef enum {
TSDB_SML_TIMESTAMP_NANO_SECONDS,
} TSDB_SML_TIMESTAMP_TYPE;
typedef enum TAOS_FIELD_T {
TAOS_FIELD_COL = 1,
TAOS_FIELD_TAG,
TAOS_FIELD_QUERY,
TAOS_FIELD_TBNAME,
} TAOS_FIELD_T;
typedef struct taosField {
char name[65];
int8_t type;
@ -95,6 +102,15 @@ typedef struct TAOS_FIELD_E {
int32_t bytes;
} TAOS_FIELD_E;
typedef struct TAOS_FIELD_STB {
char name[65];
int8_t type;
uint8_t precision;
uint8_t scale;
int32_t bytes;
TAOS_FIELD_T field_type;
} TAOS_FIELD_STB;
#ifdef WINDOWS
#define DLL_EXPORT __declspec(dllexport)
#else
@ -195,13 +211,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt);
typedef void TAOS_STMT2;
typedef enum TAOS_FIELD_T {
TAOS_FIELD_COL = 1,
TAOS_FIELD_TAG,
TAOS_FIELD_QUERY,
TAOS_FIELD_TBNAME,
} TAOS_FIELD_T;
typedef struct TAOS_STMT2_OPTION {
int64_t reqid;
bool singleStbInsert;
@ -232,7 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows);
DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt);
DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert);
DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields);
DLL_EXPORT int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields);
DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields);
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields);
DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt);
DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt);
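The new `TAOS_FIELD_STB` struct and the `taos_stmt2_get_stb_fields`/`taos_stmt2_free_stb_fields` pair expose the column, tag, and table-name slots of a prepared super-table insert. A hedged usage sketch, assuming `taos` is an already-connected `TAOS *` handle, a super table `st` exists, and `taos_stmt2_init`/`taos_stmt2_prepare` keep their current signatures (error handling abbreviated):

```c
// Sketch: enumerate the bindable fields of a super-table insert statement.
#include <stdio.h>
#include "taos.h"

void list_stb_fields(TAOS *taos) {
  TAOS_STMT2_OPTION option = {0};
  TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option);
  taos_stmt2_prepare(stmt, "INSERT INTO ? USING st TAGS(?) VALUES(?, ?)", 0);

  int count = 0;
  TAOS_FIELD_STB *fields = NULL;
  if (taos_stmt2_get_stb_fields(stmt, &count, &fields) == 0) {
    for (int i = 0; i < count; ++i) {
      // field_type distinguishes columns, tags, and the table-name slot.
      printf("%s: type=%d field_type=%d\n", fields[i].name,
             (int)fields[i].type, (int)fields[i].field_type);
    }
    taos_stmt2_free_stb_fields(stmt, fields);
  }
  taos_stmt2_close(stmt);
}
```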
View File
@ -36,7 +36,7 @@ typedef struct {
int32_t anode;
int32_t urlLen;
char *url;
} SAnalUrl;
} SAnalyticsUrl;
typedef enum {
ANAL_BUF_TYPE_JSON = 0,
@ -53,18 +53,18 @@ typedef struct {
TdFilePtr filePtr;
char fileName[TSDB_FILENAME_LEN + 10];
int64_t numOfRows;
} SAnalColBuf;
} SAnalyticsColBuf;
typedef struct {
EAnalBufType bufType;
TdFilePtr filePtr;
char fileName[TSDB_FILENAME_LEN];
int32_t numOfCols;
SAnalColBuf *pCols;
SAnalyticsColBuf *pCols;
} SAnalBuf;
int32_t taosAnalInit();
void taosAnalCleanup();
int32_t taosAnalyticsInit();
void taosAnalyticsCleanup();
SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf);
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen);
View File
@ -154,6 +154,7 @@ typedef enum EStreamType {
STREAM_TRANS_STATE,
STREAM_MID_RETRIEVE,
STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
} EStreamType;
#pragma pack(push, 1)
@ -383,6 +384,10 @@ typedef struct STUidTagInfo {
#define TABLE_NAME_COLUMN_INDEX 6
#define PRIMARY_KEY_COLUMN_INDEX 7
// stream get result block column
#define DATA_TS_COLUMN_INDEX 0
#define DATA_VERSION_COLUMN_INDEX 1
// stream create table block column
#define UD_TABLE_NAME_COLUMN_INDEX 0
#define UD_GROUPID_COLUMN_INDEX 1
View File
@ -189,7 +189,12 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint
int32_t getJsonValueLen(const char* data);
// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length.
// If the same row is inserted repeatedly, data holes will result.
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1),
// it will be inserted at the original position and the old data will be overwritten.
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows,
bool trimValue);
@ -233,7 +238,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
* @brief find how many rows already in order start from first row
*/
int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo);
void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk);
int32_t blockDataCheck(const SSDataBlock* pDataBlock);
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
@ -266,7 +271,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId)
int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData);
int32_t blockGetEncodeSize(const SSDataBlock* pBlock);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols);
int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos);
// for debug
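`blockEncode` now receives the destination buffer size, so the expected call pattern is to size the buffer with `blockGetEncodeSize` first. A sketch of the adjusted pattern, assuming `pBlock` and `numOfCols` are in scope (error handling abbreviated):

```c
// Sketch of the adjusted blockEncode call pattern with an explicit length.
int32_t dataLen = blockGetEncodeSize(pBlock);
char   *buf = taosMemoryMalloc(dataLen);
if (buf != NULL) {
  int32_t encodedLen = blockEncode(pBlock, buf, dataLen, numOfCols);
  // A negative encodedLen would indicate failure; otherwise ship buf onward.
  taosMemoryFree(buf);
}
```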
View File
@ -142,6 +142,7 @@ extern bool tsMonitorForceV2;
// audit
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern bool tsEnableAuditDelete;
extern int32_t tsAuditInterval;
// telem
@ -153,6 +154,12 @@ extern bool tsEnableCrashReport;
extern char *tsTelemUri;
extern char *tsClientCrashReportUri;
extern char *tsSvrCrashReportUri;
extern int8_t tsSafetyCheckLevel;
enum {
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
TSDB_SAFETY_CHECK_LEVELL_BYROW = 2,
};
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
@ -194,10 +201,10 @@ extern int32_t tsMinIntervalTime;
extern int32_t tsMaxInsertBatchRows;
// build info
extern char version[];
extern char compatible_version[];
extern char gitinfo[];
extern char buildinfo[];
extern char td_version[];
extern char td_compatible_version[];
extern char td_gitinfo[];
extern char td_buildinfo[];
// lossy
extern char tsLossyColumns[];
View File
@ -467,9 +467,11 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY,
QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC,
} ENodeType;
typedef struct {
@ -1022,6 +1024,7 @@ typedef struct {
char sDetailVer[128];
int64_t whiteListVer;
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SConnectRsp;
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
@ -1215,6 +1218,7 @@ typedef struct {
int32_t bytes;
int8_t type;
uint8_t pk;
bool noData;
} SColumnInfo;
typedef struct STimeWindow {
@ -1337,6 +1341,7 @@ typedef struct {
char* sql;
int8_t withArbitrator;
int8_t encryptAlgorithm;
char dnodeListStr[TSDB_DNODE_LIST_LEN];
} SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@ -1825,6 +1830,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq);
void tFreeSStatisReq(SStatisReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
char table[TSDB_TABLE_NAME_LEN];
char operation[AUDIT_OPERATION_LEN];
int32_t sqlLen;
char* pSql;
} SAuditReq;
int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq);
void tFreeSAuditReq(SAuditReq* pReq);
typedef struct {
int32_t dnodeId;
int64_t clusterId;
@ -2291,6 +2307,7 @@ typedef struct {
typedef struct {
SExplainRsp rsp;
uint64_t qId;
uint64_t cId;
uint64_t tId;
int64_t rId;
int32_t eId;
@ -2644,6 +2661,7 @@ typedef struct SSubQueryMsg {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2673,6 +2691,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int32_t execId;
} SQueryContinueReq;
@ -2707,6 +2726,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
@ -2722,6 +2742,7 @@ typedef struct {
typedef struct {
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2768,6 +2789,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2781,6 +2803,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2797,6 +2820,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2816,6 +2840,8 @@ typedef struct {
#define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_MAX_DELAY 3
#define STREAM_TRIGGER_FORCE_WINDOW_CLOSE 4
#define STREAM_DEFAULT_IGNORE_EXPIRED 1
#define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0
@ -3413,6 +3439,7 @@ typedef struct {
int32_t svrTimestamp;
SArray* rsps; // SArray<SClientHbRsp>
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SClientHbBatchRsp;
static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); }
@ -4111,7 +4138,6 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp);
#define MQ_DATA_RSP_VERSION 100
typedef struct {
struct {
SMqRspHead head;
STqOffsetVal rspOffset;
STqOffsetVal reqOffset;
@ -4122,7 +4148,6 @@ typedef struct {
SArray* blockData;
SArray* blockTbName;
SArray* blockSchema;
};
union{
struct{
@ -4251,6 +4276,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
uint32_t sqlLen;
uint32_t phyLen;
View File
@ -259,6 +259,7 @@
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
View File
@ -62,7 +62,8 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
: (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
: 1000000000;
time_t t = taosTime(NULL);
time_t t;
(void) taosTime(&t);
struct tm tm;
(void) taosLocalTime(&t, &tm, NULL, 0);
tm.tm_hour = 0;
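`taosTime` now reports failure through its return code instead of returning the `time_t` directly, which is why the call site changes to `(void)taosTime(&t)`. The surrounding function truncates the current time to local midnight and scales it by the precision factor; the same logic as a standalone libc sketch (plain `time`/`localtime_r` stand in for the taosTime/taosLocalTime wrappers):

```c
// Standalone sketch of the midnight truncation performed above.
#include <stdint.h>
#include <time.h>

int64_t today_start(int64_t factor) {  // 1000 / 1000000 / 1000000000
  time_t t = time(NULL);
  struct tm tm;
  localtime_r(&t, &tm);
  tm.tm_hour = 0;
  tm.tm_min  = 0;
  tm.tm_sec  = 0;
  return (int64_t)mktime(&tm) * factor;
}
```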
View File
@ -16,394 +16,7 @@
#ifndef _TD_COMMON_TOKEN_H_
#define _TD_COMMON_TOKEN_H_
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_NK_COMMA 33
#define TK_HOST 34
#define TK_IS_IMPORT 35
#define TK_NK_INTEGER 36
#define TK_CREATEDB 37
#define TK_USER 38
#define TK_ENABLE 39
#define TK_SYSINFO 40
#define TK_ADD 41
#define TK_DROP 42
#define TK_GRANT 43
#define TK_ON 44
#define TK_TO 45
#define TK_REVOKE 46
#define TK_FROM 47
#define TK_SUBSCRIBE 48
#define TK_READ 49
#define TK_WRITE 50
#define TK_NK_DOT 51
#define TK_WITH 52
#define TK_ENCRYPT_KEY 53
#define TK_ANODE 54
#define TK_UPDATE 55
#define TK_ANODES 56
#define TK_DNODE 57
#define TK_PORT 58
#define TK_DNODES 59
#define TK_RESTORE 60
#define TK_NK_IPTOKEN 61
#define TK_FORCE 62
#define TK_UNSAFE 63
#define TK_CLUSTER 64
#define TK_LOCAL 65
#define TK_QNODE 66
#define TK_BNODE 67
#define TK_SNODE 68
#define TK_MNODE 69
#define TK_VNODE 70
#define TK_DATABASE 71
#define TK_USE 72
#define TK_FLUSH 73
#define TK_TRIM 74
#define TK_S3MIGRATE 75
#define TK_COMPACT 76
#define TK_IF 77
#define TK_NOT 78
#define TK_EXISTS 79
#define TK_BUFFER 80
#define TK_CACHEMODEL 81
#define TK_CACHESIZE 82
#define TK_COMP 83
#define TK_DURATION 84
#define TK_NK_VARIABLE 85
#define TK_MAXROWS 86
#define TK_MINROWS 87
#define TK_KEEP 88
#define TK_PAGES 89
#define TK_PAGESIZE 90
#define TK_TSDB_PAGESIZE 91
#define TK_PRECISION 92
#define TK_REPLICA 93
#define TK_VGROUPS 94
#define TK_SINGLE_STABLE 95
#define TK_RETENTIONS 96
#define TK_SCHEMALESS 97
#define TK_WAL_LEVEL 98
#define TK_WAL_FSYNC_PERIOD 99
#define TK_WAL_RETENTION_PERIOD 100
#define TK_WAL_RETENTION_SIZE 101
#define TK_WAL_ROLL_PERIOD 102
#define TK_WAL_SEGMENT_SIZE 103
#define TK_STT_TRIGGER 104
#define TK_TABLE_PREFIX 105
#define TK_TABLE_SUFFIX 106
#define TK_S3_CHUNKSIZE 107
#define TK_S3_KEEPLOCAL 108
#define TK_S3_COMPACT 109
#define TK_KEEP_TIME_OFFSET 110
#define TK_ENCRYPT_ALGORITHM 111
#define TK_NK_COLON 112
#define TK_BWLIMIT 113
#define TK_START 114
#define TK_TIMESTAMP 115
#define TK_END 116
#define TK_TABLE 117
#define TK_NK_LP 118
#define TK_NK_RP 119
#define TK_USING 120
#define TK_FILE 121
#define TK_STABLE 122
#define TK_COLUMN 123
#define TK_MODIFY 124
#define TK_RENAME 125
#define TK_TAG 126
#define TK_SET 127
#define TK_NK_EQ 128
#define TK_TAGS 129
#define TK_BOOL 130
#define TK_TINYINT 131
#define TK_SMALLINT 132
#define TK_INT 133
#define TK_INTEGER 134
#define TK_BIGINT 135
#define TK_FLOAT 136
#define TK_DOUBLE 137
#define TK_BINARY 138
#define TK_NCHAR 139
#define TK_UNSIGNED 140
#define TK_JSON 141
#define TK_VARCHAR 142
#define TK_MEDIUMBLOB 143
#define TK_BLOB 144
#define TK_VARBINARY 145
#define TK_GEOMETRY 146
#define TK_DECIMAL 147
#define TK_COMMENT 148
#define TK_MAX_DELAY 149
#define TK_WATERMARK 150
#define TK_ROLLUP 151
#define TK_TTL 152
#define TK_SMA 153
#define TK_DELETE_MARK 154
#define TK_FIRST 155
#define TK_LAST 156
#define TK_SHOW 157
#define TK_FULL 158
#define TK_PRIVILEGES 159
#define TK_DATABASES 160
#define TK_TABLES 161
#define TK_STABLES 162
#define TK_MNODES 163
#define TK_QNODES 164
#define TK_ARBGROUPS 165
#define TK_FUNCTIONS 166
#define TK_INDEXES 167
#define TK_ACCOUNTS 168
#define TK_APPS 169
#define TK_CONNECTIONS 170
#define TK_LICENCES 171
#define TK_GRANTS 172
#define TK_LOGS 173
#define TK_MACHINES 174
#define TK_ENCRYPTIONS 175
#define TK_QUERIES 176
#define TK_SCORES 177
#define TK_TOPICS 178
#define TK_VARIABLES 179
#define TK_BNODES 180
#define TK_SNODES 181
#define TK_TRANSACTIONS 182
#define TK_DISTRIBUTED 183
#define TK_CONSUMERS 184
#define TK_SUBSCRIPTIONS 185
#define TK_VNODES 186
#define TK_ALIVE 187
#define TK_VIEWS 188
#define TK_VIEW 189
#define TK_COMPACTS 190
#define TK_NORMAL 191
#define TK_CHILD 192
#define TK_LIKE 193
#define TK_TBNAME 194
#define TK_QTAGS 195
#define TK_AS 196
#define TK_SYSTEM 197
#define TK_TSMA 198
#define TK_INTERVAL 199
#define TK_RECURSIVE 200
#define TK_TSMAS 201
#define TK_FUNCTION 202
#define TK_INDEX 203
#define TK_COUNT 204
#define TK_LAST_ROW 205
#define TK_META 206
#define TK_ONLY 207
#define TK_TOPIC 208
#define TK_CONSUMER 209
#define TK_GROUP 210
#define TK_DESC 211
#define TK_DESCRIBE 212
#define TK_RESET 213
#define TK_QUERY 214
#define TK_CACHE 215
#define TK_EXPLAIN 216
#define TK_ANALYZE 217
#define TK_VERBOSE 218
#define TK_NK_BOOL 219
#define TK_RATIO 220
#define TK_NK_FLOAT 221
#define TK_OUTPUTTYPE 222
#define TK_AGGREGATE 223
#define TK_BUFSIZE 224
#define TK_LANGUAGE 225
#define TK_REPLACE 226
#define TK_STREAM 227
#define TK_INTO 228
#define TK_PAUSE 229
#define TK_RESUME 230
#define TK_PRIMARY 231
#define TK_KEY 232
#define TK_TRIGGER 233
#define TK_AT_ONCE 234
#define TK_WINDOW_CLOSE 235
#define TK_IGNORE 236
#define TK_EXPIRED 237
#define TK_FILL_HISTORY 238
#define TK_SUBTABLE 239
#define TK_UNTREATED 240
#define TK_KILL 241
#define TK_CONNECTION 242
#define TK_TRANSACTION 243
#define TK_BALANCE 244
#define TK_VGROUP 245
#define TK_LEADER 246
#define TK_MERGE 247
#define TK_REDISTRIBUTE 248
#define TK_SPLIT 249
#define TK_DELETE 250
#define TK_INSERT 251
#define TK_NK_BIN 252
#define TK_NK_HEX 253
#define TK_NULL 254
#define TK_NK_QUESTION 255
#define TK_NK_ALIAS 256
#define TK_NK_ARROW 257
#define TK_ROWTS 258
#define TK_QSTART 259
#define TK_QEND 260
#define TK_QDURATION 261
#define TK_WSTART 262
#define TK_WEND 263
#define TK_WDURATION 264
#define TK_IROWTS 265
#define TK_ISFILLED 266
#define TK_FLOW 267
#define TK_FHIGH 268
#define TK_FROWTS 269
#define TK_CAST 270
#define TK_POSITION 271
#define TK_IN 272
#define TK_FOR 273
#define TK_NOW 274
#define TK_TODAY 275
#define TK_RAND 276
#define TK_SUBSTR 277
#define TK_SUBSTRING 278
#define TK_BOTH 279
#define TK_TRAILING 280
#define TK_LEADING 281
#define TK_TIMEZONE 282
#define TK_CLIENT_VERSION 283
#define TK_SERVER_VERSION 284
#define TK_SERVER_STATUS 285
#define TK_CURRENT_USER 286
#define TK_PI 287
#define TK_CASE 288
#define TK_WHEN 289
#define TK_THEN 290
#define TK_ELSE 291
#define TK_BETWEEN 292
#define TK_IS 293
#define TK_NK_LT 294
#define TK_NK_GT 295
#define TK_NK_LE 296
#define TK_NK_GE 297
#define TK_NK_NE 298
#define TK_MATCH 299
#define TK_NMATCH 300
#define TK_CONTAINS 301
#define TK_JOIN 302
#define TK_INNER 303
#define TK_LEFT 304
#define TK_RIGHT 305
#define TK_OUTER 306
#define TK_SEMI 307
#define TK_ANTI 308
#define TK_ASOF 309
#define TK_WINDOW 310
#define TK_WINDOW_OFFSET 311
#define TK_JLIMIT 312
#define TK_SELECT 313
#define TK_NK_HINT 314
#define TK_DISTINCT 315
#define TK_WHERE 316
#define TK_PARTITION 317
#define TK_BY 318
#define TK_SESSION 319
#define TK_STATE_WINDOW 320
#define TK_EVENT_WINDOW 321
#define TK_COUNT_WINDOW 322
#define TK_ANOMALY_WINDOW 323
#define TK_SLIDING 324
#define TK_FILL 325
#define TK_VALUE 326
#define TK_VALUE_F 327
#define TK_NONE 328
#define TK_PREV 329
#define TK_NULL_F 330
#define TK_LINEAR 331
#define TK_NEXT 332
#define TK_HAVING 333
#define TK_RANGE 334
#define TK_EVERY 335
#define TK_ORDER 336
#define TK_SLIMIT 337
#define TK_SOFFSET 338
#define TK_LIMIT 339
#define TK_OFFSET 340
#define TK_ASC 341
#define TK_NULLS 342
#define TK_ABORT 343
#define TK_AFTER 344
#define TK_ATTACH 345
#define TK_BEFORE 346
#define TK_BEGIN 347
#define TK_BITAND 348
#define TK_BITNOT 349
#define TK_BITOR 350
#define TK_BLOCKS 351
#define TK_CHANGE 352
#define TK_COMMA 353
#define TK_CONCAT 354
#define TK_CONFLICT 355
#define TK_COPY 356
#define TK_DEFERRED 357
#define TK_DELIMITERS 358
#define TK_DETACH 359
#define TK_DIVIDE 360
#define TK_DOT 361
#define TK_EACH 362
#define TK_FAIL 363
#define TK_GLOB 364
#define TK_ID 365
#define TK_IMMEDIATE 366
#define TK_IMPORT 367
#define TK_INITIALLY 368
#define TK_INSTEAD 369
#define TK_ISNULL 370
#define TK_MODULES 371
#define TK_NK_BITNOT 372
#define TK_NK_SEMI 373
#define TK_NOTNULL 374
#define TK_OF 375
#define TK_PLUS 376
#define TK_PRIVILEGE 377
#define TK_RAISE 378
#define TK_RESTRICT 379
#define TK_ROW 380
#define TK_STAR 381
#define TK_STATEMENT 382
#define TK_STRICT 383
#define TK_STRING 384
#define TK_TIMES 385
#define TK_VALUES 386
#define TK_VARIABLE 387
#define TK_WAL 388
#include "ttokenauto.h"
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
View File
@ -238,12 +238,26 @@ typedef struct {
case TSDB_DATA_TYPE_UBIGINT: \
snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \
break; \
case TSDB_DATA_TYPE_FLOAT: \
case TSDB_DATA_TYPE_FLOAT: { \
int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \
if (n >= (_outputBytes)) { \
n = snprintf(_output, (int32_t)(_outputBytes), "%.7e", *(float *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \
} \
} \
break; \
case TSDB_DATA_TYPE_DOUBLE: \
} \
case TSDB_DATA_TYPE_DOUBLE: { \
int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%.15e", *(double *)(_input)); \
if (n >= (_outputBytes)) { \
snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \
} \
} \
break; \
} \
case TSDB_DATA_TYPE_UINT: \
snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \
break; \
@ -284,6 +298,7 @@ typedef struct {
#define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX)
#define IS_CONVERT_AS_SIGNED(_t) \
(IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
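The new float/double branches above fall back to scientific notation when the fixed-point rendering would overflow the output buffer. The same idea as a standalone helper (the function name is illustrative, not part of the codebase):

```c
// Illustrative helper mirroring the macro's fallback: try "%f" first, and
// if the result would not fit, retry with scientific notation ("%.7e").
#include <stdint.h>
#include <stdio.h>

static void formatFloatBounded(char *out, int32_t outBytes, float v) {
  int32_t n = snprintf(out, (size_t)outBytes, "%f", v);
  if (n >= outBytes) {
    n = snprintf(out, (size_t)outBytes, "%.7e", v);
    if (n >= outBytes) {
      // Last resort: keep the truncated fixed-point form, as the macro does.
      (void)snprintf(out, (size_t)outBytes, "%f", v);
    }
  }
}
```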
View File
@ -29,7 +29,6 @@ extern "C" {
#endif
#define AUDIT_DETAIL_MAX 65472
#define AUDIT_OPERATION_LEN 20
typedef struct {
const char *server;
View File
@ -31,7 +31,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef struct {
void* handle;
@ -222,8 +222,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
bool qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo);
void qResetTaskInfoCode(qTaskInfo_t tinfo);
int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow);
int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo);
int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo);
View File
@ -41,6 +41,8 @@ extern "C" {
#define STREAM_STATE_BUFF_HASH 1
#define STREAM_STATE_BUFF_SORT 2
#define STREAM_STATE_BUFF_HASH_SORT 3
#define STREAM_STATE_BUFF_HASH_SEARCH 4
typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*);
@ -325,6 +327,9 @@ typedef struct {
int64_t number;
void* pStreamFileState;
int32_t buffIndex;
int32_t hashIter;
void* pHashData;
int64_t minGpId;
} SStreamStateCur;
typedef struct SStateStore {
@ -337,6 +342,8 @@ typedef struct SStateStore {
void (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used);
void (*streamStateClearBuff)(SStreamState* pState, void* pVal);
void (*streamStateFreeVal)(void* val);
int32_t (*streamStateGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -349,8 +356,15 @@ typedef struct SStateStore {
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t (*streamStateFillAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateFillGetNext)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateFillGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
void (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
void (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
@ -361,9 +375,12 @@ typedef struct SStateStore {
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateFillGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
void (*streamStateSetFillInfo)(SStreamState* pState);
void (*streamStateClearExpiredState)(SStreamState* pState);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
@ -400,8 +417,8 @@ typedef struct SStateStore {
SUpdateInfo** ppInfo);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen);
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(SDecoder* pDeCoder, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
@ -412,6 +429,11 @@ typedef struct SStateStore {
GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type,
struct SStreamFileState** ppFileState);
int32_t (*streamStateGroupPut)(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* (*streamStateGroupGetCur)(SStreamState* pState);
void (*streamStateGroupCurNext)(SStreamStateCur* pCur);
int32_t (*streamStateGroupGetKVByCur)(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
View File
@ -292,6 +292,7 @@ bool fmIsElapsedFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType, int32_t pkBytes);
int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc);
View File
@ -24,6 +24,7 @@ extern "C" {
#include "thash.h"
#include "query.h"
#include "tqueue.h"
#include "clientInt.h"
typedef enum {
SQL_RESULT_SUCCESS = 0,
@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name,
void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values);
const char* monitorResultStr(SQL_RESULT_CODE code);
int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data);
void clientOperateReport(SRequestObj* pRequest);
#ifdef __cplusplus
}
#endif
View File
@ -72,6 +72,7 @@ typedef struct SDatabaseOptions {
int8_t compressionLevel;
int8_t encryptAlgorithm;
int32_t daysPerFile;
char dnodeListStr[TSDB_DNODE_LIST_LEN];
char encryptAlgorithmStr[TSDB_ENCRYPT_ALGO_STR_LEN];
SValueNode* pDaysPerFile;
int32_t fsyncPeriod;
View File
@ -194,14 +194,26 @@ typedef struct SIndefRowsFuncLogicNode {
bool isTimeLineFunc;
} SIndefRowsFuncLogicNode;
typedef struct SStreamNodeOption {
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
int8_t igCheckUpdate;
int8_t destHasPrimaryKey;
} SStreamNodeOption;
typedef struct SInterpFuncLogicNode {
SLogicNode node;
SNodeList* pFuncs;
STimeWindow timeRange;
int64_t interval;
int8_t intervalUnit;
int8_t precision;
EFillMode fillMode;
SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncLogicNode;
typedef struct SForecastFuncLogicNode {
@ -511,11 +523,15 @@ typedef struct SInterpFuncPhysiNode {
STimeWindow timeRange;
int64_t interval;
int8_t intervalUnit;
int8_t precision;
EFillMode fillMode;
SNode* pFillValues; // SNodeListNode
SNode* pTimeSeries; // SColumnNode
SStreamNodeOption streamNodeOption;
} SInterpFuncPhysiNode;
typedef SInterpFuncPhysiNode SStreamInterpFuncPhysiNode;
typedef struct SForecastFuncPhysiNode {
SPhysiNode node;
SNodeList* pExprs;
@ -608,6 +624,7 @@ typedef struct SAggPhysiNode {
typedef struct SDownstreamSourceNode {
ENodeType type;
SQueryNodeAddr addr;
uint64_t clientId;
uint64_t taskId;
uint64_t schedId;
int32_t execId;
@ -650,7 +667,7 @@ typedef struct SWindowPhysiNode {
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
int8_t destHasPrimayKey;
int8_t destHasPrimaryKey;
bool mergeDataBlock;
} SWindowPhysiNode;
View File
@ -457,6 +457,7 @@ typedef struct SSelectStmt {
bool hasCountFunc;
bool hasUdaf;
bool hasStateKey;
bool hasTwaOrElapsedFunc;
bool onlyHasKeepOrderFunc;
bool groupSort;
bool tagScan;
View File
@ -147,6 +147,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
@ -176,8 +177,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* fields,
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen);
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields,
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut);
View File
@ -105,11 +105,11 @@ void qWorkerDestroy(void **qWorkerMgmt);
int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
SQWMsg *qwMsg, SArray *explainRes);
int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
int32_t eId, SQWMsg *qwMsg, SArray *explainRes);
int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
void **pRsp, SArray *explainRes);
int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
int32_t eId, void **pRsp, SArray *explainRes);
int32_t qWorkerDbgEnableDebug(char *option);
View File
@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara
int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t timeZoneStrLen();
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
View File
@ -83,6 +83,9 @@ void schedulerStopQueryHb(void* pTrans);
int32_t schedulerUpdatePolicy(int32_t policy);
int32_t schedulerEnableReSchedule(bool enableResche);
int32_t initClientId(void);
uint64_t getClientId(void);
/**
* Cancel query job
* @param pJob
View File
@ -49,6 +49,8 @@ void streamStateClear(SStreamState* pState);
void streamStateSetNumber(SStreamState* pState, int32_t number, int32_t tsIdex);
void streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
// session window
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen,
@ -75,8 +77,14 @@ int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, ch
// fill
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode);
int32_t streamStateFillAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateFillGetNext(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateFillGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
@ -96,15 +104,25 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey*
void streamStateFreeCur(SStreamStateCur* pCur);
void streamStateResetCur(SStreamStateCur* pCur);
int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateFillGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
// twa
void streamStateSetFillInfo(SStreamState* pState);
void streamStateClearExpiredState(SStreamState* pState);
void streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode);
// group id
int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
SStreamStateCur* streamStateGroupGetCur(SStreamState* pState);
void streamStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
void streamStateReloadInfo(SStreamState* pState, TSKEY ts);
void streamStateCopyBackend(SStreamState* src, SStreamState* dst);
View File
@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
#define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
extern int32_t streamMetaId;
extern int32_t streamMetaRefPool;
extern int32_t streamTaskRefPool;
enum {
STREAM_STATUS__NORMAL = 0,
@ -113,7 +114,7 @@ enum {
enum {
TASK_TRIGGER_STATUS__INACTIVE = 1,
TASK_TRIGGER_STATUS__ACTIVE,
TASK_TRIGGER_STATUS__MAY_ACTIVE,
};
typedef enum {
@ -258,6 +259,7 @@ typedef struct STaskId {
typedef struct SStreamTaskId {
int64_t streamId;
int32_t taskId;
int64_t refId;
const char* idStr;
} SStreamTaskId;
@ -291,12 +293,12 @@ typedef struct SStreamStatus {
int8_t schedStatus;
int8_t statusBackup;
int32_t schedIdleTime; // idle time before invoke again
int32_t timerActive; // timer is active
int64_t lastExecTs; // last exec time stamp
int32_t inScanHistorySentinel;
bool appendTranstateBlock; // has append the transfer state data block already
bool appendTranstateBlock; // has appended the transfer state data block already
bool removeBackendFiles; // remove backend files on disk when free stream tasks
SConsenChkptInfo consenChkptInfo;
STimeWindow latestForceWindow; // latest generated time window, only valid in
} SStreamStatus;
typedef struct SDataRange {
@ -309,10 +311,12 @@ typedef struct SSTaskBasicInfo {
SEpSet epSet;
SEpSet mnodeEpset; // mnode epset for send heartbeat
int32_t selfChildId;
int32_t totalLevel;
int32_t trigger;
int8_t taskLevel;
int8_t fillHistory; // is fill history task or not
int64_t delaySchedParam; // in msec
int64_t watermark; // extracted from operators
SInterval interval;
} SSTaskBasicInfo;
typedef struct SStreamRetrieveReq SStreamRetrieveReq;
@ -454,7 +458,6 @@ struct SStreamTask {
// the followings attributes don't be serialized
SScanhistorySchedInfo schedHistoryInfo;
int32_t refCnt;
int32_t transferStateAlignCnt;
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
@ -544,9 +547,10 @@ typedef struct STaskUpdateEntry {
typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask);
void tFreeStreamTask(SStreamTask* pTask);
int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int32_t trigger,
int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5,
SStreamTask** pTask);
void tFreeStreamTask(void* pTask);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
@ -664,6 +668,8 @@ void streamTaskResetStatus(SStreamTask* pTask);
void streamTaskSetStatusReady(SStreamTask* pTask);
ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
const char* streamTaskGetExecType(int32_t type);
int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId);
void streamTaskFreeRefId(int64_t* pRefId);
bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
@ -753,16 +759,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask);
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
int64_t endTs, bool ready);
int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo);
View File
@ -16,8 +16,6 @@
#ifndef _STREAM_FILE_STATE_H_
#define _STREAM_FILE_STATE_H_
#include "os.h"
#include "storageapi.h"
#include "tarray.h"
#include "tdef.h"
@ -37,7 +35,7 @@ typedef void (*_state_buff_cleanup_fn)(void* pRowBuff);
typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num);
typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);
typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal,
@ -45,6 +43,8 @@ typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, i
typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2);
typedef int (*__session_compare_fn_t)(const void* pWin, const void* pDatas, int pos);
int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp,
void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type,
struct SStreamFileState** ppFileState);
@ -54,6 +54,8 @@ bool needClearDiskBuff(SStreamFileState* pFileState);
void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used);
void streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t addRowBuffIfNotExist(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
@ -71,9 +73,11 @@ int32_t streamFileStateGetSelectRowSize(SStreamFileState* pFileState);
void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts);
void* getRowStateBuff(SStreamFileState* pFileState);
void* getSearchBuff(SStreamFileState* pFileState);
void* getStateFileStore(SStreamFileState* pFileState);
bool isDeteled(SStreamFileState* pFileState, TSKEY ts);
bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap);
TSKEY getFlushMark(SStreamFileState* pFileState);
SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState);
int32_t getRowStateRowSize(SStreamFileState* pFileState);
@ -94,6 +98,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* createStateCursor(SStreamFileState* pFileState);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
@ -103,6 +108,8 @@ void sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey,
range_cmpr_fn cmpFn);
int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_fn_t cmpFn);
// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* pWinCode);
@ -117,6 +124,34 @@ int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyL
int32_t* pWinCode);
int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
// time slice
int32_t getHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
int32_t hashSortFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen);
int32_t hashSortFileRemoveFn(SStreamFileState* pFileState, const void* pKey);
void clearSearchBuff(SStreamFileState* pFileState);
int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** pVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t getHashSortPrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t recoverFillSnapshot(SStreamFileState* pFileState, int64_t ckId);
void deleteHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey);
//group
int32_t streamFileStateGroupPut(SStreamFileState* pFileState, int64_t groupId, void* value, int32_t vLen);
void streamFileStateGroupCurNext(SStreamStateCur* pCur);
int32_t streamFileStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen);
SSHashObj* getGroupIdCache(SStreamFileState* pFileState);
int fillStateKeyCompare(const void* pWin1, const void* pDatas, int pos);
int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal,
int32_t* pVLen, int32_t* pWinCode);
int32_t addSearchItem(SStreamFileState* pFileState, SArray* pWinStates, const SWinKey* pKey);
//twa
void setFillInfo(SStreamFileState* pFileState);
void clearExpiredState(SStreamFileState* pFileState);
int32_t addArrayBuffIfNotExist(SSHashObj* pSearchBuff, uint64_t groupId, SArray** ppResStates);
#ifdef __cplusplus
}
#endif
View File
@ -36,8 +36,8 @@ bool updateInfoIsTableInserted(SUpdateInfo* pInfo, int64_t tbUid);
void updateInfoDestroy(SUpdateInfo* pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo* pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo);
int32_t updateInfoSerialize(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen);
int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo);
int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo);
void windowSBfDelete(SUpdateInfo* pInfo, uint64_t count);
int32_t windowSBfAdd(SUpdateInfo* pInfo, uint64_t count);
bool isIncrementalTimeStamp(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts, void* pPkVal, int32_t len);
View File
@ -104,6 +104,7 @@ int64_t taosGetPthreadId(TdThread thread);
void taosResetPthread(TdThread *thread);
bool taosComparePthread(TdThread first, TdThread second);
int32_t taosGetPId();
int32_t taosGetPIdByName(const char* name, int32_t* pPId);
int32_t taosGetAppName(char *name, int32_t *len);
#ifdef __cplusplus
View File
@ -93,7 +93,7 @@ static FORCE_INLINE int64_t taosGetMonoTimestampMs() {
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int32_t bufSize);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
time_t taosTime(time_t *t);
int32_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
const uint32_t min, const uint32_t sec, int64_t time_zone);
View File
@ -208,6 +208,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_TSC_COMPRESS_PARAM_ERROR TAOS_DEF_ERROR_CODE(0, 0X0233)
#define TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0234)
#define TSDB_CODE_TSC_FAIL_GENERATE_JSON TAOS_DEF_ERROR_CODE(0, 0X0235)
#define TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR TAOS_DEF_ERROR_CODE(0, 0X0236)
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X02FF)
// mnode-common
@ -352,6 +353,8 @@ int32_t taosGetErrSize();
#define TSDB_CODE_MND_INVALID_SYS_TABLENAME TAOS_DEF_ERROR_CODE(0, 0x039A)
#define TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE TAOS_DEF_ERROR_CODE(0, 0x039B)
#define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C)
#define TSDB_CODE_MND_INVALID_DNODE_LIST_FMT TAOS_DEF_ERROR_CODE(0, 0x039D)
#define TSDB_CODE_MND_DNODE_LIST_REPEAT TAOS_DEF_ERROR_CODE(0, 0x039E)
// mnode-node
#define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0)

View File
#define TSDB_CACHE_MODEL_LAST_ROW 1
#define TSDB_CACHE_MODEL_LAST_VALUE 2
#define TSDB_CACHE_MODEL_BOTH 3
#define TSDB_DNODE_LIST_LEN 256
#define TSDB_ENCRYPT_ALGO_STR_LEN 16
#define TSDB_ENCRYPT_ALGO_NONE_STR "none"
#define TSDB_ENCRYPT_ALGO_SM4_STR "sm4"
@ -492,13 +493,13 @@ typedef enum ELogicConditionType {
#define TSDB_MIN_S3_CHUNK_SIZE (128 * 1024)
#define TSDB_MAX_S3_CHUNK_SIZE (1024 * 1024)
#define TSDB_DEFAULT_S3_CHUNK_SIZE (256 * 1024)
#define TSDB_DEFAULT_S3_CHUNK_SIZE (128 * 1024)
#define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute
#define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440)
#define TSDB_DEFAULT_S3_KEEP_LOCAL (3650 * 1440)
#define TSDB_DEFAULT_S3_KEEP_LOCAL (365 * 1440)
#define TSDB_MIN_S3_COMPACT 0
#define TSDB_MAX_S3_COMPACT 1
#define TSDB_DEFAULT_S3_COMPACT 0
#define TSDB_DEFAULT_S3_COMPACT 1
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600
@ -652,6 +653,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
#define MONITOR_TAG_VALUE_LEN 300
#define MONITOR_METRIC_NAME_LEN 100
#define AUDIT_OPERATION_LEN 20
typedef enum {
ANAL_ALGO_TYPE_ANOMALY_DETECT = 0,
ANAL_ALGO_TYPE_FORECAST = 1,
View File
@ -70,7 +70,7 @@ extern int32_t tdbDebugFlag;
extern int32_t sndDebugFlag;
extern int32_t simDebugFlag;
extern int32_t tqClientDebug;
extern int32_t tqClientDebugFlag;
int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc);
void taosCloseLog();
View File
@ -20,11 +20,11 @@
extern "C" {
#endif
extern char version[];
extern char compatible_version[];
extern char gitinfo[];
extern char gitinfoOfInternal[];
extern char buildinfo[];
extern char td_version[];
extern char td_compatible_version[];
extern char td_gitinfo[];
extern char td_gitinfoOfInternal[];
extern char td_buildinfo[];
#ifdef __cplusplus
}
View File
@ -0,0 +1,59 @@
import subprocess
import re

# Run `git fetch` and capture its output
def git_fetch():
    result = subprocess.run(['git', 'fetch'], capture_output=True, text=True)
    return result

# Parse the branch name from the first error type
def parse_branch_name_type1(error_output):
    # Use a regex to match the branch name preceding 'is at'
    match = re.search(r"error: cannot lock ref '(refs/remotes/origin/[^']+)': is at", error_output)
    if match:
        return match.group(1)
    return None

# Parse the branch name from the second error type
def parse_branch_name_type2(error_output):
    # Use a regex to match the first quoted branch name preceding 'exists'
    match = re.search(r"'(refs/remotes/origin/[^']+)' exists;", error_output)
    if match:
        return match.group(1)
    return None

# Run `git update-ref -d` to drop the stale ref
def git_update_ref(branch_name):
    if branch_name:
        subprocess.run(['git', 'update-ref', '-d', f'{branch_name}'], check=True)

# Identify the error type and apply the corresponding fix
def handle_error(error_output):
    # Error type 1: the commit ID of the local ref differs from the remote
    if "is at" in error_output and "but expected" in error_output:
        branch_name = parse_branch_name_type1(error_output)
        if branch_name:
            print(f"Detected error type 1, attempting to delete ref for branch: {branch_name}")
            git_update_ref(branch_name)
        else:
            print("Error parsing branch name for type 1.")
    # Error type 2: a ref with the same name already exists locally when creating a new remote ref
    elif "exists; cannot create" in error_output:
        branch_name = parse_branch_name_type2(error_output)
        if branch_name:
            print(f"Detected error type 2, attempting to delete ref for branch: {branch_name}")
            git_update_ref(branch_name)
        else:
            print("Error parsing branch name for type 2.")

# Main entry point
def main():
    fetch_result = git_fetch()
    if fetch_result.returncode != 0:  # git fetch failed
        error_output = fetch_result.stderr
        handle_error(error_output)
    else:
        print("Git fetch successful.")

if __name__ == "__main__":
    main()
View File
@ -0,0 +1,319 @@
body {
font-family: Helvetica, Arial, sans-serif;
font-size: 12px;
/* do not increase min-width as some may use split screens */
min-width: 800px;
color: #999;
}
h1 {
font-size: 24px;
color: black;
}
h2 {
font-size: 16px;
color: black;
}
p {
color: black;
}
a {
color: #999;
}
table {
border-collapse: collapse;
}
/******************************
* SUMMARY INFORMATION
******************************/
#environment td {
padding: 5px;
border: 1px solid #e6e6e6;
vertical-align: top;
}
#environment tr:nth-child(odd) {
background-color: #f6f6f6;
}
#environment ul {
margin: 0;
padding: 0 20px;
}
/******************************
* TEST RESULT COLORS
******************************/
span.passed,
.passed .col-result {
color: green;
}
span.skipped,
span.xfailed,
span.rerun,
.skipped .col-result,
.xfailed .col-result,
.rerun .col-result {
color: orange;
}
span.error,
span.failed,
span.xpassed,
.error .col-result,
.failed .col-result,
.xpassed .col-result {
color: red;
}
.col-links__extra {
margin-right: 3px;
}
/******************************
* RESULTS TABLE
*
* 1. Table Layout
* 2. Extra
* 3. Sorting items
*
******************************/
/*------------------
* 1. Table Layout
*------------------*/
#results-table {
border: 1px solid #e6e6e6;
color: #999;
font-size: 12px;
width: 100%;
}
#results-table th,
#results-table td {
padding: 5px;
border: 1px solid #e6e6e6;
text-align: left;
}
#results-table th {
font-weight: bold;
}
/*------------------
* 2. Extra
*------------------*/
.logwrapper {
max-height: 230px;
overflow-y: scroll;
background-color: #e6e6e6;
}
.logwrapper.expanded {
max-height: none;
}
.logwrapper.expanded .logexpander:after {
content: "collapse [-]";
}
.logwrapper .logexpander {
z-index: 1;
position: sticky;
top: 10px;
width: max-content;
border: 1px solid;
border-radius: 3px;
padding: 5px 7px;
margin: 10px 0 10px calc(100% - 80px);
cursor: pointer;
background-color: #e6e6e6;
}
.logwrapper .logexpander:after {
content: "expand [+]";
}
.logwrapper .logexpander:hover {
color: #000;
border-color: #000;
}
.logwrapper .log {
min-height: 40px;
position: relative;
top: -50px;
height: calc(100% + 50px);
border: 1px solid #e6e6e6;
color: black;
display: block;
font-family: "Courier New", Courier, monospace;
padding: 5px;
padding-right: 80px;
white-space: pre-wrap;
}
div.media {
border: 1px solid #e6e6e6;
float: right;
height: 240px;
margin: 0 5px;
overflow: hidden;
width: 320px;
}
.media-container {
display: grid;
grid-template-columns: 25px auto 25px;
align-items: center;
flex: 1 1;
overflow: hidden;
height: 200px;
}
.media-container--fullscreen {
grid-template-columns: 0px auto 0px;
}
.media-container__nav--right,
.media-container__nav--left {
text-align: center;
cursor: pointer;
}
.media-container__viewport {
cursor: pointer;
text-align: center;
height: inherit;
}
.media-container__viewport img,
.media-container__viewport video {
object-fit: cover;
width: 100%;
max-height: 100%;
}
.media__name,
.media__counter {
display: flex;
flex-direction: row;
justify-content: space-around;
flex: 0 0 25px;
align-items: center;
}
.collapsible td:not(.col-links) {
cursor: pointer;
}
.collapsible td:not(.col-links):hover::after {
color: #bbb;
font-style: italic;
cursor: pointer;
}
.col-result {
width: 130px;
}
.col-result:hover::after {
content: " (hide details)";
}
.col-result.collapsed:hover::after {
content: " (show details)";
}
#environment-header h2:hover::after {
content: " (hide details)";
color: #bbb;
font-style: italic;
cursor: pointer;
font-size: 12px;
}
#environment-header.collapsed h2:hover::after {
content: " (show details)";
color: #bbb;
font-style: italic;
cursor: pointer;
font-size: 12px;
}
/*------------------
* 3. Sorting items
*------------------*/
.sortable {
cursor: pointer;
}
.sortable.desc:after {
content: " ";
position: relative;
left: 5px;
bottom: -12.5px;
border: 10px solid #4caf50;
border-bottom: 0;
border-left-color: transparent;
border-right-color: transparent;
}
.sortable.asc:after {
content: " ";
position: relative;
left: 5px;
bottom: 12.5px;
border: 10px solid #4caf50;
border-top: 0;
border-left-color: transparent;
border-right-color: transparent;
}
.hidden, .summary__reload__button.hidden {
display: none;
}
.summary__data {
flex: 0 0 550px;
}
.summary__reload {
flex: 1 1;
display: flex;
justify-content: center;
}
.summary__reload__button {
flex: 0 0 300px;
display: flex;
color: white;
font-weight: bold;
background-color: #4caf50;
text-align: center;
justify-content: center;
align-items: center;
border-radius: 3px;
cursor: pointer;
}
.summary__reload__button:hover {
background-color: #46a049;
}
.summary__spacer {
flex: 0 0 550px;
}
.controls {
display: flex;
justify-content: space-between;
}
.filters,
.collapse {
display: flex;
align-items: center;
}
.filters button,
.collapse button {
color: #999;
border: none;
background: none;
cursor: pointer;
text-decoration: underline;
}
.filters button:hover,
.collapse button:hover {
color: #ccc;
}
.filter__label {
margin-right: 10px;
}

View File

@ -0,0 +1,115 @@
# conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption(
"--verMode", default="enterprise", help="community or enterprise"
)
parser.addoption(
"--tVersion", default="3.3.2.6", help="the version of taos"
)
parser.addoption(
"--baseVersion", default="smoking", help="the path of nas"
)
parser.addoption(
"--sourcePath", default="nas", help="only support nas currently"
)
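# A typical invocation wiring these options together (values illustrative,
# mirroring the smoke-test pipeline):
#   python3 -m pytest test_server.py -v --verMode=enterprise --tVersion=3.3.2.6 --baseVersion=smoking --sourcePath=nas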
# Collect the setup and teardown of each test case and their std information
setup_stdout_info = {}
teardown_stdout_info = {}
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
# Record the std of setup and teardown
if call.when == 'setup':
for i in rep.sections:
if i[0] == "Captured stdout setup":
if not setup_stdout_info:
setup_stdout_info[item.nodeid] = i[1]
elif call.when == 'teardown':
for i in rep.sections:
if i[0] == "Captured stdout teardown":
teardown_stdout_info[item.nodeid] = i[1]
# Insert setup and teardown's std in the summary section
def pytest_html_results_summary(prefix, summary, postfix):
if setup_stdout_info or teardown_stdout_info:
rows = []
# Insert setup stdout
if setup_stdout_info:
for nodeid, stdout in setup_stdout_info.items():
html_content = '''
<tr>
<td><b><span style="font-size: larger; color: black;">Setup:</span></b></td>
<td colspan="4">
<a href="#" id="toggleSetup">Show Setup</a>
<div id="setupContent" class="collapsible-content" style="display: none; white-space: pre-wrap; margin-top: 5px;">
<pre>{}</pre>
</div>
</td>
</tr>
'''.format(stdout.strip())
# Generate the HTML in the Python script and use JavaScript to toggle the collapsible content:
html_script = '''
<script>
document.getElementById('toggleSetup').addEventListener('click', function(event) {
event.preventDefault();
var setupContentDiv = document.getElementById('setupContent');
setupContentDiv.style.display = setupContentDiv.style.display === 'none' ? 'block' : 'none';
var buttonText = setupContentDiv.style.display === 'none' ? 'Show Setup' : 'Hide Setup';
this.textContent = buttonText;
});
</script>
'''
# Emit the complete HTML snippet
final_html = html_content + html_script
rows.append(final_html)
rows.append("<br>")
# Insert teardown stdout
if teardown_stdout_info:
for nodeid, stdout in teardown_stdout_info.items():
html_content = '''
<tr>
<td><b><span style="font-size: larger; color: black;">Teardown:</span></b></td>
<td colspan="4">
<a href="#" id="toggleTeardown">Show Teardown</a>
<div id="teardownContent" class="collapsible-content" style="display: none; white-space: pre-wrap; margin-top: 5px;">
<pre>{}</pre>
</div>
</td>
</tr>
'''.format(stdout.strip())
# Generate the HTML in the Python script and use JavaScript to toggle the collapsible content:
html_script = '''
<script>
document.getElementById('toggleTeardown').addEventListener('click', function(event) {
event.preventDefault();
var teardownContentDiv = document.getElementById('teardownContent');
teardownContentDiv.style.display = teardownContentDiv.style.display === 'none' ? 'block' : 'none';
var buttonText = teardownContentDiv.style.display === 'none' ? 'Show Teardown' : 'Hide Teardown';
this.textContent = buttonText;
});
</script>
'''
# Emit the complete HTML snippet
final_html = html_content + html_script
rows.append(final_html)
prefix.extend(rows)

View File

@ -0,0 +1,15 @@
#!/usr/bin/expect
set packageName [lindex $argv 0]
set packageSuffix [lindex $argv 1]
set timeout 30
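# Example invocation (package name illustrative), as driven by getAndRunInstaller.sh:
#   ./debRpmAutoInstall.sh TDengine-server-3.0.1.7-Linux-x64.deb deb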
if { ${packageSuffix} == "deb" } {
spawn dpkg -i ${packageName}
} elseif { ${packageSuffix} == "rpm"} {
spawn rpm -ivh ${packageName}
}
expect "*one:"
send "\r"
expect "*skip:"
send "\r"
expect eof

View File

@ -0,0 +1,57 @@
set baseVersion=%1
set version=%2
set verMode=%3
set sType=%4
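rem Example invocation (values illustrative, matching the Jenkins pipeline):
rem   call getAndRunInstaller.bat smoking 3.3.2.6 enterprise server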
rem stop services
if EXIST C:\TDengine (
if EXIST C:\TDengine\stop-all.bat (
call C:\TDengine\stop-all.bat /silent
echo "***************Stop taos services***************"
)
if exist C:\TDengine\unins000.exe (
call C:\TDengine\unins000.exe /silent
echo "***************uninstall TDengine***************"
)
rd /S /q C:\TDengine
)
if EXIST C:\ProDB (
if EXIST C:\ProDB\stop-all.bat (
call C:\ProDB\stop-all.bat /silent
echo "***************Stop taos services***************"
)
if exist C:\ProDB\unins000.exe (
call C:\ProDB\unins000.exe /silent
echo "***************uninstall TDengine***************"
)
rd /S /q C:\ProDB
)
if "%verMode%"=="enterprise" (
if "%sType%"=="client" (
set fileType=enterprise-client
) else (
set fileType=enterprise
)
) else (
set fileType=%sType%
)
if "%baseVersion%"=="ProDB" (
echo %fileType%
set installer=ProDB-%fileType%-%version%-Windows-x64.exe
) else (
echo %fileType%
set installer=TDengine-%fileType%-%version%-Windows-x64.exe
)
if "%baseVersion%"=="ProDB" (
echo %installer%
scp root@192.168.1.213:/nas/OEM/ProDB/v%version%/%installer% C:\workspace
) else (
echo %installer%
scp root@192.168.1.213:/nas/TDengine/%baseVersion%/v%version%/%verMode%/%installer% C:\workspace
)
echo "***************Finish installer transfer!***************"
C:\workspace\%installer% /silent
echo "***************Finish install!***************"

View File

@ -0,0 +1,325 @@
#!/bin/bash
function usage() {
echo "$0"
echo -e "\t -f test file type, server/client/tools"
echo -e "\t -m package version type, community/enterprise"
echo -e "\t -l lite package or not, true/false"
echo -e "\t -c cpu type, x64/arm64"
echo -e "\t -v package version, eg: 3.0.1.7"
echo -e "\t -o origin (base) version path, eg: smoking"
echo -e "\t -s source path, web/nas"
echo -e "\t -t package type, tar/rpm/deb"
echo -e "\t -h help"
}
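# Example invocation (values illustrative, matching the smoke-test pipeline):
#   bash getAndRunInstaller.sh -m community -f server -l false -c x64 -v 3.0.1.7 -o smoking -s nas -t tar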
#parameter
scriptDir=$(dirname $(readlink -f $0))
version="3.0.1.7"
originversion="smoking"
testFile="server"
verMode="communtity"
sourcePath="nas"
cpuType="x64"
lite="true"
packageType="tar"
subFile="package.tar.gz"
while getopts "m:c:f:l:s:o:t:v:h" opt; do
case $opt in
m)
verMode=$OPTARG
;;
v)
version=$OPTARG
;;
f)
testFile=$OPTARG
;;
l)
lite=$OPTARG
;;
s)
sourcePath=$OPTARG
;;
o)
originversion=$OPTARG
;;
c)
cpuType=$OPTARG
;;
t)
packageType=$OPTARG
;;
h)
usage
exit 0
;;
?)
echo "Invalid option: -$OPTARG"
usage
exit 1
;;
esac
done
systemType=`uname`
if [ ${systemType} == "Darwin" ]; then
platform="macOS"
else
platform="Linux"
fi
echo "testFile:${testFile},verMode:${verMode},lite:${lite},cpuType:${cpuType},packageType:${packageType},version-${version},originversion:${originversion},sourcePath:${sourcePath}"
# Color setting
RED='\033[41;30m'
GREEN='\033[1;32m'
YELLOW='\033[1;33m'
BLUE='\033[1;34m'
GREEN_DARK='\033[0;32m'
YELLOW_DARK='\033[0;33m'
BLUE_DARK='\033[0;34m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
if [ "${originversion}" = "ProDB" ]; then
TDengine="ProDB"
else
TDengine="TDengine"
fi
if [[ ${verMode} = "enterprise" ]];then
prePackage="${TDengine}-enterprise"
if [[ ${testFile} = "client" ]];then
prePackage="${TDengine}-enterprise-${testFile}"
fi
elif [ ${verMode} = "community" ];then
prePackage="${TDengine}-${testFile}"
fi
if [ ${lite} = "true" ];then
packageLite="-Lite"
elif [ ${lite} = "false" ];then
packageLite=""
fi
if [[ "$packageType" = "tar" ]] ;then
packageType="tar.gz"
fi
tdPath="${prePackage}-${version}"
packageName="${tdPath}-${platform}-${cpuType}${packageLite}.${packageType}"
if [ "$testFile" == "server" ] ;then
installCmd="install.sh"
elif [ ${testFile} = "client" ];then
installCmd="install_client.sh"
fi
echo "tdPath:${tdPath},packageName:${packageName}}"
cmdInstall() {
command=$1
if command -v ${command} ;then
echoColor YD "${command} is already installed"
else
if command -v apt ;then
apt-get install ${command} -y
elif command -v yum ;then
yum -y install ${command}
else
echoColor YD "you should install ${command} manually"
fi
fi
}
echoColor() {
color=$1
command=$2
if [ ${color} = 'Y' ];then
echo -e "${YELLOW}${command}${NC}"
elif [ ${color} = 'YD' ];then
echo -e "${YELLOW_DARK}${command}${NC}"
elif [ ${color} = 'R' ];then
echo -e "${RED}${command}${NC}"
elif [ ${color} = 'G' ];then
echo -e "${GREEN}${command}${NC}\r\n"
elif [ ${color} = 'B' ];then
echo -e "${BLUE}${command}${NC}"
elif [ ${color} = 'BD' ];then
echo -e "${BLUE_DARK}${command}${NC}"
fi
}
wgetFile() {
file=$1
versionPath=$2
sourceP=$3
nasServerIP="192.168.1.213"
if [ "${originversion}" = "ProDB" ]; then
packagePath="/nas/OEM/ProDB/v${versionPath}"
else
packagePath="/nas/TDengine/${originversion}/v${versionPath}/${verMode}"
fi
if [ -f ${file} ];then
echoColor YD "${file} already exists, it will be downloaded again"
# rm -rf ${file}
fi
if [[ ${sourceP} = 'web' ]];then
echoColor BD "====download====:wget https://www.taosdata.com/assets-download/3.0/${file}"
wget https://www.taosdata.com/assets-download/3.0/${file}
elif [[ ${sourceP} = 'nas' ]];then
echoColor BD "====download====:scp root@${nasServerIP}:${packagePath}/${file} ."
scp root@${nasServerIP}:${packagePath}/${file} .
fi
}
function newPath {
buildPath=$1
if [ ! -d ${buildPath} ] ;then
echoColor BD "mkdir -p ${buildPath}"
mkdir -p ${buildPath}
else
echoColor YD "${buildPath} already exists"
fi
}
echoColor G "===== install basesoft ====="
cmdInstall tree
cmdInstall wget
cmdInstall expect
echoColor G "===== Uninstall all components of TDeingne ====="
if command -v rmtaos ;then
echoColor YD "uninstall all components of TDeingne:rmtaos"
rmtaos
else
echoColor YD "os doesn't include TDengine"
fi
if [[ ${packageName} =~ "server" ]] ;then
echoColor BD " pkill -9 taosd "
pkill -9 taosd
fi
if command -v rmprodb ;then
echoColor YD "uninstall all components of TDeingne:rmprodb"
rmprodb
else
echoColor YD "os doesn't include TDengine"
fi
if [[ ${packageName} =~ "server" ]] ;then
echoColor BD " pkill -9 prodbd "
pkill -9 prodbd
fi
echoColor G "===== new workroom path ====="
installPath="/usr/local/src/packageTest"
if [ ${systemType} == "Darwin" ]; then
installPath="${WORK_DIR}/packageTest"
fi
newPath ${installPath}
#if [ -d ${installPath}/${tdPath} ] ;then
# echoColor BD "rm -rf ${installPath}/${tdPath}/*"
# rm -rf ${installPath}/${tdPath}/*
#fi
echoColor G "===== download installPackage ====="
cd ${installPath} && wgetFile ${packageName} ${version} ${sourcePath}
#cd ${oriInstallPath} && wgetFile ${originPackageName} ${originversion} ${sourcePath}
cd ${installPath}
cp -r ${scriptDir}/debRpmAutoInstall.sh .
packageSuffix=$(echo ${packageName} | awk -F '.' '{print $NF}')
if [ ! -f debRpmAutoInstall.sh ];then
echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
echo 'set packageName [lindex $argv 0]' >> debRpmAutoInstall.sh
echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
echo 'set timeout 30 ' >> debRpmAutoInstall.sh
echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
echo ' spawn dpkg -i ${packageName} ' >> debRpmAutoInstall.sh
echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
echo ' spawn rpm -ivh ${packageName}' >> debRpmAutoInstall.sh
echo '}' >> debRpmAutoInstall.sh
echo 'expect "*one:"' >> debRpmAutoInstall.sh
echo 'send "\r"' >> debRpmAutoInstall.sh
echo 'expect "*skip:"' >> debRpmAutoInstall.sh
echo 'send "\r" ' >> debRpmAutoInstall.sh
fi
echoColor G "===== install Package ====="
if [[ ${packageName} =~ "deb" ]];then
cd ${installPath}
dpkg -r taostools
dpkg -r tdengine
if [[ ${packageName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packageName} ${packageSuffix}
else
echoColor BD "dpkg -i ${packageName}" && dpkg -i ${packageName}
fi
elif [[ ${packageName} =~ "rpm" ]];then
cd ${installPath}
sudo rpm -e tdengine
sudo rpm -e taostools
if [[ ${packageName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packageName} ${packageSuffix}
else
echoColor BD "rpm -ivh ${packageName}" && rpm -ivh ${packageName}
fi
elif [[ ${packageName} =~ "tar" ]];then
echoColor G "===== check installPackage File of tar ====="
cd ${installPath}
echoColor YD "unzip the new installation package"
echoColor BD "tar -xf ${packageName}" && tar -xf ${packageName}
cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile
cd ${installPath}
diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
if [ ${diffNumbers} != 0 ];then
echoColor R "The number and names of files differ from the previous installation package"
diffLog=`cat ${installPath}/diffFile.log`
echoColor Y "${diffLog}"
exit 1
else
echoColor G "The number and names of files are the same as in the previous installation package"
rm -rf ${installPath}/diffFile.log
fi
echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
if [ ${testFile} = "server" ];then
echoColor BD "bash ${installCmd} -e no "
bash ${installCmd} -e no
else
echoColor BD "bash ${installCmd} "
bash ${installCmd}
fi
elif [[ ${packageName} =~ "pkg" ]];then
cd ${installPath}
sudo installer -pkg ${packageName} -target /
echoColor YD "===== install Package successfully! ====="
fi
#cd ${installPath}
#
#rm -rf ${installPath}/${packageName}
#if [ ${platform} == "Linux" ]; then
# rm -rf ${installPath}/${tdPath}/
#fi
echoColor YD "===== end of shell file ====="

View File

@ -0,0 +1,12 @@
import subprocess
def run_cmd(command):
print("CMD:", command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
#assert result.returncode == 0
return result
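# Example (illustrative):
#   result = run_cmd("taos -V")
#   if result.returncode != 0:
#       print("taos CLI not available")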

View File

@ -0,0 +1,21 @@
import pytest
# python3 -m pytest test_server.py -v --html=/var/www/html/report.html --json-report --json-report-file="/var/www/html/report.json" --timeout=60
# pytest.main(["-s", "-v"])
import pytest
import subprocess
# define cmd function
def main():
pytest.main(['--html=report.html'])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,17 @@
pytest-html
pytest-json-report
pytest-timeout
taospy
numpy
fabric2
psutil
pandas
toml
distro
requests
pexpect
faker
pyopenssl
taos-ws-py
taospy
tzlocal

View File

@ -0,0 +1,11 @@
rm -rf %WIN_TDENGINE_ROOT_DIR%\debug
mkdir %WIN_TDENGINE_ROOT_DIR%\debug
mkdir %WIN_TDENGINE_ROOT_DIR%\debug\build
mkdir %WIN_TDENGINE_ROOT_DIR%\debug\build\bin
xcopy C:\TDengine\taos*.exe %WIN_TDENGINE_ROOT_DIR%\debug\build\bin
set case_out_file=%cd%\case.out
cd %WIN_TDENGINE_ROOT_DIR%\tests\system-test
python3 .\test.py -f 0-others\taosShell.py
python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3

View File

@ -0,0 +1,29 @@
#!/bin/bash
ulimit -c unlimited
rm -rf ${TDENGINE_ROOT_DIR}/debug
mkdir ${TDENGINE_ROOT_DIR}/debug
mkdir ${TDENGINE_ROOT_DIR}/debug/build
mkdir ${TDENGINE_ROOT_DIR}/debug/build/bin
systemType=`uname`
if [ ${systemType} == "Darwin" ]; then
cp /usr/local/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/
else
cp /usr/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/
fi
case_out_file=`pwd`/case.out
python3 -m pip install -r ${TDENGINE_ROOT_DIR}/tests/requirements.txt >> $case_out_file
python3 -m pip install taos-ws-py taospy >> $case_out_file
cd ${TDENGINE_ROOT_DIR}/tests/army
python3 ./test.py -f query/query_basic.py -N 3 >> $case_out_file
cd ${TDENGINE_ROOT_DIR}/tests/system-test
python3 ./test.py -f 1-insert/insert_column_value.py >> $case_out_file
python3 ./test.py -f 2-query/primary_ts_base_5.py >> $case_out_file
python3 ./test.py -f 2-query/case_when.py >> $case_out_file
python3 ./test.py -f 2-query/partition_limit_interval.py >> $case_out_file
python3 ./test.py -f 2-query/join.py >> $case_out_file
python3 ./test.py -f 2-query/fill.py >> $case_out_file

View File

@ -0,0 +1,251 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
# input for server
opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:P:v:u', [
'host=', 'Port=', 'version=', 'help'])
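# Example invocation (script and host names illustrative):
#   python3 versionCheckAndUninstall.py -h 192.168.0.21 -P 6030 -v 3.3.2.6 -u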
serverHost = ""
serverPort = 0
version = ""
uninstall = False
for key, value in opts:
if key in ['--help']:
print('A collection of test cases written using Python')
print('-h serverHost')
print('-P serverPort')
print('-v test client version')
print('-u test uninstall process, will uninstall TDengine')
sys.exit(0)
if key in ['-h']:
serverHost = value
if key in ['-P']:
serverPort = int(value)
if key in ['-v']:
version = value
if key in ['-u']:
uninstall = True
if not serverHost:
print("Please input use -h to specify your server host.")
sys.exit(0)
if not version:
print("No version specified, will not run version check.")
if serverPort == 0:
serverPort = 6030
print("No server port specified, use default 6030.")
system = platform.system()
arch = platform.machine()
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# prepare data by taosBenchmark
cmd = "taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d &" % (databaseName, serverHost, serverPort)
process_out = subprocess.getoutput(cmd)
print(cmd)
#os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort))
taosBenchmark_test_result = True
time.sleep(10)
import taos
conn = taos.connect(host=serverHost,
user="root",
password="taosdata",
database=databaseName,
port=serverPort,
timezone="Asia/Shanghai") # default your host's timezone
server_version = conn.server_info
print("server_version", server_version)
client_version = conn.client_info
print("client_version", client_version) # 3.0.0.0
# Execute a sql and get its result set. It's useful for SELECT statement
result: taos.TaosResult = conn.query("SELECT count(*) from meters")
data = result.fetch_all()
print(data)
if data[0][0] != 10000:
print("taosBenchmark did not work as expected")
print("!!!!!!!!!!!Test Result: taosBenchmark test failed! !!!!!!!!!!")
sys.exit(1)
#else:
# print("**********Test Result: taosBenchmark test passed **********")
# drop database of test
taos_test_result = False
print("drop database test")
print("run taos -s 'drop database %s;' -h %s -P %d" % (databaseName, serverHost, serverPort))
taos_cmd_output = subprocess.getoutput('taos -s "drop database %s;" -h %s -P %d' % (databaseName, serverHost, serverPort))
print(taos_cmd_output)
if ("Drop OK" in taos_cmd_output):
taos_test_result = True
#print("*******Test Result: taos test passed ************")
version_test_result = False
if version:
print("Client info is: %s"%conn.client_info)
taos_V_output = ""
if system == "Windows":
taos_V_output = subprocess.getoutput("taos -V | findstr version")
else:
taos_V_output = subprocess.getoutput("taos -V | grep version")
print("taos -V output is: %s" % taos_V_output)
if version in taos_V_output and version in conn.client_info:
version_test_result = True
#print("*******Test Result: Version check passed ************")
conn.close()
leftFile = False
if uninstall:
print("Start to run rmtaos")
print("Platform: ", system)
if system == "Linux":
# Create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush() # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush() # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Verify that the directories were removed
out = subprocess.getoutput("ls /etc/systemd/system/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/taos")
#print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/taos%s" % out)
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# Create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['sudo', 'rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush() # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush() # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Verify that the directories were removed
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
#out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
#print(out)
#if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
#if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000','/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
out = subprocess.getoutput("ls C:\TDengine")
print(out)
if len(out.split("\n")) > 3:
leftFile = True
print("Uninstall left some files: %s" % out)
if taosBenchmark_test_result:
print("**********Test Result: taosBenchmark test passed! **********")
if taos_test_result:
print("**********Test Result: taos test passed! **********")
else:
print("!!!!!!!!!!!Test Result: taos test failed! !!!!!!!!!!")
if version_test_result:
print("**********Test Result: version test passed! **********")
else:
print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!")
if not leftFile:
print("**********Test Result: uninstall test passed! **********")
else:
print("!!!!!!!!!!!Test Result: uninstall test failed! !!!!!!!!!!")
if taosBenchmark_test_result and taos_test_result and version_test_result and not leftFile:
sys.exit(0)
else:
sys.exit(1)

View File

@ -0,0 +1,380 @@
def sync_source(branch_name) {
sh '''
hostname
ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
echo ''' + branch_name + '''
'''
sh '''
cd ${TDENGINE_ROOT_DIR}
set +e
git reset --hard
git fetch || git fetch
git checkout -f '''+branch_name+'''
git reset --hard origin/'''+branch_name+'''
git log | head -n 20
git clean -fxd
set -e
'''
return 1
}
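// Example (illustrative): sync_source("test/3.0/smokeTest") resets the Linux
// workspace to that branch before a smoke-test stage runs.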
def sync_source_win() {
bat '''
hostname
taskkill /f /t /im taosd.exe
ipconfig
set
date /t
time /t
'''
bat '''
echo %branch_name%
cd %WIN_TDENGINE_ROOT_DIR%
git reset --hard
git fetch || git fetch
git checkout -f ''' + env.BRANCH_NAME + '''
git reset --hard origin/''' + env.BRANCH_NAME + '''
git branch
git restore .
git remote prune origin
git pull || git pull
git log | head -n 20
git clean -fxd
'''
return 1
}
pipeline {
agent none
parameters {
choice(
name: 'sourcePath',
choices: ['nas','web'],
description: 'Choose how to download the installation package; web means the official website and nas means the taos NAS server'
)
choice(
name: 'verMode',
choices: ['enterprise','community'],
description: 'Choose which type of package you want to check'
)
string (
name:'version',
defaultValue:'3.3.2.0',
description: 'Release version number, eg: 3.0.0.1'
)
string (
name:'baseVersion',
defaultValue:'smoking',
description: 'The nas root path, eg: smoking, 3.3'
)
choice (
name:'mode',
choices: ['server','client'],
description: 'Choose which mode of package you want to run'
)
choice (
name:'smoke_branch',
choices: ['test/3.0/smokeTest','test/main/smokeTest','test/3.1/smokeTest'],
description: 'Choose which smoke test branch to run'
)
string (
name:'runPlatforms',
defaultValue:'server_Linux_x64, server_Linux_arm64, server_Windows_x64, server_Mac_x64',
description: 'Package list to run. hotfix usually runs - server: server_Linux_x64, server_Linux_arm64; client: client_Linux_x64, client_Linux_arm64. release usually runs - enterprise server: server_Linux_x64, server_Linux_arm64, server_Windows_x64; enterprise client: client_Linux_x64, client_Linux_arm64, client_Windows_x64; community server: server_Linux_x64, server_Linux_arm64, server_Mac_x64, server_Mac_arm64(not supported), server_Linux_x64_lite(not supported); community client: client_Linux_x64, client_Linux_arm64, client_Windows_x64, client_Mac_x64, client_Mac_arm64(not supported), client_Linux_x64_lite(not supported)'
)
}
environment{
WORK_DIR = "/var/lib/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
BRANCH_NAME = "${smoke_branch}"
}
stages {
stage ('Start Server for Client Test') {
when {
beforeAgent true
expression { mode == 'client' }
}
agent{label " ubuntu18 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
withEnv(['JENKINS_NODE_COOKIE=dontkillme']) {
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
bash start3NodesServer.sh
'''
}
}
}
}
stage ('Run SmokeTest') {
parallel {
stage('server_Linux_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Linux_x64') }
}
}
agent{label " ubuntu16 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_server.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_x64"
'''
}
}
}
stage('server_Linux_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Linux_arm64') }
}
}
agent{label "worker06_arm64"}
steps {
timeout(time: 60, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_server.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=600 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_arm64"
'''
}
}
}
stage ('server_Mac_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Mac_x64') }
}
}
agent{label " release_Darwin_x64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
python3 -m pytest -v -k linux --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_x64"
'''
}
}
}
stage ('server_Mac_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Mac_arm64') }
}
}
agent{label " release_Darwin_arm64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
python3 -m pytest -v -k linux --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_arm64"
'''
}
}
}
stage('server_Windows_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Windows_x64') }
}
}
agent{label " windows11 "}
environment{
WIN_WORK_DIR="C:\\workspace"
WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal"
WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community"
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source_win()
bat '''
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
call getAndRunInstaller.bat %baseVersion% %version% %verMode% server
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
pip3 install -r pytest_require.txt
python3 -m pytest test_server.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath%
scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/
scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=server&build=Windows_x64"
'''
}
}
}
stage('client_Linux_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Linux_x64') }
}
}
agent{label " ubuntu16 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_client.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_x64"
'''
}
}
}
stage('client_Linux_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Linux_arm64') }
}
}
agent{label " worker06_arm64 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_client.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_arm64"
'''
}
}
}
stage ('client_Mac_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Mac_x64') }
}
}
agent{label " release_Darwin_x64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
rm -rf /opt/taos/main/TDinternal/debug/* || true
python3 -m pytest test_client.py -v --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_x64"
'''
}
}
}
stage ('client_Mac_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Mac_arm64') }
}
}
agent{label " release_Darwin_arm64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
rm -rf /opt/taos/main/TDinternal/debug/* || true
python3 -m pytest test_client.py -v --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_arm64"
'''
}
}
}
stage('client_Windows_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Windows_x64') }
}
}
agent{label " windows71 "}
environment{
WIN_WORK_DIR="C:\\workspace"
WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal"
WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community"
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source_win()
bat '''
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
call getAndRunInstaller.bat %baseVersion% %version% %verMode% client
pip3 install -r pytest_require.txt
python3 -m pytest test_client.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath%
scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/
scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=client&build=Windows_x64"
'''
}
}
}
}
}
}
}

View File

@ -0,0 +1,67 @@
#!/bin/bash
BUILD_ID=dontKillMe
#******This script setup 3 nodes env for remote client installer test. Only for Linux *********
pwd=`pwd`
hostname=`hostname`
if [ -z "$JENKINS_HOME" ]; then
workdir="${pwd}/cluster"
echo $workdir
else
workdir="${JENKINS_HOME}/workspace/cluster"
echo $workdir
fi
name="taos"
if command -v prodb ;then
name="prodb"
fi
# Stop all taosd processes
for(( i=0; i<3; i++))
do
pid=$(ps -ef | grep ${name}d | grep -v grep | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
done
# Init 3 dnodes workdir and config file
rm -rf ${workdir}
mkdir ${workdir}
mkdir ${workdir}/output
mkdir ${workdir}/dnode1
mkdir ${workdir}/dnode1/data
mkdir ${workdir}/dnode1/log
mkdir ${workdir}/dnode1/cfg
touch ${workdir}/dnode1/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6031\nlogDir ${workdir}/dnode1/log\ndataDir ${workdir}/dnode1/data\n" >> ${workdir}/dnode1/cfg/${name}.cfg
# Start first node
nohup ${name}d -c ${workdir}/dnode1/cfg/${name}.cfg > /dev/null &
sleep 5
${name} -P 6031 -s "CREATE DNODE \`${hostname}:6032\`;CREATE DNODE \`${hostname}:6033\`"
mkdir ${workdir}/dnode2
mkdir ${workdir}/dnode2/data
mkdir ${workdir}/dnode2/log
mkdir ${workdir}/dnode2/cfg
touch ${workdir}/dnode2/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6032\nlogDir ${workdir}/dnode2/log\ndataDir ${workdir}/dnode2/data\n" >> ${workdir}/dnode2/cfg/${name}.cfg
nohup ${name}d -c ${workdir}/dnode2/cfg/${name}.cfg > /dev/null &
sleep 5
mkdir ${workdir}/dnode3
mkdir ${workdir}/dnode3/data
mkdir ${workdir}/dnode3/log
mkdir ${workdir}/dnode3/cfg
touch ${workdir}/dnode3/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6033\nlogDir ${workdir}/dnode3/log\ndataDir ${workdir}/dnode3/data\n" >> ${workdir}/dnode3/cfg/${name}.cfg
nohup ${name}d -c ${workdir}/dnode3/cfg/${name}.cfg > /dev/null &
sleep 5
${name} -P 6031 -s "CREATE MNODE ON DNODE 2;CREATE MNODE ON DNODE 3;"

View File

@ -0,0 +1,137 @@
import pytest
import subprocess
import os
import sys
import platform
import getopt
import re
import time
import taos
from versionCheckAndUninstallforPytest import UninstallTaos
# python3 smokeTestClient.py -h 192.168.0.22 -P 6031 -v ${version} -u
OEM = ["ProDB"]
@pytest.fixture(scope="module")
def get_config(request):
verMode = request.config.getoption("--verMode")
taosVersion = request.config.getoption("--tVersion")
baseVersion = request.config.getoption("--baseVersion")
sourcePath = request.config.getoption("--sourcePath")
config = {
"verMode": verMode,
"taosVersion": taosVersion,
"baseVersion": baseVersion,
"sourcePath": sourcePath,
"system": platform.system(),
"arch": platform.machine(),
"serverHost": "192.168.0.22",
"serverPort": 6031,
"databaseName": re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
}
return config
@pytest.fixture(scope="module")
def setup_module(get_config):
config = get_config
# install taospy
if config["system"] == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
def get_connect(host, port, database=None):
conn = taos.connect(host=host,
user="root",
password="taosdata",
database=database,
port=port,
timezone="Asia/Shanghai") # default your host's timezone
return conn
def run_cmd(command):
print("CMD: %s" % command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
assert result.returncode == 0
return result
class TestClient:
@pytest.mark.all
def test_basic(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
if config["baseVersion"] in OEM and config["system"] == 'Windows':
cmd = f'{name} -s "create database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}'
run_cmd(cmd)
cmd = f'{name} -s "CREATE STABLE {config["databaseName"]}.meters (`ts` TIMESTAMP,`current` FLOAT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(24));" -h {config["serverHost"]} -P {config["serverPort"]}'
run_cmd(cmd)
else:
cmd = f'{name}Benchmark -y -a 3 -n 100 -t 100 -d {config["databaseName"]} -h {config["serverHost"]} -P {config["serverPort"]} &'
run_cmd(cmd)
# os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort))
time.sleep(5)
conn = get_connect(config["serverHost"], config["serverPort"], config["databaseName"])
sql = "SELECT count(*) from meters"
result: taos.TaosResult = conn.query(sql)
data = result.fetch_all()
print("SQL: %s" % sql)
print("Result: %s" % data)
if config["system"] == 'Windows' and config["baseVersion"] in OEM:
pass
elif data[0][0] != 10000:
raise f"{name}Benchmark work not as expected "
# drop database of test
cmd = f'{name} -s "drop database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}'
result = run_cmd(cmd)
assert "Drop OK" in result.stdout
conn.close()
@pytest.mark.all
def test_version(self, get_config, setup_module):
config = get_config
conn = get_connect(config["serverHost"], config["serverPort"])
server_version = conn.server_info
print("server_version: ", server_version)
client_version = conn.client_info
print("client_version: ", client_version)
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
if config["system"] == "Windows":
taos_V_output = subprocess.getoutput(f"{name} -V | findstr version")
else:
taos_V_output = subprocess.getoutput(f"{name} -V | grep version")
assert config["taosVersion"] in taos_V_output
assert config["taosVersion"] in client_version
if config["taosVersion"] not in server_version:
print("warning: client version is not same as server version")
conn.close()
@pytest.mark.all
def test_uninstall(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
subprocess.getoutput("rm /usr/local/bin/taos")
subprocess.getoutput("pkill taosd")
UninstallTaos(config["taosVersion"], config["verMode"], True, name)

View File

@ -0,0 +1,238 @@
import pytest
import subprocess
import os
from versionCheckAndUninstallforPytest import UninstallTaos
import platform
import re
import time
import signal
system = platform.system()
current_path = os.path.abspath(os.path.dirname(__file__))
if system == 'Windows':
with open(r"%s\test_server_windows_case" % current_path) as f:
cases = f.read().splitlines()
else:
with open("%s/test_server_unix_case" % current_path) as f:
cases = f.read().splitlines()
OEM = ["ProDB"]
@pytest.fixture(scope="module")
def get_config(request):
verMode = request.config.getoption("--verMode")
taosVersion = request.config.getoption("--tVersion")
baseVersion = request.config.getoption("--baseVersion")
sourcePath = request.config.getoption("--sourcePath")
config = {
"verMode": verMode,
"taosVersion": taosVersion,
"baseVersion": baseVersion,
"sourcePath": sourcePath,
"system": platform.system(),
"arch": platform.machine()
}
return config
@pytest.fixture(scope="module")
def setup_module(get_config):
def run_cmd(command):
print("CMD:", command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
assert result.returncode == 0
return result
# setup before module tests
config = get_config
# bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
# t = "tar"
# if config["system"] == "Darwin":
# t = "pkg"
# cmd = "bash getAndRunInstaller.sh -m %s -f server -l false -c x64 -v %s -o %s -s %s -t %s" % (
# config["verMode"], config["taosVersion"], config["baseVersion"], config["sourcePath"], t)
# run_cmd(cmd)
if config["system"] == "Windows":
cmd = r"mkdir ..\..\debug\build\bin"
else:
cmd = "mkdir -p ../../debug/build/bin/"
subprocess.getoutput(cmd)
if config["system"] == "Linux": # add tmq_sim
cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/."
subprocess.getoutput(cmd)
if config["system"] == "Darwin":
cmd = "sudo cp -rf /usr/local/bin/taos* ../../debug/build/bin/"
elif config["system"] == "Windows":
cmd = r"xcopy C:\TDengine\taos*.exe ..\..\debug\build\bin /Y"
else:
if config["baseVersion"] in OEM:
cmd = '''sudo find /usr/bin -name 'prodb*' -exec sh -c 'for file; do cp "$file" "../../debug/build/bin/taos${file##/usr/bin/%s}"; done' sh {} +''' % (
config["baseVersion"].lower())
else:
cmd = "sudo cp /usr/bin/taos* ../../debug/build/bin/"
run_cmd(cmd)
if config["baseVersion"] in OEM: # mock OEM
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "ln -s /usr/bin/prodb /usr/local/bin/taos"
subprocess.getoutput(cmd)
# yield
#
# name = "taos"
# if config["baseVersion"] in OEM:
# name = config["baseVersion"].lower()
# subprocess.getoutput("rm /usr/local/bin/taos")
# subprocess.getoutput("pkill taosd")
# UninstallTaos(config["taosVersion"], config["verMode"], True, name)
# use pytest fixture to exec case
@pytest.fixture(params=cases)
def run_command(request):
commands = request.param
if commands.strip().startswith("#"):
pytest.skip("This case has been marked as skipped")
d, command = commands.strip().split(",")
if system == "Windows":
cmd = r"cd %s\..\..\tests\%s && %s" % (current_path, d, command)
else:
cmd = "cd %s/../../tests/%s&&sudo %s" % (current_path, d, command)
print(cmd)
result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
return {
"command": command,
"stdout": result.stdout,
"stderr": result.stderr,
"returncode": result.returncode
}
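# Each case line has the form "<tests subdir>,<command>", e.g. (from
# test_server_unix_case): system-test,python3 ./test.py -f 2-query/join.py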
class TestServer:
@pytest.mark.all
def test_taosd_up(self, setup_module):
# start process
if system == 'Windows':
subprocess.getoutput("taskkill /IM taosd.exe /F")
cmd = "..\\..\\debug\\build\\bin\\taosd.exe"
else:
subprocess.getoutput("pkill taosd")
cmd = "../../debug/build/bin/taosd"
process = subprocess.Popen(
[cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
# monitor output
while True:
line = process.stdout.readline()
if line:
print(line.strip())
if "succeed to write dnode" in line:
time.sleep(15)
# Send the termination signal
os.kill(process.pid, signal.SIGTERM)
break
@pytest.mark.all
def test_execute_cases(self, setup_module, run_command):
# assert the result
if run_command['returncode'] != 0:
print(f"Running command: {run_command['command']}")
print("STDOUT:", run_command['stdout'])
print("STDERR:", run_command['stderr'])
print("Return Code:", run_command['returncode'])
else:
print(f"Running command: {run_command['command']}")
if len(run_command['stdout']) > 1000:
print("STDOUT:", run_command['stdout'][:1000] + "...")
else:
print("STDOUT:", run_command['stdout'])
print("STDERR:", run_command['stderr'])
print("Return Code:", run_command['returncode'])
assert run_command[
'returncode'] == 0, f"Command '{run_command['command']}' failed with return code {run_command['returncode']}"
@pytest.mark.all
@pytest.mark.check_version
def test_check_version(self, get_config, setup_module):
config = get_config
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
system = config["system"]
version = config["taosVersion"]
verMode = config["verMode"]
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# start taosd server
if system == 'Windows':
cmd = ["C:\\TDengine\\start-all.bat"]
# elif system == 'Linux':
# cmd = "systemctl start taosd".split(' ')
else:
# cmd = "sudo launchctl start com.tdengine.taosd".split(' ')
cmd = "start-all.sh"
process_out = subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
print(cmd)
time.sleep(5)
import taos
conn = taos.connect()
check_list = {}
check_list["server_version"] = conn.server_info
check_list["client_version"] = conn.client_info
# Execute sql get version info
result: taos.TaosResult = conn.query("SELECT server_version()")
check_list["select_server"] = result.fetch_all()[0][0]
result: taos.TaosResult = conn.query("SELECT client_version()")
check_list["select_client"] = result.fetch_all()[0][0]
conn.close()
binary_files = ["taos", "taosd", "taosadapter", "taoskeeper", "taosBenchmark"]
if verMode.lower() == "enterprise":
binary_files.append("taosx")
if config["baseVersion"] in OEM:
binary_files = [i.replace("taos", config["baseVersion"].lower()) for i in binary_files]
if system == "Windows":
for i in binary_files:
check_list[i] = subprocess.getoutput("%s -V | findstr version" % i)
else:
for i in binary_files:
check_list[i] = subprocess.getoutput("%s -V | grep version | awk -F ' ' '{print $3}'" % i)
for i in check_list:
print("%s version is: %s" % (i, check_list[i]))
assert version in check_list[i]
@pytest.mark.all
def test_uninstall(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
subprocess.getoutput("rm /usr/local/bin/taos")
subprocess.getoutput("pkill taosd")
UninstallTaos(config["taosVersion"], config["verMode"], True, name)

View File

@ -0,0 +1,10 @@
system-test,python3 ./test.py -f 2-query/join.py
system-test,python3 ./test.py -f 1-insert/insert_column_value.py
system-test,python3 ./test.py -f 2-query/primary_ts_base_5.py
system-test,python3 ./test.py -f 2-query/case_when.py
system-test,python3 ./test.py -f 2-query/partition_limit_interval.py
system-test,python3 ./test.py -f 2-query/fill.py
army,python3 ./test.py -f query/query_basic.py -N 3
system-test,python3 ./test.py -f 7-tmq/basic5.py
system-test,python3 ./test.py -f 8-stream/stream_basic.py
system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3

View File

@ -0,0 +1,2 @@
system-test,python3 .\test.py -f 0-others\taosShell.py
system-test,python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3

View File

@ -0,0 +1,260 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
# input for server
opts, args = getopt.gnu_getopt(sys.argv[1:], 'v:m:u', ['version=', 'verMode=', 'help'])
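# Options: -v/--version expected version string, -m/--verMode build flavor (e.g. Enterprise),
# -u run the uninstall flow after the version checks, --help print usage.
# Example invocation (the script filename and version value are illustrative; they are not shown in this diff):
#   python3 checkVersion.py -v 3.3.4.0 -m Enterprise -u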
serverHost = ""
serverPort = 0
version = ""
uninstall = False
verMode = ""
for key, value in opts:
if key in ['--help']:
print('A collection of test cases written using Python')
print('-v test client version')
print('-u test uninstall process, will uninstall TDengine')
sys.exit(0)
if key in ['-v', '--version']:
version = value
if key in ['-u']:
uninstall = True
if key in ['-m', '--verMode']:
verMode = value
if not version:
print("No version specified, will not run version check.")
system = platform.system()
arch = platform.machine()
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# start taosd server
if system == 'Windows':
cmd = ["C:\\TDengine\\start-all.bat"]
elif system == 'Linux':
cmd = "systemctl start taosd".split(' ')
else:
cmd = "sudo launchctl start com.tdengine.taosd".split(' ')
process_out = subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
print(cmd)
time.sleep(5)
#get taosc version info
version_test_result = False
if version:
import taos
conn = taos.connect()
server_version = conn.server_info
print("server_version", server_version)
client_version = conn.client_info
print("client_version", client_version)
# Execute sql get version info
result: taos.TaosResult = conn.query("SELECT server_version()")
select_server = result.fetch_all()[0][0]
print("SELECT server_version():" + select_server)
result: taos.TaosResult = conn.query("SELECT client_version()")
select_client = result.fetch_all()[0][0]
print("SELECT client_version():" + select_client)
conn.close()
taos_V_output = ""
taosd_V_output = ""
taosadapter_V_output = ""
taoskeeper_V_output = ""
taosx_V_output = ""
taosB_V_output = ""
taosxVersion = False
if system == "Windows":
taos_V_output = subprocess.getoutput("taos -V | findstr version")
taosd_V_output = subprocess.getoutput("taosd -V | findstr version")
taosadapter_V_output = subprocess.getoutput("taosadapter -V | findstr version")
taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | findstr version")
taosB_V_output = subprocess.getoutput("taosBenchmark -V | findstr version")
if verMode == "Enterprise":
taosx_V_output = subprocess.getoutput("taosx -V | findstr version")
else:
taos_V_output = subprocess.getoutput("taos -V | grep version | awk -F ' ' '{print $3}'")
taosd_V_output = subprocess.getoutput("taosd -V | grep version | awk -F ' ' '{print $3}'")
taosadapter_V_output = subprocess.getoutput("taosadapter -V | grep version | awk -F ' ' '{print $3}'")
taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | grep version | awk -F ' ' '{print $3}'")
taosB_V_output = subprocess.getoutput("taosBenchmark -V | grep version | awk -F ' ' '{print $3}'")
if verMode == "Enterprise":
taosx_V_output = subprocess.getoutput("taosx -V | grep version | awk -F ' ' '{print $3}'")
print("taos -V output is: %s" % taos_V_output)
print("taosd -V output is: %s" % taosd_V_output)
print("taosadapter -V output is: %s" % taosadapter_V_output)
print("taoskeeper -V output is: %s" % taoskeeper_V_output)
print("taosBenchmark -V output is: %s" % taosB_V_output)
if verMode == "Enterprise":
print("taosx -V output is: %s" % taosx_V_output)
taosxVersion = version in taosx_V_output
else:
taosxVersion = True
if (version in client_version
and version in server_version
and version in select_server
and version in select_client
and version in taos_V_output
and version in taosd_V_output
and version in taosadapter_V_output
and version in taoskeeper_V_output
and version in taosB_V_output
and taosxVersion
):
version_test_result = True
leftFile = False
if uninstall:
print("Start to run rmtaos")
print("Platform: ", system)
# stop taosd server
if system == 'Windows':
cmd = "C:\\TDengine\\stop_all.bat"
elif system == 'Linux':
cmd = "systemctl stop taosd"
else:
cmd = "sudo launchctl stop com.tdengine.taosd"
process_out = subprocess.getoutput(cmd)
print(cmd)
time.sleep(10)
if system == "Linux":
# Create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush()  # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush()  # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Check that the directories were cleaned up
out = subprocess.getoutput("ls /etc/systemd/system/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/taos")
#print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/taos%s" % out)
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# Create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['sudo', 'rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush()  # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush()  # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Check that the directories were cleaned up
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
#out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
#print(out)
#if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
#if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000','/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
out = subprocess.getoutput("ls C:\TDengine")
print(out)
if len(out.split("\n")) > 3:
leftFile = True
print("Uninstall left some files: %s" % out)
if version_test_result:
print("**********Test Result: version test passed! **********")
else:
print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!")
if not leftFile:
print("**********Test Result: uninstall test passed! **********")
else:
print("!!!!!!!!!!!Test Result: uninstall test failed! !!!!!!!!!!")
if version_test_result and not leftFile:
sys.exit(0)
else:
sys.exit(1)

View File

@ -0,0 +1,137 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
from lib import run_cmd
# input for server
def UninstallTaos(version, verMode, uninstall, name):
if not version:
raise "No version specified, will not run version check."
system = platform.system()
arch = platform.machine()
leftFile = False
if uninstall:
print("Start to run rm%s" % name)
print("Platform: ", system)
# stop taosd server
if system == 'Windows':
cmd = "C:\\TDengine\\stop_all.bat"
else:
cmd = "stop_all.sh"
process_out = subprocess.getoutput(cmd)
print(cmd)
time.sleep(5)
print("start to rm%s" % name)
if system == "Linux":
# Launch the uninstaller
process = subprocess.Popen(['rm%s' % name], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, text=True)
# Send the interactive confirmation input
stdout, stderr = process.communicate(
input="y\nI confirm that I would like to delete all data, log and configuration files\n")
# Print the output (optional)
print(stdout)
print(stderr)
# Check that the directories were cleaned up
out = subprocess.getoutput("ls /etc/systemd/system/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/%s" % name)
# print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/%s%s" % (name, out))
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# Create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['sudo', 'rm%s' % name],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush()  # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush()  # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Check that the directories were cleaned up
out = subprocess.getoutput("ls /usr/local/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
# out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
# print(out)
# if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
# if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000', '/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
for file in ["C:\TDengine\\taos.exe", "C:\TDengine\\unins000.exe", "C:\ProDB\prodb.exe",
"C:\ProDB\\unins000.exe"]:
if os.path.exists(file):
leftFile = True
if leftFile:
raise "uninstall %s fail, please check" % name
else:
print("**********Test Result: uninstall test passed! **********")

View File

@ -185,7 +185,14 @@ function kill_process() {
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}rm -rf ${install_main_dir}/cfg || :
${csudo}rm -rf ${install_main_dir}/bin || :
${csudo}rm -rf ${install_main_dir}/driver || :
${csudo}rm -rf ${install_main_dir}/examples || :
${csudo}rm -rf ${install_main_dir}/include || :
${csudo}rm -rf ${install_main_dir}/share || :
${csudo}rm -rf ${install_main_dir}/log || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin

View File

@ -12,9 +12,18 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
rem // stop and delete service
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close)
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo This may take a few moments to finish deleting the taosd/taosadapter services ...
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo This may take a few moments to finish deleting the taosd/taoskeeper services ...
)
call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper
set source_dir=%2
set source_dir=%source_dir:/=\\%
@ -46,6 +55,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul
)
)
if exist %binary_dir%\\test\\cfg\\taoskeeper.toml (
if not exist %target_dir%\\cfg\\taoskeeper.toml (
copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul
)
)
copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul
copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul
copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul
@ -104,6 +118,9 @@ copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul
)
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)
@ -116,6 +133,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m
)
goto :eof
:hasAdmin
@ -123,6 +144,7 @@ goto :eof
call :stop_delete
call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper
if exist c:\\windows\\sysnative (
echo x86
@ -141,6 +163,7 @@ if exist c:\\windows\\sysnative (
rem // create services
sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND
set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do (
@ -181,6 +204,8 @@ sc stop taosd
sc delete taosd
sc stop taosadapter
sc delete taosadapter
sc stop taoskeeper
sc delete taoskeeper
exit /B 0
:check_svc

View File

@ -129,6 +129,13 @@ function kill_taosadapter() {
fi
}
function kill_taoskeeper() {
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function kill_taosd() {
pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
@ -138,7 +145,14 @@ function kill_taosd() {
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}rm -rf ${install_main_dir}/cfg || :
${csudo}rm -rf ${install_main_dir}/bin || :
${csudo}rm -rf ${install_main_dir}/driver || :
${csudo}rm -rf ${install_main_dir}/examples || :
${csudo}rm -rf ${install_main_dir}/include || :
${csudo}rm -rf ${install_main_dir}/share || :
${csudo}rm -rf ${install_main_dir}/log || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin
@ -155,6 +169,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
@ -169,6 +184,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
@ -183,6 +199,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || :
@ -197,6 +214,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || :
@ -208,6 +226,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
@ -407,6 +426,29 @@ function install_taosadapter_config() {
fi
}
function install_taoskeeper_config() {
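# First install: seed taoskeeper.toml into ${cfg_install_dir} and link it into ${install_main_dir}/cfg;
# on upgrade, leave ${cfg_install_dir}/taoskeeper.toml untouched and stash the shipped default as
# taoskeeper.toml.${verNumber}.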
if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then
${csudo}mkdir -p ${cfg_install_dir} || :
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || :
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \
${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || :
else
if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
fi
fi
}
function install_log() {
${csudo}rm -rf ${log_dir} || :
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
@ -526,6 +568,15 @@ function install_taosadapter_service() {
fi
}
function install_taoskeeper_service() {
if ((${service_mod} == 0)); then
[ -f ${binary_dir}/test/cfg/taoskeeper.service ] &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \
${service_config_dir}/ || :
${csudo}systemctl daemon-reload
fi
}
function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
@ -534,6 +585,10 @@ function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
}
function install_service() {
@ -549,6 +604,7 @@ function install_service() {
install_service_on_launchctl
fi
}
function install_app() {
if [ "$osType" = "Darwin" ]; then
${csudo}rm -rf /Applications/TDengine.app &&
@ -573,6 +629,7 @@ function update_TDengine() {
elif ((${service_mod} == 1)); then
${csudo}service ${serverName} stop || :
else
kill_taoskeeper
kill_taosadapter
kill_taosd
fi
@ -591,9 +648,11 @@ function update_TDengine() {
install_service
install_taosadapter_service
install_taoskeeper_service
install_config
install_taosadapter_config
install_taoskeeper_config
echo
echo -e "\033[44;32;1m${productName} is updated successfully!${NC}"
@ -602,22 +661,33 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi
fi
@ -643,9 +713,11 @@ function install_TDengine() {
install_service
install_taosadapter_service
install_taoskeeper_service
install_config
install_taosadapter_config
install_taoskeeper_config
# Ask if to start the service
echo
@ -654,22 +726,33 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi
fi

View File

@ -10,6 +10,10 @@ else()
add_library(taos SHARED ${CLIENT_SRC})
endif()
if(${TD_DARWIN})
target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype)
endif()
INCLUDE_DIRECTORIES(jni)
target_include_directories(
@ -46,6 +50,11 @@ set_target_properties(
)
add_library(taos_static STATIC ${CLIENT_SRC})
if(${TD_DARWIN})
target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
taos_static
PUBLIC "${TD_SOURCE_DIR}/include/client"

View File

@ -108,6 +108,10 @@ typedef struct SQueryExecMetric {
int64_t execCostUs;
} SQueryExecMetric;
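// Server-side settings delivered to the client in the connect and heartbeat responses.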
typedef struct {
SMonitorParas monitorParas;
int8_t enableAuditDelete;
} SAppInstServerCFG;
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
@ -121,7 +125,7 @@ struct SAppInstInfo {
void* pTransporter;
SAppHbMgr* pAppHbMgr;
char* instKey;
SMonitorParas monitorParas;
SAppInstServerCFG serverCfg;
};
typedef struct SAppInfo {
@ -297,8 +301,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4);
int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);

View File

@ -92,6 +92,26 @@ extern "C" {
} \
}
#define SML_CHECK_CODE(CMD) \
code = (CMD); \
if (TSDB_CODE_SUCCESS != code) { \
lino = __LINE__; \
goto END; \
}
#define SML_CHECK_NULL(CMD) \
if (NULL == (CMD)) { \
code = terrno; \
lino = __LINE__; \
goto END; \
}
#define RETURN \
if (code != 0) { \
uError("%s failed code:%d line:%d", __FUNCTION__, code, lino); \
} \
return code;
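/* Illustrative use of the error-handling helpers above; a sketch assuming the
 * enclosing function declares int32_t code = 0, lino = 0; and an END: label
 * (doParse is a hypothetical callee):
 *   SML_CHECK_NULL(buf = taosMemoryCalloc(1, size));
 *   SML_CHECK_CODE(doParse(buf));
 * END:
 *   taosMemoryFree(buf);
 *   RETURN
 */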
typedef enum {
SCHEMA_ACTION_NULL,
SCHEMA_ACTION_CREATE_STABLE,
@ -191,7 +211,6 @@ typedef struct {
cJSON *root; // for parse json
int8_t offset[OTD_JSON_FIELDS_NUM];
SSmlLineInfo *lines; // element is SSmlLineInfo
bool parseJsonByLib;
SArray *tagJsonArray;
SArray *valueJsonArray;
@ -211,13 +230,8 @@ typedef struct {
extern int64_t smlFactorNS[];
extern int64_t smlFactorS[];
typedef int32_t (*_equal_fn_sml)(const void *, const void *);
int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle);
void smlDestroyInfo(SSmlHandle *info);
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
void smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
@ -237,7 +251,7 @@ void smlDestroyTableInfo(void *para);
void freeSSmlKv(void* data);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);
int32_t smlParseJSONExt(SSmlHandle *info, char *payload);
int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSmlSTableMeta** sMeta);
bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv);
@ -246,7 +260,8 @@ int32_t smlProcessChildTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlJoinMeasureTag(SSmlLineInfo *elements);
void smlBuildTsKv(SSmlKv *kv, int64_t ts);
int32_t smlParseEndTelnetJson(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndTelnetJsonUnFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs);
static inline bool smlDoubleToInt64OverFlow(double num) {

View File

@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags);
int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx);
int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields);
int stmtGetStbColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_STB **fields);
int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums);
int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums);
int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert);

View File

@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType)));
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(
json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows)));
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) {
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen];
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0';
if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) {
char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen];
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0';
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp;
pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp;
} else {
ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr)));
}
@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) {
sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL ||
duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) {
if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s",
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration,
pRequest->sqlstr);
if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) {
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration);
if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) {
tscError("failed to generate write slow log");
@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread,
rpcInit.startReadTimer = 1;
rpcInit.readTimeout = tsReadTimeout;
int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
if (TSDB_CODE_SUCCESS != code) {
tscError("invalid version string.");
return code;
@ -689,7 +689,7 @@ void doDestroyRequest(void *p) {
int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
if (TSDB_CODE_SUCCESS != code) {
tscError("failed to remove request from hash, code:%s", tstrerror(code));
tscWarn("failed to remove request from hash, code:%s", tstrerror(code));
}
schedulerFreeJob(&pRequest->body.queryJob, 0);
@ -983,6 +983,7 @@ void taos_init_imp(void) {
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
ENV_ERR_RET(catalogInit(&cfg), "failed to init catalog");
ENV_ERR_RET(schedulerInit(), "failed to init scheduler");
ENV_ERR_RET(initClientId(), "failed to init clientId");
tscDebug("starting to initialize TAOS driver");

View File

@ -605,7 +605,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
return code;
}
pInst->monitorParas = pRsp.monitorParas;
pInst->serverCfg.monitorParas = pRsp.monitorParas;
pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete;
tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId,
pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope);

View File

@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app));
tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user));
tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd));
tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer));
tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer));
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
void* pReq = taosMemoryMalloc(contLen);
@ -1770,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
}
}
int32_t doProcessMsgFromServer(void* param) {
AsyncArg* arg = (AsyncArg*)param;
SRpcMsg* pMsg = &arg->msg;
SEpSet* pEpSet = arg->pEpset;
int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
if (pMsg->info.ahandle == NULL) {
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg);
taosMemoryFree(pEpSet);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
STscObj* pTscObj = NULL;
STraceId* trace = &pMsg->info.traceId;
@ -1802,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) {
if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) {
tscError("doProcessMsgFromServer taosReleaseRef failed");
}
taosMemoryFree(arg->pEpset);
rpcFreeCont(pMsg->pCont);
taosMemoryFree(pEpSet);
destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
pTscObj = pRequest->pTscObj;
@ -1844,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) {
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
taosMemoryFree(arg);
return TSDB_CODE_SUCCESS;
}
int32_t doProcessMsgFromServer(void* param) {
AsyncArg* arg = (AsyncArg*)param;
int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset);
taosMemoryFree(arg);
return code;
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
int32_t code = 0;
SEpSet* tEpSet = NULL;
if (pEpSet != NULL) {
tEpSet = taosMemoryCalloc(1, sizeof(SEpSet));
if (NULL == tEpSet) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pMsg->info.ahandle);
return;
code = terrno;
pMsg->code = terrno;
goto _exit;
}
(void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet));
}
@ -1879,21 +1878,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg));
if (NULL == arg) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(tEpSet);
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pMsg->info.ahandle);
return;
code = terrno;
pMsg->code = code;
goto _exit;
}
arg->msg = *pMsg;
arg->pEpset = tEpSet;
if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
tscError("failed to sched msg to tsc, tsc ready to quit");
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg->pEpset);
destroySendMsgInfo(pMsg->info.ahandle);
if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) {
pMsg->code = code;
taosMemoryFree(arg);
goto _exit;
}
return;
_exit:
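// Could not hand the message off to the async executor; fall back to processing it
// inline on this thread so the response and its buffers are not leaked.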
tscError("failed to sched msg to tsc since %s", tstrerror(code));
code = doProcessMsgFromServerImpl(pMsg, tEpSet);
if (code != 0) {
tscError("failed to sched msg to tsc, tsc ready quit");
}
}
@ -2081,12 +2084,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
int32_t idx = -1;
iconv_t conv = taosAcquireConv(&idx, C2M);
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int32_t type = pResultInfo->fields[i].type;
int32_t bytes = pResultInfo->fields[i].bytes;
@ -2100,7 +2103,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->convertBuf[i] = p;
SResultColumn* pCol = &pResultInfo->pCol[i];
for (int32_t j = 0; j < numOfRows; ++j) {
for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) {
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;
@ -2133,10 +2136,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
@ -2195,10 +2201,16 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
}
pStart += colLen;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
len += sizeof(bool);
return len;
}
static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
int32_t numOfRows = pResultInfo->numOfRows;
int32_t numOfCols = pResultInfo->numOfCols;
bool needConvert = false;
for (int32_t i = 0; i < numOfCols; ++i) {
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
@ -2215,7 +2227,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
int32_t dataLen = estimateJsonLen(pResultInfo);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -2338,27 +2350,36 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart1 += colLen1;
}
// Ensure the complete structure of the block, including the blankfill field,
// even though it is not used on the client side.
// (void)memcpy(pStart1, pStart, sizeof(bool));
totalLen += sizeof(bool);
*(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
pResultInfo->pData = pResultInfo->convertJson;
return TSDB_CODE_SUCCESS;
}
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4) {
if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) {
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) {
if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) {
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (numOfRows == 0) {
if (pResultInfo->numOfRows == 0) {
return TSDB_CODE_SUCCESS;
}
if (pResultInfo->pData == NULL) {
tscError("setResultDataPtr error: pData is NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
int32_t code = doPrepareResPtr(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
code = doConvertJson(pResultInfo, numOfCols, numOfRows);
code = doConvertJson(pResultInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -2378,9 +2399,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);
if (rows != numOfRows || cols != numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols,
numOfCols);
if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols,
pResultInfo->numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -2391,7 +2412,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
p += sizeof(uint64_t);
// check fields
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
int8_t type = *(int8_t*)p;
p += sizeof(int8_t);
@ -2400,10 +2421,14 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
}
int32_t* colLength = (int32_t*)p;
p += sizeof(int32_t) * numOfCols;
p += sizeof(int32_t) * pResultInfo->numOfCols;
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
if ((pStart - pResultInfo->pData) >= dataLen) {
tscError("setResultDataPtr invalid offset over dataLen %d", dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (blockVersion == BLOCK_VERSION_1) {
colLength[i] = htonl(colLength[i]);
}
@ -2411,10 +2436,13 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) {
tscError("invalid type %d", pResultInfo->fields[i].type);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
pResultInfo->pCol[i].offset = (int32_t*)pStart;
pStart += numOfRows * sizeof(int32_t);
pStart += pResultInfo->numOfRows * sizeof(int32_t);
} else {
pResultInfo->pCol[i].nullbitmap = pStart;
pStart += BitmapLen(pResultInfo->numOfRows);
@ -2427,11 +2455,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
p = pStart;
// bool blankFill = *(bool*)p;
p += sizeof(bool);
int32_t offset = p - pResultInfo->pData;
if (offset > dataLen) {
tscError("invalid offset %d, dataLen %d", offset, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
code = doConvertUCS4(pResultInfo, colLength);
}
return code;
@ -2544,7 +2578,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
pResultInfo->totalRows += pResultInfo->numOfRows;
int32_t code =
setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4);
setResultDataPtr(pResultInfo, convertUcs4);
return code;
}
@ -2573,7 +2607,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connLimitNum = connLimitNum;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
rpcInit.readTimeout = tsReadTimeout;
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) {
if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) {
tscError("faild to convert taos version from str to int, errcode:%s", terrstr());
goto _OVER;
}
@ -2839,6 +2873,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
if (pParam->pRequest) {
pParam->pRequest->code = code;
clientOperateReport(pParam->pRequest);
}
if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) {

View File

@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) {
tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp);
}
taosMemoryFree(pRsp);
}
void taos_kill_query(TAOS *taos) {
@ -670,7 +669,7 @@ const char *taos_data_type(int type) {
}
}
const char *taos_get_client_info() { return version; }
const char *taos_get_client_info() { return td_version; }
// return int32_t
int taos_affected_rows(TAOS_RES *res) {
@ -1804,7 +1803,7 @@ int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
if (bind->num > 1) {
tscError("invalid bind number %d for %s", bind->num, __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1820,7 +1819,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
if (bind->num <= 0 || bind->num > INT16_MAX) {
tscError("invalid bind num %d", bind->num);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1832,7 +1831,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
}
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1860,7 +1859,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, in
}
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -2020,7 +2019,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
if (bind->num <= 0 || bind->num > INT16_MAX) {
tscError("invalid bind num %d", bind->num);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -2028,7 +2027,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
(void)stmtIsInsert2(stmt, &insert);
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) {
}
int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) {
if (stmt == NULL || NULL == count) {
if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
@ -2103,12 +2102,28 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count,
}
}
int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields) {
if (stmt == NULL || count == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
return stmtGetStbColFields2(stmt, count, fields);
}
void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);
}
DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);
}
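/* Illustrative call pattern for the new API pair above (a sketch; stmt is an
 * already-prepared TAOS_STMT2 handle):
 *   int count = 0;
 *   TAOS_FIELD_STB *fields = NULL;
 *   if (taos_stmt2_get_stb_fields(stmt, &count, &fields) == 0) {
 *     // ... inspect count / fields ...
 *     taos_stmt2_free_stb_fields(stmt, fields);
 *   }
 */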
TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) {
if (stmt == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
@ -2144,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) {
return 0;
}
char *getBuildInfo() { return buildinfo; }
char *getBuildInfo() { return td_buildinfo; }

View File

@ -2,8 +2,6 @@
#include "cJSON.h"
#include "clientInt.h"
#include "clientLog.h"
#include "os.h"
#include "tglobal.h"
#include "tmisce.h"
#include "tqueue.h"
#include "ttime.h"
@ -19,6 +17,7 @@ STaosQueue* monitorQueue;
SHashObj* monitorSlowLogHash;
char tmpSlowLogPath[PATH_MAX] = {0};
TdThread monitorThread;
extern bool tsEnableAuditDelete;
static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) {
int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir);
@ -216,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) {
SEpSet ep = getEpSet_s(&pInst->mgmtEp);
generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep);
bool reset =
taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId);
tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset);
taosRUnLockLatch(&monitorLock);
}
@ -289,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) {
goto fail;
}
pMonitor->timer =
taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer);
if (pMonitor->timer == NULL) {
tscError("failed to start timer");
goto fail;
@ -660,7 +659,7 @@ static void monitorSendAllSlowLog() {
taosHashCancelIterate(monitorSlowLogHash, pIter);
return;
}
if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) {
if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) {
pClient->lastCheckTime = t;
} else {
continue;
@ -686,7 +685,7 @@ static void monitorSendAllSlowLog() {
static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId);
if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) {
if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) {
tscInfo("[monitor] monitor is disabled, skip send slow log");
return;
}
@ -933,3 +932,100 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) {
}
return 0;
}
int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
tscDebug("[del report]delete reportCB code:%d", code);
return 0;
}
int32_t sendAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) {
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
tscError("[del report]failed to allocate memory for sendInfo");
return terrno;
}
sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL};
sendInfo->requestId = requestId;
sendInfo->requestObjRefId = 0;
sendInfo->param = NULL;
sendInfo->fp = reportCB;
sendInfo->msgType = TDMT_MND_AUDIT;
SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
if (code != 0) {
tscError("[del report]failed to send msg to server, code:%d", code);
taosMemoryFree(sendInfo);
return code;
}
return TSDB_CODE_SUCCESS;
}
static void reportDeleteSql(SRequestObj* pRequest) {
SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot;
STscObj* pTscObj = pRequest->pTscObj;
if (pTscObj == NULL || pTscObj->pAppInfo == NULL) {
tscError("[del report]invalid tsc obj");
return;
}
if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) {
tscDebug("[del report]audit delete is disabled");
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
tscDebug("[del report]delete request result code:%d", pRequest->code);
return;
}
if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) {
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable));
return;
}
SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable;
SAuditReq req;
req.pSql = pRequest->sqlstr;
req.sqlLen = pRequest->sqlLen;
TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName));
TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName));
TAOS_UNUSED(tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"));
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("[del report]failed to allocate memory for req");
return;
}
if (tSerializeSAuditReq(pReq, tlen, &req) < 0) {
tscError("[del report]failed to serialize req");
taosMemoryFree(pReq);
return;
}
int32_t code = sendAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId);
if (code != 0) {
tscError("[del report]failed to send audit info, code:%d", code);
taosMemoryFree(pReq);
return;
}
tscDebug("[del report]delete data, sql:%s", req.pSql);
}
void clientOperateReport(SRequestObj* pRequest) {
if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) {
tscError("[del report]invalid request");
return;
}
if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) {
reportDeleteSql(pRequest);
}
}
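The size-then-serialize idiom reportDeleteSql() uses above (tSerializeSAuditReq() called once with a NULL buffer to measure, then again to encode into an exact-sized allocation) generalizes as below. serializeDemo() is a hypothetical encoder standing in for the real one.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
static int32_t serializeDemo(void *buf, int32_t bufLen, const char *payload) {
  int32_t need = (int32_t)strlen(payload) + 1;
  if (buf == NULL) return need;          // first pass: report the required size
  if (bufLen < need) return -1;          // defensive: caller's buffer too small
  memcpy(buf, payload, need);            // second pass: actually encode
  return need;
}
static void *packDemo(const char *payload, int32_t *outLen) {
  int32_t tlen = serializeDemo(NULL, 0, payload);         // 1) measure
  void   *buf  = calloc(1, (size_t)tlen);
  if (buf == NULL) return NULL;
  if (serializeDemo(buf, tlen, payload) < 0) {            // 2) encode
    free(buf);
    return NULL;
  }
  *outLen = tlen;
  return buf;                                             // caller owns the buffer
}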

View File

@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
goto End;
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
goto End;
}
@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
// update the appInstInfo
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete;
tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d",
connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope);
lastClusterId = connectRsp.clusterId;
@ -588,7 +589,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@ -603,7 +605,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS);
if(len < 0) {
uError("buildShowVariablesRsp error, len:%d", len);
code = terrno;
@ -741,7 +743,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN;
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
code = terrno;
@ -757,7 +760,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr
(*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows);
(*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS);
if(len < 0) {
uError("buildRetriveTableRspForCompactDb error, len:%d", len);
code = terrno;

View File

@ -53,9 +53,7 @@
#define TMQ_META_VERSION "1.0"
static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
SColCmprWrapper* pColCmprRow, cJSON** pJson) {
int32_t code = TSDB_CODE_SUCCESS;
@ -890,9 +888,6 @@ end:
}
static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVCreateStbReq req = {0};
SDecoder coder;
SMCreateStbReq pReq = {0};
@ -1003,9 +998,6 @@ end:
}
static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVDropStbReq req = {0};
SDecoder coder = {0};
SMDropStbReq pReq = {0};
@ -1115,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) {
}
static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVCreateTbBatchReq req = {0};
SDecoder coder = {0};
int32_t code = TSDB_CODE_SUCCESS;
@ -1304,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) {
}
static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVDropTbBatchReq req = {0};
SDecoder coder = {0};
int32_t code = TSDB_CODE_SUCCESS;
@ -1419,9 +1405,6 @@ end:
}
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SDeleteRes req = {0};
SDecoder coder = {0};
char sql[256] = {0};
@ -1457,9 +1440,6 @@ end:
}
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
if (taos == NULL || meta == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SVAlterTbReq req = {0};
SDecoder dcoder = {0};
int32_t code = TSDB_CODE_SUCCESS;
@ -1590,7 +1570,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
rows, pData, tbname, fields, numFields);
@ -1622,7 +1602,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
RAW_NULL_CHECK(pVgHash);
RAW_RETURN_CHECK(
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false));
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1651,7 +1631,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest));
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid));
uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname);
@ -1682,7 +1662,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
RAW_NULL_CHECK(pVgHash);
RAW_RETURN_CHECK(
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0));
RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false));
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1708,116 +1688,6 @@ static void* getRawDataFromRes(void* pRetrieve) {
return rawData;
}
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if (taos == NULL || data == NULL) {
SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data);
return TSDB_CODE_INVALID_PARA;
}
int32_t code = TSDB_CODE_SUCCESS;
SHashObj* pVgHash = NULL;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
STableMeta* pTableMeta = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));
uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
pRequest->syncQuery = true;
rspObj.resIter = -1;
rspObj.resType = RES_TYPE__TMQ;
int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t);
}
tDecoderInit(&decoder, data, dataLen);
code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp);
if (code != 0) {
SET_ERROR_MSG("decode mq data rsp failed");
code = TSDB_CODE_INVALID_MSG;
goto end;
}
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
struct SCatalog* pCatalog = NULL;
RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog));
SRequestConnInfo conn = {0};
conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
conn.requestId = pRequest->requestId;
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pVgHash);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
if (!rspObj.dataRsp.withSchema) {
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
RAW_NULL_CHECK(tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta));
SVgroupInfo vg = {0};
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg));
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
if (hData == NULL) {
RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
RAW_NULL_CHECK(fields);
for (int i = 0; i < pSW->nCols; i++) {
fields[i].type = pSW->pSchema[i].type;
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
void* rawData = getRawDataFromRes(pRetrieve);
char err[ERR_MSG_LEN] = {0};
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN);
taosMemoryFree(fields);
taosMemoryFreeClear(pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", tbName, err);
goto end;
}
}
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteMqDataRsp(&rspObj.dataRsp);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
taosMemoryFreeClear(pTableMeta);
return code;
}
static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) {
// find schema data info
int32_t code = 0;
@ -1855,152 +1725,368 @@ end:
return code;
}
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if (taos == NULL || data == NULL) {
SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data);
return TSDB_CODE_INVALID_PARA;
typedef enum {
WRITE_RAW_INIT_START = 0,
WRITE_RAW_INIT_OK,
WRITE_RAW_INIT_FAIL,
} WRITE_RAW_INIT_STATUS;
static SHashObj* writeRawCache = NULL;
static int8_t initFlag = 0;
static int8_t initedFlag = WRITE_RAW_INIT_START;
typedef struct {
SHashObj* pVgHash;
SHashObj* pNameHash;
SHashObj* pMetaHash;
} rawCacheInfo;
typedef struct {
SVgroupInfo vgInfo;
int64_t uid;
int64_t suid;
} tbInfo;
static void tmqFreeMeta(void* data) {
STableMeta* pTableMeta = *(STableMeta**)data;
taosMemoryFree(pTableMeta);
}
int32_t code = TSDB_CODE_SUCCESS;
SHashObj* pVgHash = NULL;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
STableMeta* pTableMeta = NULL;
SHashObj* pCreateTbHash = NULL;
SRequestObj* pRequest = NULL;
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));
static void freeRawCache(void* data) {
rawCacheInfo* pRawCache = (rawCacheInfo*)data;
taosHashCleanup(pRawCache->pMetaHash);
taosHashCleanup(pRawCache->pNameHash);
taosHashCleanup(pRawCache->pVgHash);
}
uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
pRequest->syncQuery = true;
rspObj.resIter = -1;
rspObj.resType = RES_TYPE__TMQ_METADATA;
static int32_t initRawCacheHash() {
if (writeRawCache == NULL) {
writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
if (writeRawCache == NULL) {
return terrno;
}
taosHashSetFreeFp(writeRawCache, freeRawCache);
}
return 0;
}
static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) {
char* p = (char*)rawData;
// | version | total length | total rows | blankFill | total columns | flag seg | block group id | column schema | each
// column length |
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(int32_t);
p += sizeof(uint64_t);
int8_t* fields = p;
if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) {
return true;
}
for (int i = 0; i < pSW->nCols; i++) {
int j = 0;
for (; j < pTableMeta->tableInfo.numOfColumns; j++) {
SSchema* pColSchema = &pTableMeta->schema[j];
char* fieldName = pSW->pSchema[i].name;
if (strcmp(pColSchema->name, fieldName) == 0) {
if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) {
return true;
}
break;
}
}
fields += sizeof(int8_t) + sizeof(int32_t);
if (j == pTableMeta->tableInfo.numOfColumns) return true;
}
return false;
}
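The fixed header needRefreshMeta() steps over can be pictured as a packed struct. The layout comment above lists seven names but the code skips only six slots (five int32 fields plus a uint64), so the field names below are best-effort assumptions; only the widths come from the pointer arithmetic.
#include <stdint.h>
#pragma pack(push, 1)
typedef struct {
  int32_t  version;       // field names follow the layout comment above and
  int32_t  totalLen;      // are assumptions, not canonical definitions
  int32_t  totalRows;
  int32_t  totalCols;
  int32_t  flagSeg;
  uint64_t blockGroupId;
  // followed by totalCols pairs of (int8_t colType, int32_t colBytes),
  // which is what the comparison loop above walks via `fields`
} SRawBlockHeaderSketch;
#pragma pack(pop)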
static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) {
int32_t code = 0;
void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES);
if (cacheInfo == NULL) {
*pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pVgHash);
*pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pNameHash);
*pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(*pMetaHash);
taosHashSetFreeFp(*pMetaHash, tmqFreeMeta);
rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash};
RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo)));
} else {
rawCacheInfo* info = (rawCacheInfo*)cacheInfo;
*pVgHash = info->pVgHash;
*pNameHash = info->pNameHash;
*pMetaHash = info->pMetaHash;
}
return 0;
end:
taosHashCleanup(*pMetaHash);
taosHashCleanup(*pNameHash);
taosHashCleanup(*pVgHash);
return code;
}
static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) {
int32_t code = 0;
RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0));
(*pRequest)->syncQuery = true;
if (!(*pRequest)->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog));
conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter;
conn->requestId = (*pRequest)->requestId;
conn->requestObjRefId = (*pRequest)->self;
conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp);
end:
return code;
}
typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp);
static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func,
SMqRspObj* rspObj) {
int8_t dataVersion = *(int8_t*)data;
if (dataVersion >= MQ_DATA_RSP_VERSION) {
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
dataLen -= sizeof(int8_t) + sizeof(int32_t);
}
tDecoderInit(&decoder, data, dataLen);
code = tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp);
rspObj->resIter = -1;
tDecoderInit(decoder, data, dataLen);
int32_t code = func(decoder, &rspObj->dataRsp);
if (code != 0) {
SET_ERROR_MSG("decode mq taosx data rsp failed");
code = TSDB_CODE_INVALID_MSG;
}
return code;
}
static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash,
SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName,
STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) {
int32_t code = 0;
STableMeta* pTableMeta = NULL;
tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
if (tmpInfo == NULL || retry > 0) {
tbInfo info = {0};
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo));
if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta
tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN);
}
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
info.uid = pTableMeta->uid;
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
info.suid = pTableMeta->suid;
} else {
info.suid = pTableMeta->uid;
}
code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
if (code != 0) {
taosMemoryFree(pTableMeta);
goto end;
}
if (pCreateReqDst) {
pTableMeta->vgId = info.vgInfo.vgId;
pTableMeta->uid = pCreateReqDst->uid;
pCreateReqDst->ctb.suid = pTableMeta->suid;
}
RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo)));
tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
RAW_RETURN_CHECK(
taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo)));
}
if (pTableMeta == NULL || retry > 0) {
STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES);
if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) {
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
if (code != 0) {
taosMemoryFree(pTableMeta);
goto end;
}
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
} else {
pTableMeta = *pTableMetaTmp;
pTableMeta->uid = tmpInfo->uid;
pTableMeta->vgId = tmpInfo->vgInfo.vgId;
}
}
*pMeta = pTableMeta;
end:
return code;
}
struct SCatalog* pCatalog = NULL;
RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog));
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
SRequestObj* pRequest = NULL;
SCatalog* pCatalog = NULL;
SRequestConnInfo conn = {0};
conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
conn.requestId = pRequest->requestId;
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj));
SHashObj* pVgHash = NULL;
SHashObj* pNameHash = NULL;
SHashObj* pMetaHash = NULL;
RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
int retry = 0;
while (1) {
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pVgHash);
pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pCreateTbHash);
RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash));
uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
if (!rspObj.dataRsp.withSchema) {
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
if (!tbName) {
SET_ERROR_MSG("block tbname is null");
code = terrno;
RAW_NULL_CHECK(tbName);
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
void* rawData = getRawDataFromRes(pRetrieve);
RAW_NULL_CHECK(rawData);
uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
STableMeta* pTableMeta = NULL;
RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW,
rawData, retry));
char err[ERR_MSG_LEN] = {0};
code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
goto end;
}
}
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
qDestroyQuery(pQuery);
pQuery = NULL;
rspObj.resIter = -1;
continue;
}
break;
}
end:
uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteMqDataRsp(&rspObj.dataRsp);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
return code;
}
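The retry policy above (mirrored in tmqWriteRawMetaDataImpl() below) reduces to this loop: rebuild the query and re-bind with refreshed metadata up to three times while the error is retriable. needsRetry() and submitOnce() are hypothetical stand-ins for NEED_CLIENT_HANDLE_ERROR() and the bind-and-launch round trip.
#include <stdbool.h>
#include <stdint.h>
static bool needsRetry(int32_t code) { return code != 0; }  // stand-in predicate
static int32_t submitOnce(int attempt) {
  // stand-in for smlInitHandle + rawBlockBindData + launchQueryImpl;
  // fails on the first pass, succeeds once the "metadata" is refreshed
  return attempt == 0 ? -1 : 0;
}
static int32_t submitWithRetry(void) {
  int32_t code = 0;
  int     retry = 0;
  while (1) {
    code = submitOnce(retry);   // retry > 0 forces a catalog refresh in the real loop
    if (needsRetry(code) && retry++ < 3) {
      continue;                 // the real loop also destroys and rebuilds pQuery here
    }
    break;
  }
  return code;
}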
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
SHashObj* pCreateTbHash = NULL;
SRequestObj* pRequest = NULL;
SCatalog* pCatalog = NULL;
SRequestConnInfo conn = {0};
RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj));
pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
RAW_NULL_CHECK(pCreateTbHash);
RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash));
SHashObj* pVgHash = NULL;
SHashObj* pNameHash = NULL;
SHashObj* pMetaHash = NULL;
RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
int retry = 0;
while (1) {
RAW_RETURN_CHECK(smlInitHandle(&pQuery));
uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
if (!rspObj.dataRsp.withSchema) {
goto end;
}
uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
RAW_NULL_CHECK(tbName);
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
RAW_NULL_CHECK(pRetrieve);
void* rawData = getRawDataFromRes(pRetrieve);
RAW_NULL_CHECK(rawData);
uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
// find schema data info
SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName));
SVgroupInfo vg = {0};
RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg));
if (pCreateReqDst) { // change stable name to get meta
tstrncpy(pName.tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN);
}
RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta));
if (pCreateReqDst) {
pTableMeta->vgId = vg.vgId;
pTableMeta->uid = pCreateReqDst->uid;
pCreateReqDst->ctb.suid = pTableMeta->suid;
}
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
if (hData == NULL) {
RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
RAW_NULL_CHECK(pSW);
TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
if (fields == NULL) {
SET_ERROR_MSG("calloc fields failed");
code = terrno;
goto end;
}
for (int i = 0; i < pSW->nCols; i++) {
fields[i].type = pSW->pSchema[i].type;
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
void* rawData = getRawDataFromRes(pRetrieve);
SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname));
STableMeta* pTableMeta = NULL;
RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName,
&pTableMeta, pSW, rawData, retry));
char err[ERR_MSG_LEN] = {0};
SVCreateTbReq* pCreateReqTmp = NULL;
if (pCreateReqDst) {
RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp));
}
code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN);
if (pCreateReqTmp != NULL) {
tdDestroySVCreateTbReq(pCreateReqTmp);
taosMemoryFree(pCreateReqTmp);
}
taosMemoryFree(fields);
taosMemoryFreeClear(pTableMeta);
code =
rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
if (code != TSDB_CODE_SUCCESS) {
SET_ERROR_MSG("table:%s, err:%s", tbName, err);
SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
goto end;
}
}
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
qDestroyQuery(pQuery);
pQuery = NULL;
rspObj.resIter = -1;
continue;
}
break;
}
end:
uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code));
tDeleteSTaosxRsp(&rspObj.dataRsp);
void* pIter = taosHashIterate(pCreateTbHash, NULL);
while (pIter) {
tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE);
pIter = taosHashIterate(pCreateTbHash, pIter);
}
taosHashCleanup(pCreateTbHash);
tDeleteSTaosxRsp(&rspObj.dataRsp);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
taosMemoryFreeClear(pTableMeta);
return code;
}
@ -2199,7 +2285,31 @@ void tmq_free_raw(tmq_raw_data raw) {
(void)memset(terrMsg, 0, ERR_MSG_LEN);
}
static int32_t writeRawInit() {
while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) {
int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1);
if (old == 0) {
int32_t code = initRawCacheHash();
if (code != 0) {
uError("tmq writeRawImpl init error:%d", code);
atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL);
return code;
}
atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK);
}
}
if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) {
return TSDB_CODE_INTERNAL_ERROR;
}
return 0;
}
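writeRawInit() pairs a one-shot guard with a published outcome: a single thread wins the compare-and-swap on initFlag and runs initRawCacheHash(), while the rest spin on initedFlag until it leaves WRITE_RAW_INIT_START. The same shape in self-contained C11 atomics (not the TDengine atomic API):
#include <stdatomic.h>
#include <stdbool.h>
enum { INIT_START = 0, INIT_OK = 1, INIT_FAIL = 2 };
static atomic_int gGuard = 0;           // decides who runs the initializer
static atomic_int gState = INIT_START;  // publishes the outcome to everyone
static int doInit(void) { return 0; }   // stand-in for initRawCacheHash()
static bool initOnce(void) {
  while (atomic_load(&gState) == INIT_START) {
    int expected = 0;
    if (atomic_compare_exchange_strong(&gGuard, &expected, 1)) {
      atomic_store(&gState, doInit() == 0 ? INIT_OK : INIT_FAIL);
    }
  }
  return atomic_load(&gState) == INIT_OK;
}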
static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) {
if (writeRawInit() != 0) {
return TSDB_CODE_INTERNAL_ERROR;
}
if (type == TDMT_VND_CREATE_STB) {
return taosCreateStb(taos, buf, len);
} else if (type == TDMT_VND_ALTER_STB) {
@ -2214,10 +2324,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
return taosDropTable(taos, buf, len);
} else if (type == TDMT_VND_DELETE) {
return taosDeleteData(taos, buf, len);
} else if (type == RES_TYPE__TMQ) {
return tmqWriteRawDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ_METADATA) {
return tmqWriteRawMetaDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ) {
return tmqWriteRawDataImpl(taos, buf, len);
} else if (type == RES_TYPE__TMQ_BATCH_META) {
return tmqWriteBatchMetaDataImpl(taos, buf, len);
}
@ -2225,7 +2335,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
}
int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
if (!taos) {
if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) {
SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw);
return TSDB_CODE_INVALID_PARA;
}

File diff suppressed because it is too large

View File

@ -21,259 +21,10 @@
#define OTD_JSON_SUB_FIELDS_NUM 2
#define JUMP_JSON_SPACE(start) \
while (*(start)) { \
if (unlikely(*(start) > 32)) \
break; \
else \
(start)++; \
}
static int32_t smlJsonGetObj(char **payload) {
int leftBracketCnt = 0;
bool isInQuote = false;
while (**payload) {
if (**payload == '"' && *((*payload) - 1) != '\\') {
isInQuote = !isInQuote;
} else if (!isInQuote && unlikely(**payload == '{')) {
leftBracketCnt++;
(*payload)++;
continue;
} else if (!isInQuote && unlikely(**payload == '}')) {
leftBracketCnt--;
(*payload)++;
if (leftBracketCnt == 0) {
return 0;
} else if (leftBracketCnt < 0) {
return -1;
}
continue;
}
(*payload)++;
}
return -1;
}
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) {
int index = 0;
while (*(*start)) {
if ((*start)[0] != '"') {
(*start)++;
continue;
}
if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
return TSDB_CODE_TSC_INVALID_JSON;
}
char *sTmp = *start;
if ((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't' && (*start)[4] == 'r' && (*start)[5] == 'i' &&
(*start)[6] == 'c' && (*start)[7] == '"') {
(*start) += 8;
bool isInQuote = false;
while (*(*start)) {
if (unlikely(!isInQuote && *(*start) == '"')) {
(*start)++;
offset[index++] = *start - sTmp;
element->measure = (*start);
isInQuote = true;
continue;
}
if (unlikely(isInQuote && *(*start) == '"')) {
element->measureLen = (*start) - element->measure;
(*start)++;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm' && (*start)[4] == 'e' &&
(*start)[5] == 's' && (*start)[6] == 't' && (*start)[7] == 'a' && (*start)[8] == 'm' &&
(*start)[9] == 'p' && (*start)[10] == '"') {
(*start) += 11;
bool hasColon = false;
while (*(*start)) {
if (unlikely(!hasColon && *(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->timestamp = (*start);
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->timestampLen = tmp - (*start);
*start = tmp;
}
break;
}
hasColon = true;
continue;
}
if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) {
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
} else if ((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l' && (*start)[4] == 'u' &&
(*start)[5] == 'e' && (*start)[6] == '"') {
(*start) += 7;
bool hasColon = false;
while (*(*start)) {
if (unlikely(!hasColon && *(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->cols = (*start);
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->colsLen = tmp - (*start);
*start = tmp;
}
break;
}
hasColon = true;
continue;
}
if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) {
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g' && (*start)[4] == 's' &&
(*start)[5] == '"') {
(*start) += 6;
while (*(*start)) {
if (unlikely(*(*start) == ':')) {
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->tags = (*start);
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->tagsLen = tmp - (*start);
*start = tmp;
}
break;
}
(*start)++;
}
}
if (*(*start) == '\0') {
break;
}
if (*(*start) == '}') {
(*start)++;
break;
}
(*start)++;
}
if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL ||
element->measure == NULL || element->timestamp == NULL) {
uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM);
return TSDB_CODE_TSC_INVALID_JSON;
}
return 0;
}
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) {
int index = 0;
while (*(*start)) {
if ((*start)[0] != '"') {
(*start)++;
continue;
}
if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
return TSDB_CODE_TSC_INVALID_JSON;
}
if ((*start)[1] == 'm') {
(*start) += offset[index++];
element->measure = *start;
while (*(*start)) {
if (unlikely(*(*start) == '"')) {
element->measureLen = (*start) - element->measure;
(*start)++;
break;
}
(*start)++;
}
} else if ((*start)[1] == 't' && (*start)[2] == 'i') {
(*start) += offset[index++];
element->timestamp = *start;
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->timestampLen = tmp - (*start);
*start = tmp;
}
} else {
while (*(*start)) {
if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) {
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
}
} else if ((*start)[1] == 'v') {
(*start) += offset[index++];
element->cols = *start;
if (*(*start) == '{') {
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->colsLen = tmp - (*start);
*start = tmp;
}
} else {
while (*(*start)) {
if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) {
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
}
} else if ((*start)[1] == 't' && (*start)[2] == 'a') {
(*start) += offset[index++];
element->tags = (*start);
char *tmp = *start;
int32_t code = smlJsonGetObj(&tmp);
if (code == 0) {
element->tagsLen = tmp - (*start);
*start = tmp;
}
}
if (*(*start) == '}') {
(*start)++;
break;
}
(*start)++;
}
if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) {
uError("elements != %d", OTD_JSON_FIELDS_NUM);
return TSDB_CODE_TSC_INVALID_JSON;
}
return TSDB_CODE_SUCCESS;
}
static inline int32_t smlParseMetricFromJSON(SSmlHandle *info, cJSON *metric, SSmlLineInfo *elements) {
elements->measureLen = strlen(metric->valuestring);
if (IS_INVALID_TABLE_LEN(elements->measureLen)) {
uError("OTD:0x%" PRIx64 " Metric length is 0 or large than 192", info->id);
uError("SML:0x%" PRIx64 " Metric length is 0 or large than 192", info->id);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
@ -293,7 +44,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) {
child = child->next;
}
if (*marks[i] == NULL) {
uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]);
uError("SML %s error, not find mark:%d:%s", __FUNCTION__, i, jsonName[i]);
return TSDB_CODE_TSC_INVALID_JSON;
}
}
@ -302,7 +53,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) {
static int32_t smlConvertJSONBool(SSmlKv *pVal, char *typeStr, cJSON *value) {
if (strcasecmp(typeStr, "bool") != 0) {
uError("OTD:invalid type(%s) for JSON Bool", typeStr);
uError("SML:invalid type(%s) for JSON Bool", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
pVal->type = TSDB_DATA_TYPE_BOOL;
@ -316,7 +67,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// tinyint
if (strcasecmp(typeStr, "i8") == 0 || strcasecmp(typeStr, "tinyint") == 0) {
if (!IS_VALID_TINYINT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble);
uError("SML:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
}
pVal->type = TSDB_DATA_TYPE_TINYINT;
@ -327,7 +78,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// smallint
if (strcasecmp(typeStr, "i16") == 0 || strcasecmp(typeStr, "smallint") == 0) {
if (!IS_VALID_SMALLINT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(smallint)", value->valuedouble);
uError("SML:JSON value(%f) cannot fit in type(smallint)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
}
pVal->type = TSDB_DATA_TYPE_SMALLINT;
@ -338,7 +89,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// int
if (strcasecmp(typeStr, "i32") == 0 || strcasecmp(typeStr, "int") == 0) {
if (!IS_VALID_INT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(int)", value->valuedouble);
uError("SML:JSON value(%f) cannot fit in type(int)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
}
pVal->type = TSDB_DATA_TYPE_INT;
@ -362,7 +113,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
// float
if (strcasecmp(typeStr, "f32") == 0 || strcasecmp(typeStr, "float") == 0) {
if (!IS_VALID_FLOAT(value->valuedouble)) {
uError("OTD:JSON value(%f) cannot fit in type(float)", value->valuedouble);
uError("SML:JSON value(%f) cannot fit in type(float)", value->valuedouble);
return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
}
pVal->type = TSDB_DATA_TYPE_FLOAT;
@ -379,7 +130,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
}
// if reach here means type is unsupported
uError("OTD:invalid type(%s) for JSON Number", typeStr);
uError("SML:invalid type(%s) for JSON Number", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
@ -391,7 +142,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
} else if (strcasecmp(typeStr, "nchar") == 0) {
pVal->type = TSDB_DATA_TYPE_NCHAR;
} else {
uError("OTD:invalid type(%s) for JSON String", typeStr);
uError("SML:invalid type(%s) for JSON String", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
pVal->length = strlen(value->valuestring);
@ -474,7 +225,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) {
case cJSON_String: {
int32_t ret = smlConvertJSONString(kv, "binary", root);
if (ret != TSDB_CODE_SUCCESS) {
uError("OTD:Failed to parse binary value from JSON Obj");
uError("SML:Failed to parse binary value from JSON Obj");
return ret;
}
break;
@ -482,7 +233,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) {
case cJSON_Object: {
int32_t ret = smlParseValueFromJSONObj(root, kv);
if (ret != TSDB_CODE_SUCCESS) {
uError("OTD:Failed to parse value from JSON Obj");
uError("SML:Failed to parse value from JSON Obj");
return ret;
}
break;
@ -511,7 +262,7 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){
}
size_t keyLen = strlen(tag->string);
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
uError("OTD:Tag key length is 0 or too large than 64");
uError("SML:Tag key length is 0 or too large than 64");
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
@ -539,28 +290,24 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){
}
static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo *elements) {
int32_t ret = 0;
if (is_same_child_table_telnet(elements, &info->preLine) == 0) {
elements->measureTag = info->preLine.measureTag;
return TSDB_CODE_SUCCESS;
}
int32_t code = 0;
int32_t lino = 0;
if(info->dataFormat){
ret = smlProcessSuperTable(info, elements);
if(ret != 0){
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
return ret;
}
}
ret = smlProcessTagJson(info, tags);
if(ret != 0){
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
return ret;
}
ret = smlJoinMeasureTag(elements);
if(ret != 0){
return ret;
SML_CHECK_CODE(smlProcessSuperTable(info, elements));
}
SML_CHECK_CODE(smlProcessTagJson(info, tags));
SML_CHECK_CODE(smlJoinMeasureTag(elements));
return smlProcessChildTable(info, elements);
END:
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
RETURN
}
static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) {
@ -678,7 +425,8 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) {
}
static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
cJSON *metricJson = NULL;
cJSON *tsJson = NULL;
@ -688,57 +436,27 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
int32_t size = cJSON_GetArraySize(root);
// outmost json fields has to be exactly 4
if (size != OTD_JSON_FIELDS_NUM) {
uError("OTD:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, size);
uError("SML:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, size);
return TSDB_CODE_TSC_INVALID_JSON;
}
cJSON **marks[OTD_JSON_FIELDS_NUM] = {&metricJson, &tsJson, &valueJson, &tagsJson};
ret = smlGetJsonElements(root, marks);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
SML_CHECK_CODE(smlGetJsonElements(root, marks));
// Parse metric
ret = smlParseMetricFromJSON(info, metricJson, elements);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("OTD:0x%" PRIx64 " Unable to parse metric from JSON payload", info->id);
return ret;
}
SML_CHECK_CODE(smlParseMetricFromJSON(info, metricJson, elements));
// Parse metric value
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN};
ret = smlParseValueFromJSON(valueJson, &kv);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse metric value from JSON payload", info->id);
return ret;
}
SML_CHECK_CODE(smlParseValueFromJSON(valueJson, &kv));
// Parse tags
bool needFree = info->dataFormat;
elements->tags = cJSON_PrintUnformatted(tagsJson);
if (elements->tags == NULL){
return TSDB_CODE_OUT_OF_MEMORY;
}
elements->tagsLen = strlen(elements->tags);
if (is_same_child_table_telnet(elements, &info->preLine) != 0) {
ret = smlParseTagsFromJSON(info, tagsJson, elements);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
taosMemoryFree(elements->tags);
elements->tags = NULL;
return ret;
}
} else {
elements->measureTag = info->preLine.measureTag;
}
SML_CHECK_NULL(elements->tags);
if (needFree) {
taosMemoryFree(elements->tags);
elements->tags = NULL;
}
elements->tagsLen = strlen(elements->tags);
SML_CHECK_CODE(smlParseTagsFromJSON(info, tagsJson, elements));
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
goto END;
}
// Parse timestamp
@ -747,29 +465,34 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
if (unlikely(ts < 0)) {
char* tmp = cJSON_PrintUnformatted(tsJson);
if (tmp == NULL) {
uError("cJSON_PrintUnformatted failed since %s", tstrerror(TSDB_CODE_OUT_OF_MEMORY));
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts);
uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts);
} else {
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts);
uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts);
taosMemoryFree(tmp);
}
return TSDB_CODE_INVALID_TIMESTAMP;
SML_CHECK_CODE(TSDB_CODE_INVALID_TIMESTAMP);
}
SSmlKv kvTs = {0};
smlBuildTsKv(&kvTs, ts);
if (info->dataFormat){
code = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv);
} else {
code = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv);
}
SML_CHECK_CODE(code);
taosMemoryFreeClear(info->preLine.tags);
info->preLine = *elements;
elements->tags = NULL;
return smlParseEndTelnetJson(info, elements, &kvTs, &kv);
END:
taosMemoryFree(elements->tags);
RETURN
}
static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
int32_t payloadNum = 0;
int32_t ret = TSDB_CODE_SUCCESS;
if (unlikely(payload == NULL)) {
uError("SML:0x%" PRIx64 " empty JSON Payload", info->id);
return TSDB_CODE_TSC_INVALID_JSON;
}
info->root = cJSON_Parse(payload);
if (unlikely(info->root == NULL)) {
uError("SML:0x%" PRIx64 " parse json failed:%s", info->id, payload);
@ -782,27 +505,11 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
} else if (cJSON_IsObject(info->root)) {
payloadNum = 1;
} else {
uError("SML:0x%" PRIx64 " Invalid JSON Payload 3:%s", info->id, payload);
uError("SML:0x%" PRIx64 " Invalid JSON type:%s", info->id, payload);
return TSDB_CODE_TSC_INVALID_JSON;
}
if (unlikely(info->lines != NULL)) {
for (int i = 0; i < info->lineNum; i++) {
taosArrayDestroyEx(info->lines[i].colArray, freeSSmlKv);
if (info->lines[i].measureTagsLen != 0) taosMemoryFree(info->lines[i].measureTag);
}
taosMemoryFree(info->lines);
info->lines = NULL;
}
info->lineNum = payloadNum;
info->dataFormat = true;
ret = smlClearForRerun(info);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
info->parseJsonByLib = true;
cJSON *head = (payloadNum == 1 && cJSON_IsObject(info->root)) ? info->root : info->root->child;
int cnt = 0;
@ -811,6 +518,7 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
if (info->dataFormat) {
SSmlLineInfo element = {0};
ret = smlParseJSONStringExt(info, dataPoint, &element);
if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag);
} else {
ret = smlParseJSONStringExt(info, dataPoint, info->lines + cnt);
}
@ -836,164 +544,3 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
return TSDB_CODE_SUCCESS;
}
static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS;
if (info->offset[0] == 0) {
ret = smlJsonParseObjFirst(start, elements, info->offset);
} else {
ret = smlJsonParseObj(start, elements, info->offset);
}
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
if (unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS;
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) {
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (unlikely(elements->colsLen == 0)) {
uError("SML:colsLen == 0");
return TSDB_CODE_TSC_INVALID_VALUE;
} else if (unlikely(elements->cols[0] == '{')) {
char tmp = elements->cols[elements->colsLen];
elements->cols[elements->colsLen] = '\0';
cJSON *valueJson = cJSON_Parse(elements->cols);
if (unlikely(valueJson == NULL)) {
uError("SML:0x%" PRIx64 " parse json cols failed:%s", info->id, elements->cols);
elements->cols[elements->colsLen] = tmp;
return TSDB_CODE_TSC_INVALID_JSON;
}
if (taosArrayPush(info->tagJsonArray, &valueJson) == NULL){
cJSON_Delete(valueJson);
elements->cols[elements->colsLen] = tmp;
return terrno;
}
ret = smlParseValueFromJSONObj(valueJson, &kv);
if (ret != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " Failed to parse value from JSON Obj:%s", info->id, elements->cols);
elements->cols[elements->colsLen] = tmp;
return TSDB_CODE_TSC_INVALID_VALUE;
}
elements->cols[elements->colsLen] = tmp;
} else if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " cols invalidate:%s", info->id, elements->cols);
return TSDB_CODE_TSC_INVALID_VALUE;
}
// Parse tags
if (is_same_child_table_telnet(elements, &info->preLine) != 0) {
char tmp = *(elements->tags + elements->tagsLen);
*(elements->tags + elements->tagsLen) = 0;
cJSON *tagsJson = cJSON_Parse(elements->tags);
*(elements->tags + elements->tagsLen) = tmp;
if (unlikely(tagsJson == NULL)) {
uError("SML:0x%" PRIx64 " parse json tag failed:%s", info->id, elements->tags);
return TSDB_CODE_TSC_INVALID_JSON;
}
if (taosArrayPush(info->tagJsonArray, &tagsJson) == NULL){
cJSON_Delete(tagsJson);
uError("SML:0x%" PRIx64 " taosArrayPush failed", info->id);
return terrno;
}
ret = smlParseTagsFromJSON(info, tagsJson, elements);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
return ret;
}
} else {
elements->measureTag = info->preLine.measureTag;
}
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
}
// Parse timestamp
// notice!!! put ts back to tag to ensure get meta->precision
int64_t ts = 0;
if (unlikely(elements->timestampLen == 0)) {
uError("OTD:0x%" PRIx64 " elements->timestampLen == 0", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
} else if (elements->timestamp[0] == '{') {
char tmp = elements->timestamp[elements->timestampLen];
elements->timestamp[elements->timestampLen] = '\0';
cJSON *tsJson = cJSON_Parse(elements->timestamp);
ts = smlParseTSFromJSON(info, tsJson);
if (unlikely(ts < 0)) {
uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload:%s", info->id, elements->timestamp);
elements->timestamp[elements->timestampLen] = tmp;
cJSON_Delete(tsJson);
return TSDB_CODE_INVALID_TIMESTAMP;
}
elements->timestamp[elements->timestampLen] = tmp;
cJSON_Delete(tsJson);
} else {
ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) {
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
}
}
SSmlKv kvTs = {0};
smlBuildTsKv(&kvTs, ts);
return smlParseEndTelnetJson(info, elements, &kvTs, &kv);
}
int32_t smlParseJSON(SSmlHandle *info, char *payload) {
int32_t payloadNum = 1 << 15;
int32_t ret = TSDB_CODE_SUCCESS;
uDebug("SML:0x%" PRIx64 "json:%s", info->id, payload);
int cnt = 0;
char *dataPointStart = payload;
while (1) {
if (info->dataFormat) {
SSmlLineInfo element = {0};
ret = smlParseJSONString(info, &dataPointStart, &element);
if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag);
} else {
if (cnt >= payloadNum) {
payloadNum = payloadNum << 1;
void *tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo));
if (tmp == NULL) {
ret = terrno;
return ret;
}
info->lines = (SSmlLineInfo *)tmp;
(void)memset(info->lines + cnt, 0, (payloadNum - cnt) * sizeof(SSmlLineInfo));
}
ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt);
if ((info->lines + cnt)->measure == NULL) break;
}
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("SML:0x%" PRIx64 " Invalid JSON Payload 1:%s", info->id, payload);
return smlParseJSONExt(info, payload);
}
if (unlikely(info->reRun)) {
cnt = 0;
dataPointStart = payload;
info->lineNum = payloadNum;
ret = smlClearForRerun(info);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
continue;
}
cnt++;
if (*dataPointStart == '\0') break;
}
info->lineNum = cnt;
return TSDB_CODE_SUCCESS;
}

View File

@ -63,7 +63,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
if (unlikely(ts == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid timestamp", data);
return TSDB_CODE_SML_INVALID_DATA;
}
return ts;
@ -84,7 +84,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
}
if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) {
if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_NCHAR;
pVal->length -= NCHAR_ADD_LEN;
if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
@ -97,7 +97,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
}
if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) {
if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
int32_t code = initCtxGeomFromText();
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -124,7 +124,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
}
if (pVal->value[0] == 'b' || pVal->value[0] == 'B') { // varbinary
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) {
if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_VARBINARY;
if(isHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
if(!isValidateHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
@ -298,7 +298,7 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
}
if (info->dataFormat && !isSmlTagAligned(info, cnt, &kv)) {
return TSDB_CODE_TSC_INVALID_JSON;
return TSDB_CODE_SML_INVALID_DATA;
}
cnt++;
@ -311,31 +311,24 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
}
static int32_t smlParseTagLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *elements) {
int32_t code = 0;
int32_t lino = 0;
bool isSameCTable = IS_SAME_CHILD_TABLE;
if(isSameCTable){
return TSDB_CODE_SUCCESS;
}
int32_t ret = 0;
if(info->dataFormat){
ret = smlProcessSuperTable(info, elements);
if(ret != 0){
if(info->reRun){
return TSDB_CODE_SUCCESS;
SML_CHECK_CODE(smlProcessSuperTable(info, elements));
}
return ret;
}
}
ret = smlProcessTagLine(info, sql, sqlEnd);
if(ret != 0){
if (info->reRun){
return TSDB_CODE_SUCCESS;
}
return ret;
}
SML_CHECK_CODE(smlProcessTagLine(info, sql, sqlEnd));
return smlProcessChildTable(info, elements);
END:
if(info->reRun){
return TSDB_CODE_SUCCESS;
}
RETURN
}
static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement) {
@ -353,7 +346,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
const char *escapeChar = NULL;
while (*sql < sqlEnd) {
if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(IS_EQUAL(*sql,escapeChar))) {
@ -370,7 +363,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
}
if (unlikely(IS_INVALID_COL_LEN(keyLen - keyLenEscaped))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
@ -404,18 +397,18 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
valueLen = *sql - value;
if (unlikely(quoteNum != 0 && quoteNum != 2)) {
smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line unbalanced quotes", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
int32_t ret = smlParseValue(&kv, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value);
uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret);
return ret;
}
@ -437,11 +430,6 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
}
(void)memcpy(tmp, kv.value, kv.length);
PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length);
if(kv.type == TSDB_DATA_TYPE_GEOMETRY) {
uError("SML:0x%" PRIx64 " smlParseColLine error, invalid GEOMETRY type.", info->id);
taosMemoryFree((void*)kv.value);
return TSDB_CODE_TSC_INVALID_VALUE;
}
if(kv.type == TSDB_DATA_TYPE_VARBINARY){
taosMemoryFree((void*)kv.value);
}
@ -510,7 +498,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
}
elements->measureLen = sql - elements->measure;
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen - measureLenEscaped))) {
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
smlBuildInvalidDataMsg(&info->msgBuf, "SML line measure is empty or too large than 192", NULL);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
@@ -557,7 +545,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
   elements->colsLen = sql - elements->cols;
   if (unlikely(elements->colsLen == 0)) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "cols is empty", NULL);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML line cols is empty", NULL);
     return TSDB_CODE_SML_INVALID_DATA;
   }
@@ -574,7 +562,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
   int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen);
   if (unlikely(ts <= 0)) {
-    uError("SML:0x%" PRIx64 " smlParseTS error:%" PRId64, info->id, ts);
+    uError("SML:0x%" PRIx64 " %s error:%" PRId64, info->id, __FUNCTION__, ts);
     return TSDB_CODE_INVALID_TIMESTAMP;
   }
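
The measure, cols, and timestamp checks above all guard a single InfluxDB-style line. For orientation, a hedged usage sketch of how such a line reaches smlParseInfluxString() through the public C API; the host, credentials, and test database are placeholders, while taos_schemaless_insert() and the two enum constants are the standard schemaless entry points declared in taos.h:

#include <stdio.h>
#include <taos.h>

int main(void) {
  /* Placeholder connection; assumes a local server and an existing db. */
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  char *lines[] = {
      /* measure,tag_set field_set timestamp(ns) */
      "meters,location=California.SanFrancisco,groupid=2 "
      "current=10.3,voltage=219i,phase=0.31 1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    (void)printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
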

View File

@@ -148,31 +148,21 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
     return TSDB_CODE_SUCCESS;
   }
-  int32_t ret = 0;
+  int32_t code = 0;
+  int32_t lino = 0;
   if(info->dataFormat){
-    ret = smlProcessSuperTable(info, elements);
-    if(ret != 0){
-      if(info->reRun){
-        return TSDB_CODE_SUCCESS;
-      }
-      return ret;
-    }
+    SML_CHECK_CODE(smlProcessSuperTable(info, elements));
   }
-  ret = smlProcessTagTelnet(info, data, sqlEnd);
-  if(ret != 0){
-    if (info->reRun){
-      return TSDB_CODE_SUCCESS;
-    }
-    return ret;
-  }
-  ret = smlJoinMeasureTag(elements);
-  if(ret != 0){
-    return ret;
-  }
-  return smlProcessChildTable(info, elements);
+  SML_CHECK_CODE(smlProcessTagTelnet(info, data, sqlEnd));
+  SML_CHECK_CODE(smlJoinMeasureTag(elements));
+  code = smlProcessChildTable(info, elements);
+END:
+  if(info->reRun){
+    return TSDB_CODE_SUCCESS;
+  }
+  RETURN
 }
// format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
@@ -182,14 +172,14 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
   // parse metric
   smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen);
   if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid measure", sql);
     return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
   }
 
   // parse timestamp
   smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen);
   if (unlikely(!elements->timestamp || elements->timestampLen == 0)) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid timestamp", sql);
     return TSDB_CODE_SML_INVALID_DATA;
   }
@@ -199,19 +189,21 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
   }
   int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
   if (unlikely(ts < 0)) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet parse timestamp failed", sql);
     return TSDB_CODE_INVALID_TIMESTAMP;
   }
 
   // parse value
   smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen);
   if (unlikely(!elements->cols || elements->colsLen == 0)) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid value", sql);
     return TSDB_CODE_TSC_INVALID_VALUE;
   }
 
   SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
-  if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
+  int ret = smlParseValue(&kv, &info->msgBuf);
+  if (ret != TSDB_CODE_SUCCESS) {
+    uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret);
     return TSDB_CODE_TSC_INVALID_VALUE;
   }
@@ -220,11 +212,11 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
   elements->tags = sql;
   elements->tagsLen = sqlEnd - sql;
   if (unlikely(!elements->tags || elements->tagsLen == 0)) {
-    smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
+    smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid tag value", sql);
     return TSDB_CODE_TSC_INVALID_VALUE;
   }
 
-  int ret = smlParseTelnetTags(info, sql, sqlEnd, elements);
+  ret = smlParseTelnetTags(info, sql, sqlEnd, elements);
   if (unlikely(ret != TSDB_CODE_SUCCESS)) {
     return ret;
   }
@@ -239,5 +231,12 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
     kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision);
   }
 
-  return smlParseEndTelnetJson(info, elements, &kvTs, &kv);
+  if (info->dataFormat){
+    ret = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv);
+  } else {
+    ret = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv);
+  }
+  info->preLine = *elements;
+  return ret;
 }
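
smlParseTelnetString() above peels off the metric, timestamp, and value as space-delimited tokens and hands the remainder to smlParseTelnetTags(). A toy tokenizer in the spirit of smlParseTelnetElement(), whose body is not in this diff, run over a sample OpenTSDB line:

#include <stddef.h>

/* Illustrative stand-in for smlParseTelnetElement(): skip leading spaces,
 * then return the next space-delimited token via start/len. */
static void nextToken(const char **sql, const char *end,
                      const char **start, size_t *len) {
  while (*sql < end && **sql == ' ') (*sql)++;
  *start = *sql;
  while (*sql < end && **sql != ' ') (*sql)++;
  *len = (size_t)(*sql - *start);
}

int main(void) {
  const char line[] = "meters.current 1648432611250 11.3 location=SF groupid=3";
  const char *p = line, *end = line + sizeof(line) - 1;
  const char *tok = NULL;
  size_t len = 0;
  nextToken(&p, end, &tok, &len); /* metric:    "meters.current" */
  nextToken(&p, end, &tok, &len); /* timestamp: "1648432611250"  */
  nextToken(&p, end, &tok, &len); /* value:     "11.3"           */
  /* p..end is the tag section that smlParseTelnetTags() would parse. */
  return 0;
}
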

Some files were not shown because too many files have changed in this diff