Merge branch 'main' into fix/liaohj

This commit is contained in:
Haojun Liao 2024-11-16 01:31:50 +08:00
commit 20ad165ae3
141 changed files with 9238 additions and 3200 deletions

2
.gitignore vendored
View File

@ -161,4 +161,4 @@ version.h
geos_c.h
source/libs/parser/src/sql.c
include/common/ttokenauto.h
!packaging/smokeTest/pytest_require.txt

View File

@ -1,9 +1,11 @@
import hudson.model.Result
import hudson.model.*;
import jenkins.model.CauseOfInterruption
docs_only=0
node {
}
file_zh_changed = ''
file_en_changed = ''
file_no_doc_changed = ''
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@ -29,7 +31,7 @@ def abort_previous(){
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
def check_docs() {
def check_docs(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
hostname
@ -40,39 +42,94 @@ def check_docs() {
cd ${WKC}
git reset --hard
git clean -f
rm -rf examples/rust/
git remote prune origin
git fetch
'''
script {
sh '''
cd ${WKC}
git checkout ''' + env.CHANGE_TARGET + '''
'''
}
sh '''
cd ${WKC}
git remote prune origin
git checkout ''' + env.CHANGE_TARGET + '''
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git checkout -qf FETCH_HEAD
'''
def file_changed = sh (
file_zh_changed = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || :
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep "^docs/zh/" || :
''',
returnStdout: true
)
file_en_changed = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep "^docs/en/" || :
''',
returnStdout: true
)
file_no_doc_changed = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v "*.md" || :
''',
returnStdout: true
).trim()
if (file_changed == '') {
echo "docs PR"
docs_only=1
} else {
echo file_changed
}
env.FILE_CHANGED = file_changed
echo "file_zh_changed: ${file_zh_changed}"
echo "file_en_changed: ${file_en_changed}"
echo "file_no_doc_changed: ${file_no_doc_changed}"
}
}
def build_pre_docs(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
hostname
date
env
'''
sh '''
cd ${DOC_WKC}/${td_repo}
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout ''' + env.CHANGE_TARGET + '''
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
sh '''
cd ${DOC_WKC}/${tools_repo}
git reset --hard
git clean -f
git fetch
git remote prune origin
git checkout ''' + env.CHANGE_TARGET + '''
git pull >/dev/null
'''
}
}
def build_zh_docs(){
sh '''
cd ${DOC_WKC}/${zh_doc_repo}
# git pull
yarn ass local
yarn build
'''
}
def build_en_docs(){
sh '''
cd ${DOC_WKC}/${en_doc_repo}
# git pull
yarn ass local
yarn build
'''
}
def pre_test(){
sh '''
hostname
@ -153,6 +210,7 @@ def pre_test(){
'''
return 1
}
def pre_test_build_mac() {
sh '''
hostname
@ -173,6 +231,7 @@ def pre_test_build_mac() {
date
'''
}
def pre_test_win(){
bat '''
hostname
@ -273,17 +332,8 @@ def pre_test_win(){
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
git branch
git reset --hard
git pull
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
git log -5
'''
}
def pre_test_build_win() {
bat '''
echo "building ..."
@ -303,16 +353,14 @@ def pre_test_build_win() {
time /t
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.16
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.3.3
cd %WIN_COMMUNITY_ROOT%/tests/ci
pip3 install taospy==2.7.16
pip3 install taos-ws-py==0.3.3
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
def run_win_ctest() {
bat '''
echo "windows ctest ..."
@ -322,10 +370,10 @@ def run_win_ctest() {
time /t
'''
}
def run_win_test() {
bat '''
echo "windows test ..."
cd %WIN_CONNECTOR_ROOT%
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
@ -343,29 +391,67 @@ pipeline {
WKDIR = '/var/lib/jenkins/workspace'
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
WKPY = '/var/lib/jenkins/workspace/taos-connector-python'
DOC_WKC = '/root/doc_ci_work'
td_repo = 'TDengine'
zh_doc_repo = 'docs.taosdata.com'
en_doc_repo = 'docs.tdengine.com'
tools_repo = 'taos-tools'
}
stages {
stage('check') {
stage ('check doc file changed') {
agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
steps {
check_docs()
}
}
stage ('pre for build docs') {
when {
allOf {
not { expression { env.CHANGE_BRANCH =~ /docs\// }}
}
beforeAgent true
expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' }
}
agent{label "doc_build_0_30"}
steps {
build_pre_docs()
}
}
stage('build Docs') {
when {
beforeAgent true
expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' }
}
parallel {
stage('check docs') {
agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
stage('build zh docs') {
agent{label "doc_build_0_30"}
when {
expression { file_zh_changed != '' }
}
steps {
check_docs()
build_zh_docs()
}
}
stage('build en docs') {
agent{label "doc_build_0_30"}
when {
expression { file_en_changed != '' }
}
steps {
build_en_docs()
}
}
}
post {
unsuccessful {
error('build docs stage failed, terminating pipeline.')
}
}
}
stage('run test') {
when {
allOf {
not { expression { env.CHANGE_BRANCH =~ /docs\// }}
expression { docs_only == 0 }
not { expression { file_no_doc_changed == '' }}
}
}
parallel {
@ -375,7 +461,6 @@ pipeline {
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
@ -420,7 +505,7 @@ pipeline {
script {
sh '''
mkdir -p ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}
echo "''' + env.FILE_CHANGED + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt
echo "''' + file_no_doc_changed + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt
'''
sh '''
cd ${WKC}/tests/parallel_test
@ -570,4 +655,4 @@ pipeline {
)
}
}
}
}

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.3.4.0.alpha")
SET(TD_VER_NUMBER "3.3.4.3.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -1149,7 +1149,7 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. For a table with composite primary key, only the data with the smallest primary key value is returned.
**Description**: Returns the unique values of this column, similar to the `distinct` keyword. For duplicate values, the row with the earliest timestamp is returned. For a table with a composite primary key, only the data with the smallest primary key value is returned.
**Return value type**: Same as the data type of the column being operated upon
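A minimal usage sketch (the table and column names below are hypothetical, not taken from this document):
```sql
-- Return each distinct value of `voltage`; for duplicate values,
-- the row with the earliest timestamp is used.
SELECT UNIQUE(voltage) FROM meters;
```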

View File

@ -20,6 +20,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
## 3.3.4.3
<Release type="tdengine" version="3.3.4.3" />
## 3.3.3.0
<Release type="tdengine" version="3.3.3.0" />

View File

@ -228,4 +228,35 @@ PAUSE STREAM [IF EXISTS] stream_name;
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
```
没有指定 IF EXISTS如果该 stream 不存在,则报错。如果存在,则恢复流计算。指定了 IF EXISTS如果 stream 不存在,则返回成功。如果存在,则恢复流计算。如果指定 IGNORE UNTREATED则恢复流计算时忽略流计算暂停期间写入的数据。
没有指定 IF EXISTS,如果该 stream 不存在,则报错;如果存在,则恢复流计算。指定了 IF EXISTS,如果 stream 不存在,则返回成功;如果存在,则恢复流计算。如果指定 IGNORE UNTREATED,则恢复流计算时忽略流计算暂停期间写入的数据。
### 流计算升级故障恢复
升级 TDengine 后,如果流计算不兼容,需要删除流计算,然后重新创建流计算。步骤如下:
1. 修改 taos.cfg,添加 disableStream 1
2. 重启 taosd。如果启动失败,修改 stream 目录的名称,避免 taosd 启动的时候尝试加载 stream 目录下的流计算数据信息。不使用删除操作,避免误操作导致的风险。需要修改的文件夹:$dataDir/vnode/vnode*/tq/stream,其中 $dataDir 指 TDengine 存储数据的目录;在 $dataDir/vnode/ 目录下会有多个类似 vnode1、vnode2...vnode* 的目录,需要全部修改里面的 tq/stream 目录的名字,改为 tq/stream.bk
3. 启动 taos
```sql
drop stream xxxx;                ---- xxxx 指 stream name
flush database stream_source_db; ---- 流计算读取数据的超级表所在的 database
flush database stream_dest_db;   ---- 流计算写入数据的超级表所在的 database
```
举例:
```sql
create stream streams1 into test1.streamst as select _wstart, count(a) c1 from test.st interval(1s);
drop stream streams1;
flush database test;
flush database test1;
```
4. 关闭 taosd
5. 修改 taos.cfg,去掉 disableStream 1,或将 disableStream 改为 0
6. 启动 taosd

View File

@ -16,7 +16,7 @@ TDengine 提供了类似于消息队列产品的数据订阅和消费接口。
**注意**
在 TDengine 连接器实现中,对于订阅查询,有以下限制。
- 查询语句限制:订阅查询只能使用 select 语句不支持其他类型的SQL如 insert、update 或 delete 等。
- 查询语句限制:订阅查询只能使用 select 语句,不支持其他类型的 SQL,如订阅库、订阅超级表(非 select 方式)、insert、update 或 delete 等。
- 原始数据查询:订阅查询只能查询原始数据,而不能查询聚合或计算结果。
- 时间顺序限制:订阅查询只能按照时间正序查询数据。
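下面给出一个符合上述限制的订阅主题示例(示意性写法,库名、表名均为假设):
```sql
-- 基于 select 语句创建主题,只查询原始数据,消费时按时间正序返回
CREATE TOPIC topic_meters AS SELECT ts, current, voltage FROM power.meters;
```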

View File

@ -165,6 +165,10 @@ toc_max_heading_level: 4
第一步 填写添加新主题需要的信息,点击“创建”按钮;
![topic-03-addTopicWizard.jpeg](./pic/topic-03-addTopicWizard.jpeg "添加新主题 Wizard 页面")
如上图,您可以选择是否 “同步 meta”。如果同步 meta 信息,则可以订阅到 meta 信息,比如增加或者删除超级表。
您需要根据使用场景来选择是否开启:如果您使用 taos 连接器编写业务代码订阅 topic,则不能开启“同步 meta”,只能订阅数据;如果您创建的 topic 在 explorer 中配置同步任务使用,则可以开启“同步 meta”。
第二步 页面出现以下记录,则证明创建成功。
![topic-05-addTopicSucc1.jpeg](./pic/topic-05-addTopicSucc1.jpeg "查看已创建的流计算")

View File

@ -26,6 +26,22 @@ SHOW USERS;
```sql
ALTER USER TEST DROP HOST HOST_NAME1
```
说明
- 开源版和企业版本都能添加成功,且可以查询到,但是开源版本不会对 IP 做任何限制。
- create user u_write pass 'taosdata1' host 'iprange1','iprange2', 可以一次添加多个 iprange, 服务端会做去重,去重的逻辑是需要 iprange 完全一样
- 默认会把 127.0.0.1 添加到白名单列表,且在白名单列表可以查询
- 集群的节点 IP 集合会自动添加到白名单列表,但是查询不到。
- taosadapter 和 taosd 不在同一台机器的时候,需要把 taosadapter 所在机器的 IP 手动添加到 taosd 白名单列表中
- 集群情况下,各个节点的 enableWhiteList 需要配置成一样,或者全为 false,或者全为 true,否则集群无法启动
- 白名单变更生效时间为 1s,不超过 2s。每次变更对收发性能有些微影响(多一次判断,可以忽略),变更完成之后影响忽略不计;变更过程中对集群没有影响,对正在访问的客户端也没有影响(假设这些客户端的 IP 包含在 white list 内)
- 如果添加两个 ip range, 192.168.1.1/16(假设为 A), 192.168.1.1/24(假设为 B), 严格来说A 包含了 B但是考虑情况太复杂并不会对 A 和 B 做合并
- 要删除的时候,必须严格匹配。 也就是如果添加的是 192.168.1.1/24, 要删除也是 192.168.1.1/24
- 只有 root 才有权限对其他用户增删 ip white list
- 兼容之前的版本,但是不支持从当前版本回退到之前版本
- x.x.x.x/32 和 x.x.x.x 属于同一个 iprange, 显示为 x.x.x.x
- 如果客户端拿到的 0.0.0.0/0, 说明没有开启白名单
- 如果白名单发生了改变, 客户端会在 heartbeat 里检测到。
- 针对一个 user, 添加的 IP 个数上限是 2048
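以下 SQL 为示意(用户名与 IP 段均为假设,其中 add host 的写法为假设,具体以实际版本语法为准),演示上文提到的白名单增删用法:
```sql
-- 创建用户时一次指定多个 iprange(服务端按完全相同的 iprange 去重)
create user u_write pass 'taosdata1' host '192.168.1.0/24','10.1.0.0/16';
-- 为已有用户追加 / 删除 iprange(删除时需与添加的 iprange 严格匹配)
alter user u_write add host '172.16.0.0/16';
alter user u_write drop host '172.16.0.0/16';
```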
## 审计日志

Binary image file changed (not shown). Before: 275 KiB, After: 83 KiB.

View File

@ -26,42 +26,66 @@ taosd 命令行参数如下
:::
### 连接相关
| 参数名称 | 参数说明 |
| :--------------------: | :-------------------------------------------------------------------------------------: |
| firstEp | taosd 启动时,主动连接的集群中首个 dnode 的 end point缺省值localhost:6030 |
| secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint缺省值无 |
| fqdn | 启动 taosd 后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname |
| serverPort | 启动 taosd 后所监听的端口缺省值6030 |
| numOfRpcSessions | 允许一个 dnode 能发起的最大连接数,取值范围 100-100000缺省值30000 |
| timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000单位为毫秒缺省值500000 |
|参数名称|支持版本|参数含义|
|-----------------------|----------|-|
|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point默认值 localhost:6030|
|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint无默认值|
|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
|serverPort | |taosd 监听的端口,默认值 6030|
|compressMsgSize | |是否对 RPC 消息进行压缩;-1所有消息都不压缩0所有消息都压缩N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120默认值 3|
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000默认值 30000|
|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半|
|numOfTaskQueueThreads | |dnode 处理 RPC 消息的线程数|
|statusInterval | |dnode 与 mnode 之间的心跳间隔|
|rpcQueueMemoryAllowed | |dnode 允许的 rpc 消息占用的内存最大值,单位 bytes取值范围 104857600-INT64_MAX默认值 服务器内存的 1/10 |
|resolveFQDNRetryTime | |FQDN 解析失败时的重试次数|
|timeToGetAvailableConn | |获得可用连接的最长等待时间,取值范围 10-50000000单位为毫秒默认值 500000|
|maxShellConns | |允许创建的最大连接数|
|maxRetryWaitTime | |重连最大超时时间|
|shareConnLimit |3.3.4.3 后|内部参数,一个连接可以共享的查询数目,取值范围 1-256,默认值 10|
|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800单位为秒默认值 900|
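可以通过如下 SQL 查看某个 dnode 上这些参数当前的实际取值(示意,dnode 编号为假设,具体语法以实际版本为准):
```sql
SHOW DNODE 1 VARIABLES;
```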
### 监控相关
| 参数名称 | 参数说明 |
| :----------------: | :------------------------------------------------------------------------------------: |
| monitor | 是否收集监控数据并上报0: 关闭1:打开缺省值0 |
| monitorFqdn | taosKeeper 服务所在服务器的 FQDN缺省值无 |
| monitorPort | taosKeeper 服务所监听的端口号缺省值6043 |
| monitorInternal | 监控数据库记录系统参数CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 缺省值30 |
| telemetryReporting | 是否上传 telemetry0: 不上传1上传缺省值1 |
| crashReporting | 是否上传 crash 信息0: 不上传1: 上传;缺省值: 1 |
|参数名称|支持版本|参数含义|
|-----------------------|----------|-|
|monitor | |是否收集监控数据并上报0关闭1:打开;默认值 0|
|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN默认值 无|
|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043|
|monitorInterval | |监控数据库记录系统参数CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
|monitorMaxLogs | |缓存的待上报日志条数|
|monitorComp | |是否采用压缩方式上报监控日志|
|monitorLogProtocol | |是否打印监控日志|
|monitorForceV2 | |是否使用 V2 版本协议上报|
|telemetryReporting | |是否上传 telemetry0不上传1上传默认值 1|
|telemetryServer | |telemetry 服务器地址|
|telemetryPort | |telemetry 服务器端口编号|
|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 43200|
|crashReporting | |是否上传 crash 信息0不上传1上传默认值 1|
### 查询相关
| 参数名称 | 参数说明 |
| :--------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| queryPolicy | 查询策略1: 只使用 vnode不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 4: 使用客户端聚合模式缺省值1 |
| maxNumOfDistinctRes | 允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿 |
| countAlwaysReturnValue | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值0: 返回空行1: 返回;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. |
|参数名称|支持版本|参数含义|
|------------------------|----------|-|
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值0返回空行1返回默认值 1该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL对应的组或窗口将不返回查询结果注意此参数客户端和服务端值应保持一致|
|tagFilterCache | |是否缓存标签过滤结果|
|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿|
|queryBufferSize | |暂不生效|
|queryRspPolicy | |查询响应策略|
|filterScalarMode | |强制使用标量过滤模式0关闭1开启默认值 0|
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|queryMaxConcurrentTables| |内部参数,查询计划的并发数目|
|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|pqSortMemThreshold | |内部参数,排序使用的内存阈值|
### 区域相关
| 参数名称 | 参数说明 |
| :------: | :------------------------------------------------------------------------------------------------------: |
| timezone | 时区,缺省值:当前服务器所配置的时区 |
| locale | 系统区位信息及编码格式 ,缺省值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过 API 设置 |
| charset | 字符集编码,缺省值:系统自动获取 |
|参数名称|支持版本|参数含义|
|-----------------|----------|-|
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|charset | |字符集编码,缺省从系统中获取|
:::info
1. 为应对多时区的数据写入和查询问题TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区产生的时间戳均一致。需要注意的是Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
@ -101,7 +125,7 @@ SELECT count(*) FROM table_name WHERE TS<1554984068000;
客户端的输入的字符均采用操作系统当前默认的编码格式,在 Linux/macOS 系统上多为 UTF-8部分中文系统编码则可能是 GB18030 或 GBK 等。在 docker 环境中默认的编码是 POSIX。在中文版 Windows 系统中,编码则是 CP936。客户端需要确保正确设置自己所使用的字符集即客户端运行的操作系统当前编码字符集才能保证 nchar 中的数据正确转换为 UCS4-LE 编码格式。
在 Linux/macOS 中 locale 的命名规则为: \<语言>_\<地区>.\<字符集编码> 如zh_CN.UTF-8zh 代表中文CN 代表大陆地区UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux/macOS 可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux/macOS 中也可以使用 charset 来指定字符编码。
在 Linux/macOS 中 locale 的命名规则为\<语言>_\<地区>.\<字符集编码> 如zh_CN.UTF-8zh 代表中文CN 代表大陆地区UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux/macOS 可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux/macOS 中也可以使用 charset 来指定字符编码。
3. 如果配置文件中不设置 charset在 Linux/macOS 中taos 在启动时候,自动读取系统当前的 locale 信息,并从 locale 信息中解析提取 charset 编码格式。如果自动读取 locale 信息失败,则尝试读取 charset 配置,如果读取 charset 配置也失败,则中断启动过程。
@ -139,73 +163,148 @@ charset 的有效值是 UTF-8。
:::
### 存储相关
| 参数名称 | 参数说明 |
| :--------------: | :--------------------------------------------------------------------: |
| dataDir | 数据文件目录,所有的数据文件都将写入该目录,缺省值:/var/lib/taos |
| tempDir | 指定所有系统运行过程中的临时文件生成的目录,缺省值:/tmp |
| minimalTmpDirGB | tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB缺省值: 1 |
| minimalDataDirGB | dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB缺省值: 2 |
|参数名称|支持版本|参数含义|
|--------------------|----------|-|
|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB默认值 2|
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB默认值 1|
|minDiskFreeSize |3.1.1.0 后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824默认值为 52428800企业版参数|
|s3MigrateIntervalSec|3.3.4.3 后|本地数据文件自动上传 S3 的触发周期单位为秒。最小值600最大值100000。默认值 3600企业版参数|
|s3MigrateEnabled |3.3.4.3 后|是否自动进行 S3 迁移,默认值为 0表示关闭自动 S3 迁移,可配置为 1企业版参数|
|s3Accesskey |3.3.4.3 后|冒号分隔的用户 SecretId:SecretKey例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E企业版参数|
|s3Endpoint |3.3.4.3 后|用户所在地域的 COS 服务域名,支持 http 和 httpsbucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
|s3BucketName |3.3.4.3 后|存储桶名称,减号后面是用户注册 COS 服务的 AppId其中 AppId 是 COS 特有AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725企业版参数|
|s3PageCacheSize |3.3.4.3 后|S3 page cache 缓存页数目,取值范围 4-1048576单位为页默认值 4096企业版参数|
|s3UploadDelaySec |3.3.4.3 后|data 文件持续多长时间不再变动后上传至 S3取值范围 1-2592000 (30天单位为秒默认值 60企业版参数|
|cacheLazyLoadThreshold| |内部参数,缓存的装载策略|
### 集群相关
|参数名称|支持版本|参数含义|
|--------------------------|----------|-|
|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096默认值 CPU 核数的 2 倍 + 5|
|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024默认值为 4|
|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024默认值为 CPU 核数的四分之一(不超过 4|
|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024默认值为 CPU 核数的两倍(不超过 16|
|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024默认值为 CPU 核数的四分之一(不超过 4|
|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024默认值为 CPU 核数的四分之一(不超过 4|
|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024默认值为 CPU 核数的两倍(不超过 16|
|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024默认值为 CPU 核数的四分之一(不小于 2不超过 4|
|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024默认值为 CPU 核数的四分之一(不小于 2不超过 4|
|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4默认值 4|
|ttlUnit | |ttl 参数的单位,取值范围 1-31572500单位为秒默认值 86400|
|ttlPushInterval | |ttl 检测超时频率,取值范围 1-100000单位为秒默认值 10|
|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变0不改变1改变默认值为 0|
|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0默认值 10000|
|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024单位 MB默认值 0表示不限制|
|maxTsmaNum | |集群内可创建的TSMA个数取值范围 0-3默认值 3|
|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000默认值为 20|
|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000默认值 4096|
|audit | |审计功能开关;企业版参数|
|auditInterval | |审计数据上报的时间间隔;企业版参数|
|auditCreateTable | |是否针对创建子表开启审计功能;企业版参数|
|encryptAlgorithm | |数据加密算法;企业版参数|
|encryptScope | |加密范围;企业版参数|
|enableWhiteList | |白名单功能开关;企业版参数|
|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes取值范围 104857600-INT64_MAX默认值 服务器内存的 1/103.1.3.2/3.3.2.13 版本开始生效 |
|syncElectInterval | |内部参数,用于同步模块调试|
|syncHeartbeatInterval | |内部参数,用于同步模块调试|
|syncHeartbeatTimeout | |内部参数,用于同步模块调试|
|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试|
|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试|
|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试|
|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试|
|mndLogRetention | |内部参数,用于 mnode 模块调试|
|skipGrant | |内部参数,用于授权检查|
|trimVDbIntervalSec | |内部参数,用于删除过期数据|
|ttlFlushThreshold | |内部参数ttl 定时器的频率|
|compactPullupInterval | |内部参数,数据重整定时器的频率|
|walFsyncDataSizeLimit | |内部参数WAL 进行 FSYNC 的阈值|
|transPullupInterval | |内部参数mnode 执行事务的重试间隔|
|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔|
|uptimeInterval | |内部参数,用于记录系统启动时间|
|timeseriesThreshold | |内部参数,用于统计用量|
|udf | |是否启动 UDF 服务0不启动1启动默认值为 0 |
|udfdResFuncs | |内部参数,用于 UDF 结果集设置|
|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径|
| 参数名称 | 参数说明 |
| :-----------: | :-------------------------------------------------------------------------: |
| supportVnodes | dnode 支持的最大 vnode 数目取值范围0-4096缺省值 CPU 核数的 2 倍 + 5 |
### 内存相关
| 参数名称 | 参数说明 |
| :----------------: | :---------------------------------------------: |
| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes取值范围104857600-INT64_MAX缺省值服务器内存的 1/10 |
| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes取值范围104857600-INT64_MAX缺省值服务器内存的 1/103.1.3.2/3.3.2.13 版本开始生效 |
### 性能调优
| 参数名称 | 参数说明 |
| :----------------: | :---------------------------------------------: |
| numOfCommitThreads | 落盘线程的最大数量,取值范围 0-1024缺省值为 4 |
### 流计算参数
|参数名称|支持版本|参数含义|
|-----------------------|----------|-|
|disableStream | |流计算的启动开关|
|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB|
|streamAggCnt | |内部参数,并发进行聚合计算的数目|
|checkpointInterval | |内部参数,checkpoint 同步间隔|
|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint|
|maxStreamBackendCache | |内部参数,流计算使用的最大缓存|
|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度|
### 日志相关
|参数名称|支持版本|参数含义|
|----------------|----------|-|
|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB默认值 1|
|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000|
|asyncLog | |日志写入模式0同步1异步默认值 1|
|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0意味着无限保存日志文件不会被重命名也不会有新的日志文件滚动产生但日志文件的内容有可能会不断滚动取决于日志文件大小的设置当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|slowLogThreshold|3.3.3.0 后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
|slowLogMaxLen |3.3.3.0 后|慢查询日志最大长度,取值范围 1-16384默认值 4096|
|slowLogScope |3.3.3.0 后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE默认值 QUERY|
|slowLogExceptDb |3.3.3.0 后|指定的数据库不上报慢查询,仅支持配置一个数据库|
|debugFlag | |运行日志开关131输出错误和警告日志135输出错误、警告和调试日志143输出错误、警告、调试和跟踪日志默认值 131 或 135 (取决于不同模块)|
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|dDebugFlag | |dnode 模块的日志开关,取值范围同上|
|vDebugFlag | |vnode 模块的日志开关,取值范围同上|
|mDebugFlag | |mnode 模块的日志开关,取值范围同上|
|azDebugFlag |3.3.4.3 后|S3 模块的日志开关,取值范围同上|
|sDebugFlag | |sync 模块的日志开关,取值范围同上|
|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上|
|tqDebugFlag | |tq 模块的日志开关,取值范围同上|
|fsDebugFlag | |fs 模块的日志开关,取值范围同上|
|udfDebugFlag | |udf 模块的日志开关,取值范围同上|
|smaDebugFlag | |sma 模块的日志开关,取值范围同上|
|idxDebugFlag | |index 模块的日志开关,取值范围同上|
|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上|
|metaDebugFlag | |meta 模块的日志开关,取值范围同上|
|stDebugFlag | |stream 模块的日志开关,取值范围同上|
|sndDebugFlag | |snode 模块的日志开关,取值范围同上|
| 参数名称 | 参数说明 |
| :--------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------: |
| logDir | 日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos |
| minimalLogDirGB | 当日志文件夹所在磁盘可用空间大小小于该值时停止写日志单位GB缺省值1 |
| numOfLogLines | 单个日志文件允许的最大行数缺省值10,000,000 |
| asyncLog | 日志写入模式0: 同步1: 异步,缺省值: 1 |
| logKeepDays | 日志文件的最长保存时间 单位缺省值0意味着无限保存日志文件不会被重命名也不会有新的日志文件滚动产生但日志文件的内容有可能会不断滚动取决于日志文件大小的设置当设置为大于0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.xxx其中 xxx 为日志文件最后修改的时间戳,并滚动产生新的日志文件 |
| slowLogThreshold | 慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值: 3 |
| slowLogScope | 定启动记录哪些类型的慢查询可选值ALL, QUERY, INSERT, OHTERS, NONE; 默认值ALL |
| debugFlag | 运行日志开关131输出错误和警告日志135输出错误、警告和调试日志143输出错误、警告、调试和跟踪日志; 默认值131 或 135 (取决于不同模块) |
| tmrDebugFlag | 定时器模块的日志开关,取值范围同上 |
| uDebugFlag | 共用功能模块的日志开关,取值范围同上 |
| rpcDebugFlag | rpc 模块的日志开关,取值范围同上 |
| cDebugFlag | 客户端模块的日志开关,取值范围同上 |
| jniDebugFlag | jni 模块的日志开关,取值范围同上 |
| qDebugFlag | query 模块的日志开关,取值范围同上 |
| dDebugFlag | dnode 模块的日志开关,取值范围同上,缺省值 135 |
| vDebugFlag | vnode 模块的日志开关,取值范围同上 |
| mDebugFlag | mnode 模块的日志开关,取值范围同上 |
| wDebugFlag | wal 模块的日志开关,取值范围同上 |
| sDebugFlag | sync 模块的日志开关,取值范围同上 |
| tsdbDebugFlag | tsdb 模块的日志开关,取值范围同上 |
| tqDebugFlag | tq 模块的日志开关,取值范围同上 |
| fsDebugFlag | fs 模块的日志开关,取值范围同上 |
| udfDebugFlag | udf 模块的日志开关,取值范围同上 |
| smaDebugFlag | sma 模块的日志开关,取值范围同上 |
| idxDebugFlag | index 模块的日志开关,取值范围同上 |
| tdbDebugFlag | tdb 模块的日志开关,取值范围同上 |
### 调试相关
|参数名称|支持版本|参数含义|
|--------------------|----------|-|
|enableCoreFile | |crash 时是否生成 core 文件0不生成1生成默认值 1|
|configDir | |配置文件所在目录|
|scriptDir | |内部测试工具的脚本目录|
|assert | |断言控制开关,默认值 0|
|randErrorChance | |内部参数,用于随机失败测试|
|randErrorDivisor | |内部参数,用于随机失败测试|
|randErrorScope | |内部参数,用于随机失败测试|
|safetyCheckLevel | |内部参数,用于随机失败测试|
|experimental | |内部参数,用于一些实验特性|
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
|rsyncPort | |内部参数,用于调试流计算|
|snodeAddress | |内部参数,用于调试流计算|
|checkpointBackupDir | |内部参数,用于恢复 snode 数据|
|enableAuditDelete | |内部参数,用于测试审计功能|
|slowLogThresholdTest| |内部参数,用于测试慢日志|
### 压缩参数
| 参数名称 | 参数说明 |
|:-------------:|:----------------------------------------------------------------:|
| compressMsgSize | 是否对 RPC 消息进行压缩;-1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩;缺省值 -1 |
| fPrecision | 设置 float 类型浮点数压缩精度 取值范围0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断 |
|dPrecision | 设置 double 类型浮点数压缩精度 , 取值范围0.1 ~ 0.0000000000000001 缺省值 0.0000000000000001 小于此值的浮点数尾数部分将被截取 |
|lossyColumn | 对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围: float, double, none缺省值: none表示关闭无损压缩。**注意:此参数在 3.3.0.0 及更高版本中不再使用** |
|ifAdtFse | 在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法, FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法; 0: 关闭1打开默认值为 0 |
|参数名称|支持版本|参数含义|
|------------|----------|-|
|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 默认值 0.0000000000000001 小于此值的浮点数尾数部分将被截取|
|lossyColumn |3.3.0.0 前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none,默认值 none,表示关闭有损压缩|
|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法FSE 算法压缩速度更快但解压稍慢追求压缩速度可选用此算法0关闭1打开默认值为 0|
|maxRange | |内部参数,用于有损压缩设置|
|curRange | |内部参数,用于有损压缩设置|
|compressor | |内部参数,用于有损压缩设置|
**补充说明**
1. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
@ -220,16 +319,6 @@ lossyColumns float|double
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
```
### 其他参数
| 参数名称 | 参数说明 |
| :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| enableCoreFile | crash 时是否生成 core 文件0: 不生成1生成默认值为 1; 不同的启动方式,生成 core 文件的目录如下1、systemctl start taosd 启动:生成的 core 在根目录下 <br/> 2、手动启动就在 taosd 执行目录下。 |
| udf | 是否启动 UDF 服务0: 不启动1启动默认值为 0 |
| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变1改变默认值为 0 |
| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000缺省值为20 |
| maxTsmaNum | 集群内可创建的TSMA个数取值范围0-3缺省值为 3 |
## taosd 监控指标
@ -282,7 +371,7 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| :------------- | :-------- | :------ | :--------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| tables\_num | DOUBLE | | vgroup 中 table 数量 |
| status | DOUBLE | | vgroup 状态, 取值范围unsynced = 0, ready = 1 |
| status | DOUBLE | | vgroup 状态, 取值范围 unsynced = 0, ready = 1 |
| vgroup\_id | VARCHAR | TAG | vgroup id |
| database\_name | VARCHAR | TAG | vgroup 所属的 database 名字 |
| cluster\_id | VARCHAR | TAG | cluster id |
@ -311,10 +400,10 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| io\_write\_disk | DOUBLE | | 磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 byte/s |
| vnodes\_num | DOUBLE | | dnode 上 vnodes 数量 |
| masters | DOUBLE | | dnode 上 master node 数量 |
| has\_mnode | DOUBLE | | dnode 是否包含 mnode取值范围包含=1,不包含=0 |
| has\_qnode | DOUBLE | | dnode 是否包含 qnode取值范围包含=1,不包含=0 |
| has\_snode | DOUBLE | | dnode 是否包含 snode取值范围包含=1,不包含=0 |
| has\_bnode | DOUBLE | | dnode 是否包含 bnode取值范围包含=1,不包含=0 |
| has\_mnode | DOUBLE | | dnode 是否包含 mnode取值范围 包含=1,不包含=0 |
| has\_qnode | DOUBLE | | dnode 是否包含 qnode取值范围 包含=1,不包含=0 |
| has\_snode | DOUBLE | | dnode 是否包含 snode取值范围 包含=1,不包含=0 |
| has\_bnode | DOUBLE | | dnode 是否包含 bnode取值范围 包含=1,不包含=0 |
| error\_log\_count | DOUBLE | | error 总数 |
| info\_log\_count | DOUBLE | | info 总数 |
| debug\_log\_count | DOUBLE | | debug 总数 |
@ -330,7 +419,7 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :---------- | :-------- | :------ | :--------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| status | DOUBLE | | dnode 状态,取值范围ready=1offline =0 |
| status | DOUBLE | | dnode 状态,取值范围 ready=1offline =0 |
| dnode\_id | VARCHAR | TAG | dnode id |
| dnode\_ep | VARCHAR | TAG | dnode endpoint |
| cluster\_id | VARCHAR | TAG | cluster id |
@ -373,7 +462,7 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :---------- | :-------- | :------ | :------------------------------------------------------------------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| role | DOUBLE | | mnode 角色, 取值范围offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| role | DOUBLE | | mnode 角色, 取值范围 offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| mnode\_id | VARCHAR | TAG | master node id |
| mnode\_ep | VARCHAR | TAG | master node endpoint |
| cluster\_id | VARCHAR | TAG | cluster id |
@ -385,7 +474,7 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :------------------------------------------------------------------------------------------------------ |
| \_ts | TIMESTAMP | | timestamp |
| vnode\_role | DOUBLE | | vnode 角色,取值范围offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| vnode\_role | DOUBLE | | vnode 角色,取值范围 offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| vgroup\_id | VARCHAR | TAG | dnode id |
| dnode\_id | VARCHAR | TAG | dnode id |
| database\_name | VARCHAR | TAG | vgroup 所属的 database 名字 |
@ -399,9 +488,9 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :--------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
| result | VARCHAR | TAG | sql的执行结果取值范围Success, Failed |
| result | VARCHAR | TAG | sql的执行结果取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
| sql\_type | VARCHAR | TAG | sql类型取值范围inserted_rows |
| sql\_type | VARCHAR | TAG | sql类型取值范围 inserted_rows |
| dnode\_id | VARCHAR | TAG | dnode id |
| dnode\_ep | VARCHAR | TAG | dnode endpoint |
| vgroup\_id | VARCHAR | TAG | dnode id |
@ -415,9 +504,9 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :---------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
| result | VARCHAR | TAG | sql的执行结果取值范围Success, Failed |
| result | VARCHAR | TAG | sql的执行结果取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
| sql\_type | VARCHAR | TAG | sql类型取值范围select, insertdelete |
| sql\_type | VARCHAR | TAG | sql类型取值范围 select, insertdelete |
| cluster\_id | VARCHAR | TAG | cluster id |
### taos\_slow\_sql 表
@ -428,9 +517,9 @@ taosd 会将监控指标上报给 taosKeeper这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :---------------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
| result | VARCHAR | TAG | sql的执行结果取值范围Success, Failed |
| result | VARCHAR | TAG | sql的执行结果取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
| duration | VARCHAR | TAG | sql执行耗时取值范围3-10s,10-100s,100-1000s,1000s- |
| duration | VARCHAR | TAG | sql执行耗时取值范围 3-10s,10-100s,100-1000s,1000s- |
| cluster\_id | VARCHAR | TAG | cluster id |
## 日志相关

View File

@ -8,38 +8,100 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API并且在
## 配置参数
| 参数名称 | 参数含义 |
|:-----------:|:----------------------------------------------------------:|
|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint缺省值hostname:6030若无法获取该服务器的 hostname则赋值为 localhost |
|secondEp | 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint没有缺省值 |
|numOfRpcSessions | 一个客户端能创建的最大连接数取值范围10-50000000(单位为毫秒)缺省值500000 |
|telemetryReporting | 是否上传 telemetry0: 不上传1 上传缺省值1 |
|crashReporting | 是否上传 telemetry0: 不上传1 上传缺省值1 |
|queryPolicy | 查询语句的执行策略1: 只使用 vnode不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 缺省值1 |
|querySmaOptimize | sma index 的优化策略0: 表示不使用 sma index永远从原始数据进行查询; 1: 表示使用 sma index对符合的语句直接从预计算的结果进行查询缺省值0 |
|keepColumnName | Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数; 1: 表示自动设置别名为列名(不包含函数名), 0: 表示不自动设置别名; 缺省值: 0 |
|countAlwaysReturnValue | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值; 0返回空行1返回; 缺省值 1; 该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. |
|multiResultFunctionStarReturnTags | 查询超级表时last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响; 0不返回标签列1返回标签列 ; 缺省值: 0; 该参数设置为 0 时last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列 |
|maxTsmaCalcDelay| 查询时客户端可允许的tsma计算延迟, 若tsma的计算延迟大于配置值, 则该TSMA将不会被使用.; 取值范围: 600s - 86400s, 即10分钟-1小时 ; 缺省值600 秒|
|tsmaDataDeleteMark |TSMA计算的历史数据中间结果保存时间, 单位为毫秒; 取值范围:>= 3600000, 即大于等于1h; 缺省值: 86400000, 即1d |
|timezone | 时区; 缺省从系统中动态获取当前的时区设置 |
|locale | 系统区位信息及编码格式, 缺省从系统中获取 |
|charset | 字符集编码,缺省从系统中获取 |
|metaCacheMaxSize | 指定单个客户端元数据缓存大小的最大值, 单位 MB; 缺省值 -1表示无限制 |
|logDir | 日志文件目录,客户端运行日志将写入该目录, 缺省值: /var/log/taos |
|minimalLogDirGB | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志; 缺省值 1 |
|numOfLogLines | 单个日志文件允许的最大行数; 缺省值 10,000,000 |
|asyncLog | 是否异步写入日志0同步1异步缺省值1 |
|logKeepDays | 日志文件的最长保存时间; 缺省值: 0表示无限保存; 大于 0 时,日志文件会被重命名为 taosdlog.xxx其中 xxx 为日志文件最后修改的时间戳|
|smlChildTableName | schemaless 自定义的子表名的 key, 无缺省值 |
|smlAutoChildTableNameDelimiter | schemaless tag之间的连接符连起来作为子表名无缺省值 |
|smlTagName | schemaless tag 为空时默认的 tag 名字, 缺省值 "_tag_null" |
|smlTsDefaultName | schemaless自动建表的时间列名字通过该配置设置, 缺省值 "_ts" |
|smlDot2Underline | schemaless 把超级表名中的 dot 转成下划线 |
|enableCoreFile | crash 时是否生成 core 文件0: 不生成, 1 生成缺省值1 |
|enableScience | 是否开启科学计数法显示浮点数; 0: 不开始, 1: 开启缺省值1 |
|compressMsgSize | 是否对 RPC 消息进行压缩; -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩; 缺省值 -1|
|queryTableNotExistAsEmpty | 查询表不存在时是否返回空结果集; false: 返回错误; true: 返回空结果集; 缺省值 false|
### 连接相关
|参数名称|支持版本|参数含义|
|----------------------|----------|-|
|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint缺省值hostname:6030若无法获取该服务器的 hostname则赋值为 localhost|
|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint没有缺省值|
|compressMsgSize | |是否对 RPC 消息进行压缩;-1所有消息都不压缩0所有消息都压缩N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120默认值 3|
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000缺省值 30000|
|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半|
|timeToGetAvailableConn| |获得可用连接的最长等待时间,取值范围 10-50000000单位为毫秒缺省值 500000|
|useAdapter | |内部参数,是否使用 taosadapter影响 CSV 文件导入|
|shareConnLimit |3.3.4.3 后|内部参数,一个连接可以共享的查询数目,取值范围 1-256,默认值 10|
|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800单位为秒默认值 900|
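可以通过如下 SQL 查看客户端当前生效的配置参数(示意,具体输出列以实际版本为准):
```sql
SHOW LOCAL VARIABLES;
```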
### 查询相关
|参数名称|支持版本|参数含义|
|---------------------------------|---------|-|
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值0返回空行1返回默认值 1该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL对应的组或窗口将不返回查询结果注意此参数客户端和服务端值应保持一致|
|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数1表示自动设置别名为列名(不包含函数名)0表示不自动设置别名缺省值0|
|multiResultFunctionStarReturnTags|3.3.3.0 后|查询超级表时last(\*)/last_row(\*)/first(\*) 是否返回标签列查询普通表、子表时不受该参数影响0不返回标签列1返回标签列缺省值0该参数设置为 0 时last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
|metaCacheMaxSize | |指定单个客户端元数据缓存大小的最大值,单位 MB缺省值 -1表示无限制|
|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s即 10 分钟 - 1 小时缺省值600 秒|
|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000即大于等于1h缺省值86400000即 1d |
|queryPolicy | |查询语句的执行策略1只使用 vnode不使用 qnode2没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行3vnode 只运行扫描算子,其余算子均在 qnode 执行缺省值1|
|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集false返回错误true返回空结果集缺省值 false|
|querySmaOptimize | |sma index 的优化策略0表示不使用 sma index永远从原始数据进行查询1表示使用 sma index对符合的语句直接从预计算的结果进行查询缺省值0|
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|queryMaxConcurrentTables | |内部参数,查询计划的并发数目|
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|minSlidingTime | |内部参数sliding 的最小允许值|
|minIntervalTime | |内部参数interval 的最小允许值|
### 写入相关
|参数名称|支持版本|参数含义|
|------------------------------|----------|-|
|smlChildTableName | |schemaless 自定义的子表名的 key无缺省值|
|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线|
|maxInsertBatchRows | |内部参数,一批写入的最大条数|
### 区域相关
|参数名称|支持版本|参数含义|
|-----------------|----------|-|
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|charset | |字符集编码,缺省从系统中获取|
### 存储相关
|参数名称|支持版本|参数含义|
|-----------------|----------|-|
|tempDir | |指定所有运行过程中的临时文件生成的目录Linux 平台默认值为 /tmp|
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB缺省值1|
### 日志相关
|参数名称|支持版本|参数含义|
|-----------------|----------|-|
|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB缺省值1|
|numOfLogLines | |单个日志文件允许的最大行数缺省值10,000,000|
|asyncLog | |日志写入模式0同步1异步缺省值1|
|logKeepDays | |日志文件的最长保存时间单位缺省值0意味着无限保存日志文件不会被重命名也不会有新的日志文件滚动产生但日志文件的内容有可能会不断滚动取决于日志文件大小的设置当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|debugFlag | |运行日志开关131输出错误和警告日志135输出错误、警告和调试日志143输出错误、警告、调试和跟踪日志默认值 131 或 135 (取决于不同模块)|
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|jniDebugFlag | |jni 模块的日志开关,取值范围同上|
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|cDebugFlag | |客户端模块的日志开关,取值范围同上|
|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上|
|tqClientDebugFlag|3.3.4.3 后|客户端模块的日志开关,取值范围同上|
### 调试相关
|参数名称|支持版本|参数含义|
|-----------------|-----------|-|
|crashReporting | |是否上传 crash 到 telemetry0不上传1上传缺省值1|
|enableCoreFile | |crash 时是否生成 core 文件0不生成1生成缺省值1|
|assert | |断言控制开关缺省值0|
|configDir | |配置文件所在目录|
|scriptDir | |内部参数,测试用例的目录|
|randErrorChance |3.3.3.0 后|内部参数,用于随机失败测试|
|randErrorDivisor |3.3.3.0 后|内部参数,用于随机失败测试|
|randErrorScope |3.3.3.0 后|内部参数,用于随机失败测试|
|safetyCheckLevel |3.3.3.0 后|内部参数,用于随机失败测试|
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
### SHELL 相关
|参数名称|支持版本|参数含义|
|-----------------|----------|-|
|enableScience | |是否开启科学计数法显示浮点数;0:不开启,1:开启;缺省值 1|
## API

View File

@ -122,6 +122,7 @@ alter_database_option: {
| KEEP value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| MINROWS value
}
```
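例如,下面的语句为已有数据库调整 MINROWS(示意,数据库名与取值均为假设):
```sql
ALTER DATABASE power MINROWS 100;
```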

View File

@ -227,7 +227,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
如下 SQL 语句可以列出当前数据库中的所有表名。
```sql
SHOW TABLES [LIKE tb_name_wildchar];
SHOW TABLES [LIKE tb_name_wildcard];
```
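例如,列出所有以 meters 开头的表(表名前缀为假设):
```sql
SHOW TABLES LIKE 'meters%';
```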
### 显示表创建语句

View File

@ -1988,7 +1988,7 @@ TOP(expr, k)
UNIQUE(expr)
```
**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。对于存在复合主键的表的查询,若最小时间戳的数据有多条,则只有对应的复合主键最小的数据被返回。
**功能说明**:返回该列数据去重后的值。该函数功能与 distinct 相似。对于相同的数据,返回时间戳最小的一条,对于存在复合主键的表的查询,若最小时间戳的数据有多条,则只有对应的复合主键最小的数据被返回。
**返回数据类型**:同应用的字段。

View File

@ -11,337 +11,470 @@ description: TDengine 保留关键字的详细列表
关键字列表如下:
### A
- ABORT
- ACCOUNT
- ACCOUNTS
- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE
- AND
- APPS
- AS
- ASC
- AT_ONCE
- ATTACH
|关键字|说明|
|----------------------|-|
| ABORT | |
| ACCOUNT | |
| ACCOUNTS | |
| ADD | |
| AFTER | |
| AGGREGATE | |
| ALIAS | |
| ALIVE | |
| ALL | |
| ALTER | |
| ANALYZE | 3.3.4.3 及后续版本 |
| AND | |
| ANODE | 3.3.4.3 及后续版本 |
| ANODES | 3.3.4.3 及后续版本 |
| ANOMALY_WINDOW | 3.3.4.3 及后续版本 |
| ANTI | |
| APPS | |
| ARBGROUPS | |
| ARROW | |
| AS | |
| ASC | |
| ASOF | |
| AT_ONCE | |
| ATTACH | |
### B
- BALANCE
- BEFORE
- BEGIN
- BETWEEN
- BIGINT
- BINARY
- BITAND
- BITNOT
- BITOR
- BLOCKS
- BNODE
- BNODES
- BOOL
- BUFFER
- BUFSIZE
- BY
|关键字|说明|
|----------------------|-|
| BALANCE | |
| BEFORE | |
| BEGIN | |
| BETWEEN | |
| BIGINT | |
| BIN | |
| BINARY | |
| BITAND               | |
| BITNOT | |
| BITOR | |
| BLOB | |
| BLOCKS | |
| BNODE | |
| BNODES | |
| BOOL | |
| BOTH | |
| BUFFER | |
| BUFSIZE | |
| BWLIMIT | |
| BY | |
### C
- CACHE
- CACHEMODEL
- CACHESIZE
- CASCADE
- CAST
- CHANGE
- CLIENT_VERSION
- CLUSTER
- COLON
- COLUMN
- COMMA
- COMMENT
- COMP
- COMPACT
- CONCAT
- CONFLICT
- CONNECTION
- CONNECTIONS
- CONNS
- CONSUMER
- CONSUMERS
- CONTAINS
- COPY
- COUNT
- CREATE
- CURRENT_USER
|关键字|说明|
|----------------------|-|
| CACHE | |
| CACHEMODEL | |
| CACHESIZE | |
| CASE | |
| CAST | |
| CHANGE | |
| CHILD | |
| CLIENT_VERSION | |
| CLUSTER | |
| COLON | |
| COLUMN | |
| COMMA | |
| COMMENT | |
| COMP | |
| COMPACT | |
| COMPACTS | |
| CONCAT | |
| CONFLICT | |
| CONNECTION | |
| CONNECTIONS | |
| CONNS | |
| CONSUMER | |
| CONSUMERS | |
| CONTAINS | |
| COPY | |
| COUNT | |
| COUNT_WINDOW | |
| CREATE | |
| CREATEDB | |
| CURRENT_USER | |
### D
- DATABASE
- DATABASES
- DBS
- DEFERRED
- DELETE
- DELIMITERS
- DESC
- DESCRIBE
- DETACH
- DISTINCT
- DISTRIBUTED
- DIVIDE
- DNODE
- DNODES
- DOT
- DOUBLE
- DROP
- DURATION
|关键字|说明|
|----------------------|-|
| DATABASE | |
| DATABASES | |
| DBS | |
| DECIMAL | |
| DEFERRED | |
| DELETE | |
| DELETE_MARK | |
| DELIMITERS | |
| DESC | |
| DESCRIBE | |
| DETACH | |
| DISTINCT | |
| DISTRIBUTED | |
| DIVIDE | |
| DNODE | |
| DNODES | |
| DOT | |
| DOUBLE | |
| DROP | |
| DURATION | |
### E
- EACH
- ENABLE
- END
- EVERY
- EXISTS
- EXPIRED
- EXPLAIN
|关键字|说明|
|----------------------|-|
| EACH | |
| ELSE | |
| ENABLE | |
| ENCRYPT_ALGORITHM | |
| ENCRYPT_KEY | |
| ENCRYPTIONS | |
| END | |
| EQ | |
| EVENT_WINDOW | |
| EVERY | |
| EXCEPT | |
| EXISTS | |
| EXPIRED | |
| EXPLAIN | |
### F
- FAIL
- FILE
- FILL
- FIRST
- FLOAT
- FLUSH
- FOR
- FROM
- FUNCTION
- FUNCTIONS
|关键字|说明|
|----------------------|-|
| FAIL | |
| FHIGH | 3.3.4.3 及后续版本 |
| FILE | |
| FILL | |
| FILL_HISTORY | |
| FIRST | |
| FLOAT | |
| FLOW | 3.3.4.3 及后续版本 |
| FLUSH | |
| FOR | |
| FORCE | |
| FORCE_WINDOW_CLOSE | 3.3.4.3 及后续版本 |
| FROM | |
| FROWTS | 3.3.4.3 及后续版本 |
| FULL | |
| FUNCTION | |
| FUNCTIONS | |
### G
- GLOB
- GRANT
- GRANTS
- GROUP
|关键字|说明|
|----------------------|-|
| GE | |
| GEOMETRY | |
| GLOB | |
| GRANT | |
| GRANTS | |
| GROUP | |
| GT | |
### H
- HAVING
- HOST
|关键字|说明|
|----------------------|-|
| HAVING | |
| HEX | |
| HOST | |
### I
- ID
- IF
- IGNORE
- IMMEDIATE
- IMPORT
- IN
- INDEX
- INDEXES
- INITIALLY
- INNER
- INSERT
- INSTEAD
- INT
- INTEGER
- INTERVAL
- INTO
- IS
- IS NULL
|关键字|说明|
|----------------------|-|
| ID | |
| IF | |
| IGNORE | |
| ILLEGAL | |
| IMMEDIATE | |
| IMPORT | |
| IN | |
| INDEX | |
| INDEXES | |
| INITIALLY | |
| INNER | |
| INSERT | |
| INSTEAD | |
| INT | |
| INTEGER | |
| INTERSECT | |
| INTERVAL | |
| INTO | |
| IPTOKEN | |
| IROWTS | |
| IS | |
| IS_IMPORT | |
| ISFILLED | |
| ISNULL | |
### J
- JOIN
- JSON
|关键字|说明|
|----------------------|-|
| JLIMIT | |
| JOIN | |
| JSON | |
### K
- KEEP
- KEY
- KILL
|关键字|说明|
|----------------------|-|
| KEEP | |
| KEEP_TIME_OFFSET | |
| KEY | |
| KILL | |
### L
- LAST
- LAST_ROW
- LICENCES
- LIKE
- LIMIT
- LINEAR
- LOCAL
|关键字|说明|
|----------------------|-|
| LANGUAGE | |
| LAST | |
| LAST_ROW | |
| LE | |
| LEADER | |
| LEADING | |
| LEFT | |
| LICENCES | |
| LIKE | |
| LIMIT | |
| LINEAR | |
| LOCAL | |
| LOGS | |
| LP | |
| LSHIFT | |
| LT | |
### M
- MATCH
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS
- MINUS
- MNODE
- MNODES
- MODIFY
- MODULES
|关键字|说明|
|----------------------|-|
| MACHINES | |
| MATCH | |
| MAX_DELAY | |
| MAXROWS | |
| MEDIUMBLOB | |
| MERGE | |
| META | |
| MINROWS | |
| MINUS | |
| MNODE | |
| MNODES | |
| MODIFY | |
| MODULES | |
### N
- NCHAR
- NEXT
- NMATCH
- NONE
- NOT
- NOT NULL
- NOW
- NULL
- NULLS
|关键字|说明|
|----------------------|-|
| NCHAR | |
| NE | |
| NEXT | |
| NMATCH | |
| NONE | |
| NORMAL | |
| NOT | |
| NOTNULL | |
| NOW | |
| NULL | |
| NULL_F | |
| NULLS | |
### O
- OF
- OFFSET
- ON
- OR
- ORDER
- OUTPUTTYPE
|关键字|说明|
|----------------------|-|
| OF | |
| OFFSET | |
| ON | |
| ONLY | |
| OR | |
| ORDER | |
| OUTER | |
| OUTPUTTYPE | |
### P
- PAGES
- PAGESIZE
- PARTITIONS
- PASS
- PLUS
- PORT
- PPS
- PRECISION
- PREV
- PRIVILEGE
|关键字|说明|
|----------------------|-|
| PAGES | |
| PAGESIZE | |
| PARTITION | |
| PASS | |
| PAUSE | |
| PI | |
| PLUS | |
| PORT | |
| POSITION | |
| PPS | |
| PRECISION | |
| PREV | |
| PRIMARY | |
| PRIVILEGE | |
| PRIVILEGES | |
### Q
- QNODE
- QNODES
- QTIME
- QUERIES
- QUERY
|关键字|说明|
|----------------------|-|
| QDURATION | |
| QEND | |
| QNODE | |
| QNODES | |
| QSTART | |
| QTAGS | |
| QTIME | |
| QUERIES | |
| QUERY | |
| QUESTION | |
### R
- RAISE
- RANGE
- RATIO
- READ
- REDISTRIBUTE
- RENAME
- REPLACE
- REPLICA
- RESET
- RESTRICT
- RETENTIONS
- REVOKE
- ROLLUP
- ROW
|关键字|说明|
|----------------------|-|
| RAISE | |
| RAND | |
| RANGE | |
| RATIO | |
| READ | |
| RECURSIVE | |
| REDISTRIBUTE | |
| REM | |
| REPLACE | |
| REPLICA | |
| RESET | |
| RESTORE | |
| RESTRICT | |
| RESUME | |
| RETENTIONS | |
| REVOKE | |
| RIGHT | |
| ROLLUP | |
| ROW | |
| ROWTS | |
| RP | |
| RSHIFT | |
### S
- SCHEMALESS
- SCORES
- SELECT
- SEMI
- SERVER_STATUS
- SERVER_VERSION
- SESSION
- SET
- SHOW
- SINGLE_STABLE
- SLIDING
- SLIMIT
- SMA
- SMALLINT
- SNODE
- SNODES
- SOFFSET
- SPLIT
- STABLE
- STABLES
- START
- STATE
- STATE_WINDOW
- STATEMENT
- STORAGE
- STREAM
- STREAMS
- STRICT
- STRING
- SUBSCRIPTIONS
- SYNCDB
- SYSINFO
|关键字|说明|
|----------------------|-|
| S3_CHUNKPAGES | |
| S3_COMPACT | |
| S3_KEEPLOCAL | |
| SCHEMALESS | |
| SCORES | |
| SELECT | |
| SEMI | |
| SERVER_STATUS | |
| SERVER_VERSION | |
| SESSION | |
| SET | |
| SHOW | |
| SINGLE_STABLE | |
| SLASH | |
| SLIDING | |
| SLIMIT | |
| SMA | |
| SMALLINT | |
| SMIGRATE | |
| SNODE | |
| SNODES | |
| SOFFSET | |
| SPLIT | |
| STABLE | |
| STABLES | |
| STAR | |
| START | |
| STATE | |
| STATE_WINDOW | |
| STATEMENT | |
| STORAGE | |
| STREAM | |
| STREAMS | |
| STRICT | |
| STRING | |
| STT_TRIGGER | |
| SUBSCRIBE | |
| SUBSCRIPTIONS | |
| SUBSTR | |
| SUBSTRING | |
| SUBTABLE | |
| SYSINFO | |
| SYSTEM | |
### T
- TABLE
- TABLES
- TAG
- TAGS
- TBNAME
- TIMES
- TIMESTAMP
- TIMEZONE
- TINYINT
- TO
- TODAY
- TOPIC
- TOPICS
- TRANSACTION
- TRANSACTIONS
- TRIGGER
- TRIM
- TSERIES
- TTL
|关键字|说明|
|----------------------|-|
| TABLE | |
| TABLE_PREFIX | |
| TABLE_SUFFIX | |
| TABLES | |
| TAG | |
| TAGS | |
| TBNAME | |
| THEN | |
| TIMES | |
| TIMESTAMP | |
| TIMEZONE | |
| TINYINT | |
| TO | |
| TODAY | |
| TOPIC | |
| TOPICS | |
| TRAILING | |
| TRANSACTION | |
| TRANSACTIONS | |
| TRIGGER | |
| TRIM | |
| TSDB_PAGESIZE | |
| TSERIES | |
| TSMA | |
| TSMAS | |
| TTL | |
### U
- UNION
- UNSIGNED
- UPDATE
- USE
- USER
- USERS
- USING
|关键字|说明|
|----------------------|-|
| UNION | |
| UNSAFE | |
| UNSIGNED | |
| UNTREATED | |
| UPDATE | |
| USE | |
| USER | |
| USERS | |
| USING | |
### V
|关键字|说明|
|----------------------|-|
| VALUE | |
| VALUE_F | |
| VALUES | |
| VARBINARY | |
| VARCHAR | |
| VARIABLE | |
| VARIABLES | |
| VERBOSE | |
| VGROUP | |
| VGROUPS | |
| VIEW | |
| VIEWS | |
| VNODE | |
| VNODES | |
- VALUE
- VALUES
- VARCHAR
- VARIABLE
- VARIABLES
- VERBOSE
- VGROUP
- VGROUPS
- VIEW
- VNODES
### W
- WAL
- WAL_FSYNC_PERIOD
- WAL_LEVEL
- WAL_RETENTION_PERIOD
- WAL_RETENTION_SIZE
- WATERMARK
- WHERE
- WINDOW_CLOSE
- WITH
- WRITE
|关键字|说明|
|----------------------|-|
| WAL | |
| WAL_FSYNC_PERIOD | |
| WAL_LEVEL | |
| WAL_RETENTION_PERIOD | |
| WAL_RETENTION_SIZE | |
| WAL_ROLL_PERIOD | |
| WAL_SEGMENT_SIZE | |
| WATERMARK | |
| WDURATION | |
| WEND | |
| WHEN | |
| WHERE | |
| WINDOW | |
| WINDOW_CLOSE | |
| WINDOW_OFFSET | |
| WITH | |
| WRITE | |
| WSTART | |
### \_

View File

@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
## 3.3.4.3
<Release type="tdengine" version="3.3.4.3" />
## 3.3.3.0
<Release type="tdengine" version="3.3.3.0" />

View File

@ -0,0 +1,69 @@
---
title: 3.3.4.3 版本说明
sidebar_label: 3.3.4.3
description: 3.3.4.3 版本说明
---
### 行为变更及兼容性
1. 多副本流计算中必须使用 snode
1. 增加了流计算的兼容性保证机制,避免后续函数变更产生新的兼容性问题,但之前版本的流计算必须重建,具体参见 https://docs.taosdata.com/advanced/stream/#流计算升级故障恢复
1. 调整 case when 语句结果类型的判断方法
### 新特性
1. 新功能:流计算的 TWA 函数支持时间驱动的结果推送模式
1. 新功能:流计算的 Interp 函数支持时间驱动的结果推送模式
1. 新功能:支持微软对象存储
### 优化
1. 优化:提升并发大查询时节点之间互相拉数据的效率
1. 优化:支持使用 AVX2 和 AVX512 对 double 、timestamp 和 bigint 类型进行解码优化
1. 优化:调整 case when 语句的结果类型判断方法
1. 优化:顺序执行 compact 和 split vgroup操作时的日志错误提示
1. 优化:提升查询 “select ... from ... where ts in (...)” 的数据扫描速度
1. 优化:增加了流计算的兼容性保证机制,避免后续函数变更产生新的兼容性问题,之前版本的流计算必须重建
1. 优化:提升 taosX 在交叉写入场景下的数据同步性能
1. 优化:支持关闭整数/浮点数类型的编码
1. 优化:多副本流计算中必须使用 snode
1. 优化:客户端生成唯一 ID 标识每一个查询任务,避免重复 ID 导致的内存损坏
1. 优化:加快数据库的创建时间
1. 优化:修改 s3MigrateEnabled 默认值为0
1. 优化:支持在审计数据库中记录删除操作
1. 优化:支持在指定的 dnode 中创建数据库 [企业版]
1. 优化:调整删除超级表数据列时的报错信息
### 修复
1. 修复:last_row 查询性能在 3.3.3.0 中大幅下降的问题
1. 修复:WAL 条目不完整时 taosd 无法启动的问题
1. 修复:partition by 常量时查询结果错误的问题
1. 修复:标量函数包含 _wstart 且填充方式为 prev 时计算结果错误
1. 修复:Windows 平台下的时区设置问题
1. 修复:空数据库进行 compact 操作时,事务无法结束【企业版】
1. 修复:事务冲突的逻辑错误
1. 修复:管理节点某些错误会导致事务无法停止
1. 修复:dnode 数据清空后 taosc 重试错误的问题
1. 修复:Data Compact 被异常终止后,中间文件未被清理
1. 修复:新增列后,Kafka 连接器的 earliest 模式消费不到新列数据
1. 修复:interp 函数在 fill(prev) 时行为不正确
1. 修复:TSMA 在高频元数据操作时异常停止的问题
1. 修复:show create stable 语句执行结果的标签显示错误
1. 修复:Percentile 函数在大数据量查询时会崩溃
1. 修复:partition by 和 having 联合使用时的语法错误问题
1. 修复:interp 在 partition by tbname,c1 时 tbname 为空的问题
1. 修复:通过 stmt 写入非法布尔数值时 taosd 可能 crash
1. 修复:库符号 version 与使用相同符号的库冲突的问题
1. 修复:在 windows 平台下 JDBC 驱动的句柄数持续升高问题
1. 修复:3.3.3.1 升级至 3.3.4.0 偶现的启动失败问题
1. 修复:Windows 平台重复增删表的内存泄漏
1. 修复:无法限制并发拉起 checkpoint 数量导致流计算消耗资源过多
1. 修复:并发查询时的 too many session 问题
1. 修复:Windows 平台下 taos shell 在慢查询场景中崩溃的问题
1. 修复:当打开 dnode 日志时,加密数据库无法恢复的问题
1. 修复:由于 mnode 同步超时,进而导致 taosd 无法启动的问题
1. 修复:由于在快照同步过程中整理文件组数据的速度过慢,从而导致 Vnode(虚拟节点)无法恢复的问题
1. 修复:通过行协议向字符串类型的字段中写入带转义符的数据时,taosd 会崩溃
1. 修复:Error Code 逻辑处理错误导致的元数据文件损坏
1. 修复:查询语句中包含多个 “not” 条件语句嵌套时,未设置标量模式导致查询错误
1. 修复:vnode 统计信息上报超时导致的 dnode offline 问题
1. 修复:在不支持 avx 指令集的服务器上,taosd 启动失败问题
1. 修复:taosX 数据迁移容错处理 0x09xx 错误码

View File

@ -3,5 +3,7 @@ title: 版本说明
sidebar_label: 版本说明
description: 各版本版本说明
---
[3.3.4.3](./3.3.4.3)
[3.3.3.0](./3.3.3.0)
[3.3.2.0](./3.3.2.0)

View File

@ -2187,8 +2187,9 @@ int32_t tSerializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq
typedef struct {
char name[TSDB_CONFIG_OPTION_LEN + 1];
char value[TSDB_CONFIG_VALUE_LEN + 1];
char value[TSDB_CONFIG_PATH_LEN + 1];
char scope[TSDB_CONFIG_SCOPE_LEN + 1];
char info[TSDB_CONFIG_INFO_LEN + 1];
} SVariablesInfo;
typedef struct {
@ -2307,6 +2308,7 @@ typedef struct {
typedef struct {
SExplainRsp rsp;
uint64_t qId;
uint64_t cId;
uint64_t tId;
int64_t rId;
int32_t eId;
@ -2660,6 +2662,7 @@ typedef struct SSubQueryMsg {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2689,6 +2692,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int32_t execId;
} SQueryContinueReq;
@ -2723,6 +2727,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
@ -2738,6 +2743,7 @@ typedef struct {
typedef struct {
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2784,6 +2790,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2797,6 +2804,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -2813,6 +2821,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@ -4261,6 +4270,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t clientId;
uint64_t taskId;
uint32_t sqlLen;
uint32_t phyLen;

View File

@ -62,7 +62,8 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
: (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
: 1000000000;
time_t t = taosTime(NULL);
time_t t;
(void) taosTime(&t);
struct tm tm;
(void) taosLocalTime(&t, &tm, NULL, 0);
tm.tm_hour = 0;

View File

@ -31,7 +31,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef struct {
void* handle;

View File

@ -61,6 +61,35 @@ extern "C" {
} \
} while (0)
#define TAOS_UDF_CHECK_PTR_RCODE(...) \
do { \
const void *ptrs[] = {__VA_ARGS__}; \
for (int i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); ++i) { \
if (ptrs[i] == NULL) { \
fnError("udfd %dth parameter invalid, NULL PTR.line:%d", i, __LINE__); \
return TSDB_CODE_INVALID_PARA; \
} \
} \
} while (0)
#define TAOS_UDF_CHECK_PTR_RVOID(...) \
do { \
const void *ptrs[] = {__VA_ARGS__}; \
for (int i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); ++i) { \
if (ptrs[i] == NULL) { \
fnError("udfd %dth parameter invalid, NULL PTR.line:%d", i, __LINE__); \
return; \
} \
} \
} while (0)
#define TAOS_UDF_CHECK_CONDITION(o, code) \
do { \
if ((o) == false) { \
fnError("Condition not met.line:%d", __LINE__); \
return code; \
} \
} while (0)
// low level APIs
/**

View File

@ -42,10 +42,11 @@ extern "C" {
#define SHOW_CREATE_VIEW_RESULT_FIELD1_LEN (TSDB_VIEW_FNAME_LEN + 4 + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_VIEW_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 3
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 4
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
#define COMPACT_DB_RESULT_COLS 3
#define COMPACT_DB_RESULT_FIELD1_LEN 32

View File

@ -624,6 +624,7 @@ typedef struct SAggPhysiNode {
typedef struct SDownstreamSourceNode {
ENodeType type;
SQueryNodeAddr addr;
uint64_t clientId;
uint64_t taskId;
uint64_t schedId;
int32_t execId;

View File

@ -105,11 +105,11 @@ void qWorkerDestroy(void **qWorkerMgmt);
int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
SQWMsg *qwMsg, SArray *explainRes);
int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
int32_t eId, SQWMsg *qwMsg, SArray *explainRes);
int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
void **pRsp, SArray *explainRes);
int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
int32_t eId, void **pRsp, SArray *explainRes);
int32_t qWorkerDbgEnableDebug(char *option);

View File

@ -83,6 +83,9 @@ void schedulerStopQueryHb(void* pTrans);
int32_t schedulerUpdatePolicy(int32_t policy);
int32_t schedulerEnableReSchedule(bool enableResche);
int32_t initClientId(void);
uint64_t getClientId(void);
/**
* Cancel query job
* @param pJob

View File

@ -137,6 +137,14 @@ extern threadlocal bool tsEnableRandErr;
terrno = _code; \
}
#define OS_PARAM_CHECK(_o) \
do { \
if ((_o) == NULL) { \
terrno = TSDB_CODE_INVALID_PARA; \
return terrno; \
} \
} while (0)
#ifdef __cplusplus
}
#endif

View File

@ -48,8 +48,6 @@ void taosCloseCmd(TdCmdPtr *ppCmd);
void *taosLoadDll(const char *filename);
void *taosLoadSym(void *handle, char *name);
void taosCloseDll(void *handle);
int32_t taosSetConsoleEcho(bool on);

View File

@ -93,7 +93,7 @@ static FORCE_INLINE int64_t taosGetMonoTimestampMs() {
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int32_t bufSize);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
time_t taosTime(time_t *t);
int32_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
const uint32_t min, const uint32_t sec, int64_t time_zone);

View File

@ -208,6 +208,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_TSC_COMPRESS_PARAM_ERROR TAOS_DEF_ERROR_CODE(0, 0X0233)
#define TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0234)
#define TSDB_CODE_TSC_FAIL_GENERATE_JSON TAOS_DEF_ERROR_CODE(0, 0X0235)
#define TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR TAOS_DEF_ERROR_CODE(0, 0X0236)
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X02FF)
// mnode-common

View File

@ -195,9 +195,9 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_MULTI,
OP_TYPE_DIV,
OP_TYPE_REM,
OP_TYPE_MINUS,
OP_TYPE_BIT_AND,
OP_TYPE_BIT_OR,
@ -213,7 +213,7 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_NOT_LIKE,
OP_TYPE_MATCH,
OP_TYPE_NMATCH,
OP_TYPE_IS_NULL,
OP_TYPE_IS_NOT_NULL,
OP_TYPE_IS_TRUE,
@ -222,7 +222,7 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_IS_NOT_TRUE,
OP_TYPE_IS_NOT_FALSE,
OP_TYPE_IS_NOT_UNKNOWN,
//OP_TYPE_COMPARE_MAX_VALUE,
//OP_TYPE_COMPARE_MAX_VALUE,
OP_TYPE_JSON_GET_VALUE,
OP_TYPE_JSON_CONTAINS,
@ -631,6 +631,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
#define TSDB_CONFIG_VALUE_LEN 64
#define TSDB_CONFIG_SCOPE_LEN 8
#define TSDB_CONFIG_NUMBER 16
#define TSDB_CONFIG_PATH_LEN 4096
#define TSDB_CONFIG_INFO_LEN 64
#define QUERY_ID_SIZE 20
#define QUERY_OBJ_ID_SIZE 18

View File

@ -25,7 +25,7 @@ extern "C" {
#define tjsonGetNumberValue(pJson, pName, val, code) \
do { \
uint64_t _tmp = 0; \
int64_t _tmp = 0; \
code = tjsonGetBigIntValue(pJson, pName, &_tmp); \
val = _tmp; \
} while (0)
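// Illustrative sketch (not part of the original header): reading a signed numeric field
// through tjsonGetNumberValue; the wrapper name and the JSON key are hypothetical.
static inline int32_t tjsonExampleGetKeep(SJson *pJson, int64_t *pKeep) {
  int32_t code = 0;
  tjsonGetNumberValue(pJson, "keepDays", *pKeep, code);  // signed temporary avoids truncating negative values
  return code;
}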

View File

@ -120,6 +120,18 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
}
}
/*
 * LIKELY and UNLIKELY macros for branch prediction hints. Use them judiciously
* only in very hot code paths. Misuse or abuse can lead to performance degradation.
*/
#if __GNUC__ >= 3
#define LIKELY(x) __builtin_expect((x) != 0, 1)
#define UNLIKELY(x) __builtin_expect((x) != 0, 0)
#else
#define LIKELY(x) ((x) != 0)
#define UNLIKELY(x) ((x) != 0)
#endif
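// Illustrative sketch (not part of the original header): a hot-path check marked as unlikely
// so the compiler lays out the error branch off the fast path; the function is hypothetical.
static inline int32_t exampleHotPathCheck(const void *pBuf) {
  if (UNLIKELY(pBuf == NULL)) {
    return TSDB_CODE_INVALID_PARA;
  }
  return TSDB_CODE_SUCCESS;
}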
#define TAOS_CHECK_ERRNO(CODE) \
do { \
terrno = (CODE); \
@ -129,25 +141,27 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
} \
} while (0)
#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
do { \
if (TSDB_CODE_SUCCESS != (CODE)) { \
LINO = __LINE__; \
goto LABEL; \
} \
#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
do { \
if (UNLIKELY(TSDB_CODE_SUCCESS != (CODE))) { \
LINO = __LINE__; \
goto LABEL; \
} \
} while (0)
#define QUERY_CHECK_CODE TSDB_CHECK_CODE
#define QUERY_CHECK_CONDITION(condition, CODE, LINO, LABEL, ERRNO) \
if (!condition) { \
(CODE) = (ERRNO); \
(LINO) = __LINE__; \
goto LABEL; \
#define TSDB_CHECK_CONDITION(condition, CODE, LINO, LABEL, ERRNO) \
if (UNLIKELY(!(condition))) { \
(CODE) = (ERRNO); \
(LINO) = __LINE__; \
goto LABEL; \
}
#define QUERY_CHECK_CONDITION TSDB_CHECK_CONDITION
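// Illustrative sketch (not part of the original header): the common pattern that funnels
// errors to one cleanup label via TSDB_CHECK_CONDITION / TSDB_CHECK_CODE; the function
// and its argument are hypothetical.
static inline int32_t exampleRun(int32_t numOfRows) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  TSDB_CHECK_CONDITION(numOfRows > 0, code, lino, _exit, TSDB_CODE_INVALID_PARA);
  // ... real work would go here; any failing step sets code and jumps to _exit ...
  TSDB_CHECK_CODE(code, lino, _exit);
_exit:
  return code;
}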
#define TSDB_CHECK_NULL(ptr, CODE, LINO, LABEL, ERRNO) \
if ((ptr) == NULL) { \
if (UNLIKELY((ptr) == NULL)) { \
(CODE) = (ERRNO); \
(LINO) = __LINE__; \
goto LABEL; \

View File

@ -0,0 +1,59 @@
import subprocess
import re
# Run `git fetch` and capture its output
def git_fetch():
result = subprocess.run(['git', 'fetch'], capture_output=True, text=True)
return result
# Parse the branch name from the error output
def parse_branch_name_type1(error_output):
# Use a regex to capture the branch ref that precedes 'is at'
match = re.search(r"error: cannot lock ref '(refs/remotes/origin/[^']+)': is at", error_output)
if match:
return match.group(1)
return None
# Parse the branch name from the second error type
def parse_branch_name_type2(error_output):
# Use a regex to capture the first quoted branch ref before 'exists'
match = re.search(r"'(refs/remotes/origin/[^']+)' exists;", error_output)
if match:
return match.group(1)
return None
# Run `git update-ref -d` to delete the stale ref
def git_update_ref(branch_name):
if branch_name:
subprocess.run(['git', 'update-ref', '-d', f'{branch_name}'], check=True)
# Identify the error type and apply the matching fix
def handle_error(error_output):
# Error type 1: the local ref's commit ID does not match the remote
if "is at" in error_output and "but expected" in error_output:
branch_name = parse_branch_name_type1(error_output)
if branch_name:
print(f"Detected error type 1, attempting to delete ref for branch: {branch_name}")
git_update_ref(branch_name)
else:
print("Error parsing branch name for type 1.")
# Error type 2: a ref with the same name already exists locally when creating the new remote ref
elif "exists; cannot create" in error_output:
branch_name = parse_branch_name_type2(error_output)
if branch_name:
print(f"Detected error type 2, attempting to delete ref for branch: {branch_name}")
git_update_ref(branch_name)
else:
print("Error parsing branch name for type 2.")
# Entry point
def main():
fetch_result = git_fetch()
if fetch_result.returncode != 0:  # the git fetch command failed
error_output = fetch_result.stderr
handle_error(error_output)
else:
print("Git fetch successful.")
if __name__ == "__main__":
main()
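# Usage sketch (assumption: the script is saved as repair_git_refs.py -- the real file name
# is not shown here -- and run from inside the repository working tree whose refs need repair):
#   python3 repair_git_refs.py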

View File

@ -0,0 +1,319 @@
body {
font-family: Helvetica, Arial, sans-serif;
font-size: 12px;
/* do not increase min-width as some may use split screens */
min-width: 800px;
color: #999;
}
h1 {
font-size: 24px;
color: black;
}
h2 {
font-size: 16px;
color: black;
}
p {
color: black;
}
a {
color: #999;
}
table {
border-collapse: collapse;
}
/******************************
* SUMMARY INFORMATION
******************************/
#environment td {
padding: 5px;
border: 1px solid #e6e6e6;
vertical-align: top;
}
#environment tr:nth-child(odd) {
background-color: #f6f6f6;
}
#environment ul {
margin: 0;
padding: 0 20px;
}
/******************************
* TEST RESULT COLORS
******************************/
span.passed,
.passed .col-result {
color: green;
}
span.skipped,
span.xfailed,
span.rerun,
.skipped .col-result,
.xfailed .col-result,
.rerun .col-result {
color: orange;
}
span.error,
span.failed,
span.xpassed,
.error .col-result,
.failed .col-result,
.xpassed .col-result {
color: red;
}
.col-links__extra {
margin-right: 3px;
}
/******************************
* RESULTS TABLE
*
* 1. Table Layout
* 2. Extra
* 3. Sorting items
*
******************************/
/*------------------
* 1. Table Layout
*------------------*/
#results-table {
border: 1px solid #e6e6e6;
color: #999;
font-size: 12px;
width: 100%;
}
#results-table th,
#results-table td {
padding: 5px;
border: 1px solid #e6e6e6;
text-align: left;
}
#results-table th {
font-weight: bold;
}
/*------------------
* 2. Extra
*------------------*/
.logwrapper {
max-height: 230px;
overflow-y: scroll;
background-color: #e6e6e6;
}
.logwrapper.expanded {
max-height: none;
}
.logwrapper.expanded .logexpander:after {
content: "collapse [-]";
}
.logwrapper .logexpander {
z-index: 1;
position: sticky;
top: 10px;
width: max-content;
border: 1px solid;
border-radius: 3px;
padding: 5px 7px;
margin: 10px 0 10px calc(100% - 80px);
cursor: pointer;
background-color: #e6e6e6;
}
.logwrapper .logexpander:after {
content: "expand [+]";
}
.logwrapper .logexpander:hover {
color: #000;
border-color: #000;
}
.logwrapper .log {
min-height: 40px;
position: relative;
top: -50px;
height: calc(100% + 50px);
border: 1px solid #e6e6e6;
color: black;
display: block;
font-family: "Courier New", Courier, monospace;
padding: 5px;
padding-right: 80px;
white-space: pre-wrap;
}
div.media {
border: 1px solid #e6e6e6;
float: right;
height: 240px;
margin: 0 5px;
overflow: hidden;
width: 320px;
}
.media-container {
display: grid;
grid-template-columns: 25px auto 25px;
align-items: center;
flex: 1 1;
overflow: hidden;
height: 200px;
}
.media-container--fullscreen {
grid-template-columns: 0px auto 0px;
}
.media-container__nav--right,
.media-container__nav--left {
text-align: center;
cursor: pointer;
}
.media-container__viewport {
cursor: pointer;
text-align: center;
height: inherit;
}
.media-container__viewport img,
.media-container__viewport video {
object-fit: cover;
width: 100%;
max-height: 100%;
}
.media__name,
.media__counter {
display: flex;
flex-direction: row;
justify-content: space-around;
flex: 0 0 25px;
align-items: center;
}
.collapsible td:not(.col-links) {
cursor: pointer;
}
.collapsible td:not(.col-links):hover::after {
color: #bbb;
font-style: italic;
cursor: pointer;
}
.col-result {
width: 130px;
}
.col-result:hover::after {
content: " (hide details)";
}
.col-result.collapsed:hover::after {
content: " (show details)";
}
#environment-header h2:hover::after {
content: " (hide details)";
color: #bbb;
font-style: italic;
cursor: pointer;
font-size: 12px;
}
#environment-header.collapsed h2:hover::after {
content: " (show details)";
color: #bbb;
font-style: italic;
cursor: pointer;
font-size: 12px;
}
/*------------------
* 3. Sorting items
*------------------*/
.sortable {
cursor: pointer;
}
.sortable.desc:after {
content: " ";
position: relative;
left: 5px;
bottom: -12.5px;
border: 10px solid #4caf50;
border-bottom: 0;
border-left-color: transparent;
border-right-color: transparent;
}
.sortable.asc:after {
content: " ";
position: relative;
left: 5px;
bottom: 12.5px;
border: 10px solid #4caf50;
border-top: 0;
border-left-color: transparent;
border-right-color: transparent;
}
.hidden, .summary__reload__button.hidden {
display: none;
}
.summary__data {
flex: 0 0 550px;
}
.summary__reload {
flex: 1 1;
display: flex;
justify-content: center;
}
.summary__reload__button {
flex: 0 0 300px;
display: flex;
color: white;
font-weight: bold;
background-color: #4caf50;
text-align: center;
justify-content: center;
align-items: center;
border-radius: 3px;
cursor: pointer;
}
.summary__reload__button:hover {
background-color: #46a049;
}
.summary__spacer {
flex: 0 0 550px;
}
.controls {
display: flex;
justify-content: space-between;
}
.filters,
.collapse {
display: flex;
align-items: center;
}
.filters button,
.collapse button {
color: #999;
border: none;
background: none;
cursor: pointer;
text-decoration: underline;
}
.filters button:hover,
.collapse button:hover {
color: #ccc;
}
.filter__label {
margin-right: 10px;
}

View File

@ -0,0 +1,115 @@
# conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption(
"--verMode", default="enterprise", help="community or enterprise"
)
parser.addoption(
"--tVersion", default="3.3.2.6", help="the version of taos"
)
parser.addoption(
"--baseVersion", default="smoking", help="the path of nas"
)
parser.addoption(
"--sourcePath", default="nas", help="only support nas currently"
)
# Collect the setup and teardown of each test case and their std information
setup_stdout_info = {}
teardown_stdout_info = {}
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
# Record the std of setup and teardown
if call.when == 'setup':
for i in rep.sections:
if i[0] == "Captured stdout setup":
if not setup_stdout_info:
setup_stdout_info[item.nodeid] = i[1]
elif call.when == 'teardown':
for i in rep.sections:
if i[0] == "Captured stdout teardown":
teardown_stdout_info[item.nodeid] = i[1]
# Insert setup and teardown's std in the summary section
def pytest_html_results_summary(prefix, summary, postfix):
if setup_stdout_info or teardown_stdout_info:
rows = []
# Insert setup stdout
if setup_stdout_info:
for nodeid, stdout in setup_stdout_info.items():
html_content = '''
<tr>
<td><b><span style="font-size: larger; color: black;">Setup:</span></b></td>
<td colspan="4">
<a href="#" id="toggleSetup">Show Setup</a>
<div id="setupContent" class="collapsible-content" style="display: none; white-space: pre-wrap; margin-top: 5px;">
<pre>{}</pre>
</div>
</td>
</tr>
'''.format(stdout.strip())
# Generate the HTML in the Python script and use JavaScript to toggle the collapsible content:
html_script = '''
<script>
document.getElementById('toggleSetup').addEventListener('click', function(event) {
event.preventDefault();
var setupContentDiv = document.getElementById('setupContent');
setupContentDiv.style.display = setupContentDiv.style.display === 'none' ? 'block' : 'none';
var buttonText = setupContentDiv.style.display === 'none' ? 'Show Setup' : 'Hide Setup';
this.textContent = buttonText;
});
</script>
'''
# Assemble the complete HTML snippet
final_html = html_content + html_script
rows.append(final_html)
rows.append("<br>")
# Insert teardown stdout
if teardown_stdout_info:
for nodeid, stdout in teardown_stdout_info.items():
html_content = '''
<tr>
<td><b><span style="font-size: larger; color: black;">Teardown:</span></b></td>
<td colspan="4">
<a href="#" id="toggleTeardown">Show Teardown</a>
<div id="teardownContent" class="collapsible-content" style="display: none; white-space: pre-wrap; margin-top: 5px;">
<pre>{}</pre>
</div>
</td>
</tr>
'''.format(stdout.strip())
# Generate the HTML in the Python script and use JavaScript to toggle the collapsible content:
html_script = '''
<script>
document.getElementById('toggleTeardown').addEventListener('click', function(event) {
event.preventDefault();
var teardownContentDiv = document.getElementById('teardownContent');
teardownContentDiv.style.display = teardownContentDiv.style.display === 'none' ? 'block' : 'none';
var buttonText = teardownContentDiv.style.display === 'none' ? 'Show Teardown' : 'Hide Teardown';
this.textContent = buttonText;
});
</script>
'''
# Assemble the complete HTML snippet
final_html = html_content + html_script
rows.append(final_html)
prefix.extend(rows)

View File

@ -0,0 +1,15 @@
#!/usr/bin/expect
set packageName [lindex $argv 0]
set packageSuffix [lindex $argv 1]
set timeout 30
if { ${packageSuffix} == "deb" } {
spawn dpkg -i ${packageName}
} elseif { ${packageSuffix} == "rpm"} {
spawn rpm -ivh ${packageName}
}
expect "*one:"
send "\r"
expect "*skip:"
send "\r"
expect eof
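# Usage sketch (matching how getAndRunInstaller.sh below invokes this script; the package
# file name here is only an example):
#   ./debRpmAutoInstall.sh TDengine-server-3.3.2.6-Linux-x64.deb deb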

View File

@ -0,0 +1,57 @@
set baseVersion=%1%
set version=%2%
set verMode=%3%
set sType=%4%
echo %fileType%
rem stop services
if EXIST C:\TDengine (
if EXIST C:\TDengine\stop-all.bat (
call C:\TDengine\stop-all.bat /silent
echo "***************Stop taos services***************"
)
if exist C:\TDengine\unins000.exe (
call C:\TDengine\unins000.exe /silent
echo "***************uninstall TDengine***************"
)
rd /S /q C:\TDengine
)
if EXIST C:\ProDB (
if EXIST C:\ProDB\stop-all.bat (
call C:\ProDB\stop-all.bat /silent
echo "***************Stop taos services***************"
)
if exist C:\ProDB\unins000.exe (
call C:\ProDB\unins000.exe /silent
echo "***************uninstall TDengine***************"
)
rd /S /q C:\ProDB
)
if "%verMode%"=="enterprise" (
if "%sType%"=="client" (
set fileType=enterprise-client
) else (
set fileType=enterprise
)
) else (
set fileType=%sType%
)
if "%baseVersion%"=="ProDB" (
echo %fileType%
set installer=ProDB-%fileType%-%version%-Windows-x64.exe
) else (
echo %fileType%
set installer=TDengine-%fileType%-%version%-Windows-x64.exe
)
if "%baseVersion%"=="ProDB" (
echo %installer%
scp root@192.168.1.213:/nas/OEM/ProDB/v%version%/%installer% C:\workspace
) else (
echo %installer%
scp root@192.168.1.213:/nas/TDengine/%baseVersion%/v%version%/%verMode%/%installer% C:\workspace
)
echo "***************Finish installer transfer!***************"
C:\workspace\%installer% /silent
echo "***************Finish install!***************"

View File

@ -0,0 +1,325 @@
#!/bin/sh
function usage() {
echo "$0"
echo -e "\t -f test file type,server/client/tools/"
echo -e "\t -m pacakage version Type,community/enterprise"
echo -e "\t -l package type,lite or not"
echo -e "\t -c operation type,x64/arm64"
echo -e "\t -v pacakage version,3.0.1.7"
echo -e "\t -o pacakage version,3.0.1.7"
echo -e "\t -s source Path,web/nas"
echo -e "\t -t package Type,tar/rpm/deb"
echo -e "\t -h help"
}
#parameter
scriptDir=$(dirname $(readlink -f $0))
version="3.0.1.7"
originversion="smoking"
testFile="server"
verMode="communtity"
sourcePath="nas"
cpuType="x64"
lite="true"
packageType="tar"
subFile="package.tar.gz"
while getopts "m:c:f:l:s:o:t:v:h" opt; do
case $opt in
m)
verMode=$OPTARG
;;
v)
version=$OPTARG
;;
f)
testFile=$OPTARG
;;
l)
lite=$OPTARG
;;
s)
sourcePath=$OPTARG
;;
o)
originversion=$OPTARG
;;
c)
cpuType=$OPTARG
;;
t)
packageType=$OPTARG
;;
h)
usage
exit 0
;;
?)
echo "Invalid option: -$OPTARG"
usage
exit 0
;;
esac
done
systemType=`uname`
if [ ${systemType} == "Darwin" ]; then
platform="macOS"
else
platform="Linux"
fi
echo "testFile:${testFile},verMode:${verMode},lite:${lite},cpuType:${cpuType},packageType:${packageType},version-${version},originversion:${originversion},sourcePath:${sourcePath}"
# Color setting
RED='\033[41;30m'
GREEN='\033[1;32m'
YELLOW='\033[1;33m'
BLUE='\033[1;34m'
GREEN_DARK='\033[0;32m'
YELLOW_DARK='\033[0;33m'
BLUE_DARK='\033[0;34m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
if [ "${originversion}" = "ProDB" ]; then
TDengine="ProDB"
else
TDengine="TDengine"
fi
if [[ ${verMode} = "enterprise" ]];then
prePackage="${TDengine}-enterprise"
if [[ ${testFile} = "client" ]];then
prePackage="${TDengine}-enterprise-${testFile}"
fi
elif [ ${verMode} = "community" ];then
prePackage="${TDengine}-${testFile}"
fi
if [ ${lite} = "true" ];then
packageLite="-Lite"
elif [ ${lite} = "false" ];then
packageLite=""
fi
if [[ "$packageType" = "tar" ]] ;then
packageType="tar.gz"
fi
tdPath="${prePackage}-${version}"
packageName="${tdPath}-${platform}-${cpuType}${packageLite}.${packageType}"
if [ "$testFile" == "server" ] ;then
installCmd="install.sh"
elif [ ${testFile} = "client" ];then
installCmd="install_client.sh"
fi
echo "tdPath:${tdPath},packageName:${packageName}}"
cmdInstall() {
command=$1
if command -v ${command} ;then
echoColor YD "${command} is already installed"
else
if command -v apt ;then
apt-get install ${command} -y
elif command -v yum ;then
yum -y install ${command}
else
echoColor YD "you should install ${command} manually"
fi
fi
}
echoColor() {
color=$1
command=$2
if [ ${color} = 'Y' ];then
echo -e "${YELLOW}${command}${NC}"
elif [ ${color} = 'YD' ];then
echo -e "${YELLOW_DARK}${command}${NC}"
elif [ ${color} = 'R' ];then
echo -e "${RED}${command}${NC}"
elif [ ${color} = 'G' ];then
echo -e "${GREEN}${command}${NC}\r\n"
elif [ ${color} = 'B' ];then
echo -e "${BLUE}${command}${NC}"
elif [ ${color} = 'BD' ];then
echo -e "${BLUE_DARK}${command}${NC}"
fi
}
wgetFile() {
file=$1
versionPath=$2
sourceP=$3
nasServerIP="192.168.1.213"
if [ "${originversion}" = "ProDB" ]; then
packagePath="/nas/OEM/ProDB/v${versionPath}"
else
packagePath="/nas/TDengine/${originversion}/v${versionPath}/${verMode}"
fi
if [ -f ${file} ];then
echoColor YD "${file} already exists ,it will delete it and download it again "
# rm -rf ${file}
fi
if [[ ${sourceP} = 'web' ]];then
echoColor BD "====download====:wget https://www.taosdata.com/assets-download/3.0/${file}"
wget https://www.taosdata.com/assets-download/3.0/${file}
elif [[ ${sourceP} = 'nas' ]];then
echoColor BD "====download====:scp root@${nasServerIP}:${packagePath}/${file} ."
scp root@${nasServerIP}:${packagePath}/${file} .
fi
}
function newPath {
buildPath=$1
if [ ! -d ${buildPath} ] ;then
echoColor BD "mkdir -p ${buildPath}"
mkdir -p ${buildPath}
else
echoColor YD "${buildPath} already exists"
fi
}
echoColor G "===== install basesoft ====="
cmdInstall tree
cmdInstall wget
cmdInstall expect
echoColor G "===== Uninstall all components of TDeingne ====="
if command -v rmtaos ;then
echoColor YD "uninstall all components of TDeingne:rmtaos"
rmtaos
else
echoColor YD "os doesn't include TDengine"
fi
if [[ ${packageName} =~ "server" ]] ;then
echoColor BD " pkill -9 taosd "
pkill -9 taosd
fi
if command -v rmprodb ;then
echoColor YD "uninstall all components of TDeingne:rmprodb"
rmprodb
else
echoColor YD "os doesn't include TDengine"
fi
if [[ ${packageName} =~ "server" ]] ;then
echoColor BD " pkill -9 prodbd "
pkill -9 prodbd
fi
echoColor G "===== new workroom path ====="
installPath="/usr/local/src/packageTest"
if [ ${systemType} == "Darwin" ]; then
installPath="${WORK_DIR}/packageTest"
fi
newPath ${installPath}
#if [ -d ${installPath}/${tdPath} ] ;then
# echoColor BD "rm -rf ${installPath}/${tdPath}/*"
# rm -rf ${installPath}/${tdPath}/*
#fi
echoColor G "===== download installPackage ====="
cd ${installPath} && wgetFile ${packageName} ${version} ${sourcePath}
#cd ${oriInstallPath} && wgetFile ${originPackageName} ${originversion} ${sourcePath}
cd ${installPath}
cp -r ${scriptDir}/debRpmAutoInstall.sh .
packageSuffix=$(echo ${packageName} | awk -F '.' '{print $NF}')
if [ ! -f debRpmAutoInstall.sh ];then
echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
echo 'set packageName [lindex $argv 0]' >> debRpmAutoInstall.sh
echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
echo 'set timeout 30 ' >> debRpmAutoInstall.sh
echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
echo ' spawn dpkg -i ${packageName} ' >> debRpmAutoInstall.sh
echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
echo ' spawn rpm -ivh ${packageName}' >> debRpmAutoInstall.sh
echo '}' >> debRpmAutoInstall.sh
echo 'expect "*one:"' >> debRpmAutoInstall.sh
echo 'send "\r"' >> debRpmAutoInstall.sh
echo 'expect "*skip:"' >> debRpmAutoInstall.sh
echo 'send "\r" ' >> debRpmAutoInstall.sh
fi
echoColor G "===== install Package ====="
if [[ ${packageName} =~ "deb" ]];then
cd ${installPath}
dpkg -r taostools
dpkg -r tdengine
if [[ ${packageName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packageName} ${packageSuffix}
else
echoColor BD "dpkg -i ${packageName}" && dpkg -i ${packageName}
fi
elif [[ ${packageName} =~ "rpm" ]];then
cd ${installPath}
sudo rpm -e tdengine
sudo rpm -e taostools
if [[ ${packageName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packageName} ${packageSuffix}
else
echoColor BD "rpm -ivh ${packageName}" && rpm -ivh ${packageName}
fi
elif [[ ${packageName} =~ "tar" ]];then
echoColor G "===== check installPackage File of tar ====="
cd ${installPath}
echoColor YD "unzip the new installation package"
echoColor BD "tar -xf ${packageName}" && tar -xf ${packageName}
cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile
cd ${installPath}
diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
if [ ${diffNumbers} != 0 ];then
echoColor R "The number and names of files is different from the previous installation package"
diffLog=`cat ${installPath}/diffFile.log`
echoColor Y "${diffLog}"
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"
rm -rf ${installPath}/diffFile.log
fi
echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
if [ ${testFile} = "server" ];then
echoColor BD "bash ${installCmd} -e no "
bash ${installCmd} -e no
else
echoColor BD "bash ${installCmd} "
bash ${installCmd}
fi
elif [[ ${packageName} =~ "pkg" ]];then
cd ${installPath}
sudo installer -pkg ${packageName} -target /
echoColor YD "===== install Package successfully! ====="
fi
#cd ${installPath}
#
#rm -rf ${installPath}/${packageName}
#if [ ${platform} == "Linux" ]; then
# rm -rf ${installPath}/${tdPath}/
#fi
echoColor YD "===== end of shell file ====="

View File

@ -0,0 +1,12 @@
import subprocess
def run_cmd(command):
print("CMD:", command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
#assert result.returncode == 0
return result

View File

@ -0,0 +1,21 @@
import pytest
# python3 -m pytest test_server.py -v --html=/var/www/html/report.html --json-report --json-report-file="/var/www/html/report.json" --timeout=60
# pytest.main(["-s", "-v"])
import pytest
import subprocess
# run the pytest suite and generate the HTML report
def main():
pytest.main(['--html=report.html'])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,17 @@
pytest-html
pytest-json-report
pytest-timeout
taospy
numpy
fabric2
psutil
pandas
toml
distro
requests
pexpect
faker
pyopenssl
taos-ws-py
taospy
tzlocal

View File

@ -0,0 +1,11 @@
rm -rf %WIN_TDENGINE_ROOT_DIR%\debug
mkdir %WIN_TDENGINE_ROOT_DIR%\debug
mkdir %WIN_TDENGINE_ROOT_DIR%\debug\build
mkdir %WIN_TDENGINE_ROOT_DIR%\debug\build\bin
xcopy C:\TDengine\taos*.exe %WIN_TDENGINE_ROOT_DIR%\debug\build\bin
set case_out_file=%cd%\case.out
cd %WIN_TDENGINE_ROOT_DIR%\tests\system-test
python3 .\test.py -f 0-others\taosShell.py
python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3

View File

@ -0,0 +1,29 @@
#!/bin/bash
ulimit -c unlimited
rm -rf ${TDENGINE_ROOT_DIR}/debug
mkdir ${TDENGINE_ROOT_DIR}/debug
mkdir ${TDENGINE_ROOT_DIR}/debug/build
mkdir ${TDENGINE_ROOT_DIR}/debug/build/bin
systemType=`uname`
if [ ${systemType} == "Darwin" ]; then
cp /usr/local/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/
else
cp /usr/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/
fi
case_out_file=`pwd`/case.out
python3 -m pip install -r ${TDENGINE_ROOT_DIR}/tests/requirements.txt >> $case_out_file
python3 -m pip install taos-ws-py taospy >> $case_out_file
cd ${TDENGINE_ROOT_DIR}/tests/army
python3 ./test.py -f query/query_basic.py -N 3 >> $case_out_file
cd ${TDENGINE_ROOT_DIR}/tests/system-test
python3 ./test.py -f 1-insert/insert_column_value.py >> $case_out_file
python3 ./test.py -f 2-query/primary_ts_base_5.py >> $case_out_file
python3 ./test.py -f 2-query/case_when.py >> $case_out_file
python3 ./test.py -f 2-query/partition_limit_interval.py >> $case_out_file
python3 ./test.py -f 2-query/join.py >> $case_out_file
python3 ./test.py -f 2-query/fill.py >> $case_out_file

View File

@ -0,0 +1,251 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
# input for server
opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:P:v:u', [
'host=', 'Port=', 'version='])
serverHost = ""
serverPort = 0
version = ""
uninstall = False
for key, value in opts:
if key in ['--help']:
print('A collection of test cases written using Python')
print('-h serverHost')
print('-P serverPort')
print('-v test client version')
print('-u test uninstall process, will uninstall TDengine')
sys.exit(0)
if key in ['-h']:
serverHost = value
if key in ['-P']:
serverPort = int(value)
if key in ['-v']:
version = value
if key in ['-u']:
uninstall = True
if not serverHost:
print("Please input use -h to specify your server host.")
sys.exit(0)
if not version:
print("No version specified, will not run version check.")
if serverPort == 0:
serverPort = 6030
print("No server port specified, use default 6030.")
system = platform.system()
arch = platform.machine()
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# prepare data by taosBenchmark
cmd = "taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d &" % (databaseName, serverHost, serverPort)
process_out = subprocess.getoutput(cmd)
print(cmd)
#os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort))
taosBenchmark_test_result = True
time.sleep(10)
import taos
conn = taos.connect(host=serverHost,
user="root",
password="taosdata",
database=databaseName,
port=serverPort,
timezone="Asia/Shanghai") # default your host's timezone
server_version = conn.server_info
print("server_version", server_version)
client_version = conn.client_info
print("client_version", client_version) # 3.0.0.0
# Execute a sql and get its result set. It's useful for SELECT statement
result: taos.TaosResult = conn.query("SELECT count(*) from meters")
data = result.fetch_all()
print(data)
if data[0][0] !=10000:
print(" taosBenchmark work not as expected ")
print("!!!!!!!!!!!Test Result: taosBenchmark test failed! !!!!!!!!!!")
sys.exit(1)
#else:
# print("**********Test Result: taosBenchmark test passed **********")
# drop database of test
taos_test_result = False
print("drop database test")
print("run taos -s 'drop database %s;' -h %s -P %d" % (databaseName, serverHost, serverPort))
taos_cmd_output = subprocess.getoutput('taos -s "drop database %s;" -h %s -P %d' % (databaseName, serverHost, serverPort))
print(taos_cmd_output)
if ("Drop OK" in taos_cmd_output):
taos_test_result = True
#print("*******Test Result: taos test passed ************")
version_test_result = False
if version:
print("Client info is: %s"%conn.client_info)
taos_V_output = ""
if system == "Windows":
taos_V_output = subprocess.getoutput("taos -V | findstr version")
else:
taos_V_output = subprocess.getoutput("taos -V | grep version")
print("taos -V output is: %s" % taos_V_output)
if version in taos_V_output and version in conn.client_info:
version_test_result = True
#print("*******Test Result: Version check passed ************")
conn.close()
if uninstall:
print("Start to run rmtaos")
leftFile = False
print("Platform: ", system)
if system == "Linux":
# Create a subprocess.Popen object and interact with it through stdin/stdout
process = subprocess.Popen(['rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush()  # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush()  # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Check that the installation directories were cleaned up
out = subprocess.getoutput("ls /etc/systemd/system/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/taos")
#print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/taos%s" % out)
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# Create a subprocess.Popen object and interact with it through stdin/stdout
process = subprocess.Popen(['sudo', 'rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# Send input to the child process
process.stdin.write("y\n")
process.stdin.flush()  # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush()  # make sure the input reaches the child process
# Close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# Wait for the child process to exit
process.wait()
# Check that the installation directories were cleaned up
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
#out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
#print(out)
#if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
#if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000','/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
out = subprocess.getoutput("ls C:\TDengine")
print(out)
if len(out.split("\n")) > 3:
leftFile = True
print("Uninstall left some files: %s" % out)
if taosBenchmark_test_result:
print("**********Test Result: taosBenchmark test passed! **********")
if taos_test_result:
print("**********Test Result: taos test passed! **********")
else:
print("!!!!!!!!!!!Test Result: taos test failed! !!!!!!!!!!")
if version_test_result:
print("**********Test Result: version test passed! **********")
else:
print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!")
if not leftFile:
print("**********Test Result: uninstall test passed! **********")
else:
print("!!!!!!!!!!!Test Result: uninstall test failed! !!!!!!!!!!")
if taosBenchmark_test_result and taos_test_result and version_test_result and not leftFile:
sys.exit(0)
else:
sys.exit(1)

View File

@ -0,0 +1,380 @@
def sync_source(branch_name) {
sh '''
hostname
ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
echo ''' + branch_name + '''
'''
sh '''
cd ${TDENGINE_ROOT_DIR}
set +e
git reset --hard
git fetch || git fetch
git checkout -f '''+branch_name+'''
git reset --hard origin/'''+branch_name+'''
git log | head -n 20
git clean -fxd
set -e
'''
return 1
}
def sync_source_win() {
bat '''
hostname
taskkill /f /t /im taosd.exe
ipconfig
set
date /t
time /t
'''
bat '''
echo %branch_name%
cd %WIN_TDENGINE_ROOT_DIR%
git reset --hard
git fetch || git fetch
git checkout -f ''' + env.BRANCH_NAME + '''
git reset --hard origin/''' + env.BRANCH_NAME + '''
git branch
git restore .
git remote prune origin
git pull || git pull
git log | head -n 20
git clean -fxd
'''
return 1
}
pipeline {
agent none
parameters {
choice(
name: 'sourcePath',
choices: ['nas','web'],
description: 'Choose how to download the installation package; web means the official website and nas means the taos NAS server'
)
choice(
name: 'verMode',
choices: ['enterprise','community'],
description: 'Choose which type of package you want to check'
)
string (
name:'version',
defaultValue:'3.3.2.0',
description: 'Release version number, e.g. 3.0.0.1'
)
string (
name:'baseVersion',
defaultValue:'smoking',
description: 'NAS root path tag, e.g. smoking or 3.3'
)
choice (
name:'mode',
choices: ['server','client'],
description: 'Choose which mode of package you want to run'
)
choice (
name:'smoke_branch',
choices: ['test/3.0/smokeTest','test/main/smokeTest','test/3.1/smokeTest'],
description: 'Choose which smoke test branch to run'
)
string (
name:'runPlatforms',
defaultValue:'server_Linux_x64, server_Linux_arm64, server_Windows_x64, server_Mac_x64',
description: 'Run package list. Hotfix usually runs - server: server_Linux_x64, server_Linux_arm64; client: client_Linux_x64, client_Linux_arm64. Release usually runs - enterprise server: server_Linux_x64, server_Linux_arm64, server_Windows_x64; enterprise client: client_Linux_x64, client_Linux_arm64, client_Windows_x64; community server: server_Linux_x64, server_Linux_arm64, server_Mac_x64, server_Mac_arm64(not supported), server_Linux_x64_lite(not supported); community client: client_Linux_x64, client_Linux_arm64, client_Windows_x64, client_Mac_x64, client_Mac_arm64(not supported), client_Linux_x64_lite(not supported)'
)
}
environment{
WORK_DIR = "/var/lib/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
BRANCH_NAME = "${smoke_branch}"
}
stages {
stage ('Start Server for Client Test') {
when {
beforeAgent true
expression { mode == 'client' }
}
agent{label " ubuntu18 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
withEnv(['JENKINS_NODE_COOKIE=dontkillme']) {
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
bash start3NodesServer.sh
'''
}
}
}
}
stage ('Run SmokeTest') {
parallel {
stage('server_Linux_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Linux_x64') }
}
}
agent{label " ubuntu16 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_server.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_x64"
'''
}
}
}
stage('server_Linux_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Linux_arm64') }
}
}
agent{label "worker06_arm64"}
steps {
timeout(time: 60, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_server.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=600 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_arm64"
'''
}
}
}
stage ('server_Mac_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Mac_x64') }
}
}
agent{label " release_Darwin_x64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
python3 -m pytest -v -k linux --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_x64"
'''
}
}
}
stage ('server_Mac_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Mac_arm64') }
}
}
agent{label " release_Darwin_arm64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
python3 -m pytest -v -k linux --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_arm64"
'''
}
}
}
stage('server_Windows_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'server' }
expression { runPlatforms.contains('server_Windows_x64') }
}
}
agent{label " windows11 "}
environment{
WIN_WORK_DIR="C:\\workspace"
WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal"
WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community"
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source_win()
bat '''
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
call getAndRunInstaller.bat %baseVersion% %version% %verMode% server
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
pip3 install -r pytest_require.txt
python3 -m pytest test_server.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath%
scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/
scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=server&build=Windows_x64"
'''
}
}
}
stage('client_Linux_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Linux_x64') }
}
}
agent{label " ubuntu16 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_client.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_x64"
'''
}
}
}
stage('client_Linux_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Linux_arm64') }
}
}
agent{label " worker06_arm64 "}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
python3 -m pytest test_client.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_arm64"
'''
}
}
}
stage ('client_Mac_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Mac_x64') }
}
}
agent{label " release_Darwin_x64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
rm -rf /opt/taos/main/TDinternal/debug/* || true
python3 -m pytest test_client.py -v --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_x64"
'''
}
}
}
stage ('client_Mac_arm64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Mac_arm64') }
}
}
agent{label " release_Darwin_arm64 "}
environment{
WORK_DIR = "/Users/zwen/jenkins/workspace"
TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal'
TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community'
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest
bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg
rm -rf /opt/taos/main/TDinternal/debug/* || true
python3 -m pytest test_client.py -v --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true
scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/
scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json
curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_arm64"
'''
}
}
}
stage('client_Windows_x64') {
when {
beforeAgent true
allOf {
expression { mode == 'client' }
expression { runPlatforms.contains('client_Windows_x64') }
}
}
agent{label " windows71 "}
environment{
WIN_WORK_DIR="C:\\workspace"
WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal"
WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community"
}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source_win()
bat '''
cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest
call getAndRunInstaller.bat %baseVersion% %version% %verMode% client
pip3 install -r pytest_require.txt
python3 -m pytest test_client.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath%
scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/
scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json
curl "http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=client&build=Windows_x64"
'''
}
}
}
}
}
}
}

View File

@ -0,0 +1,67 @@
#!/bin/bash
BUILD_ID=dontKillMe
#****** This script sets up a 3-node environment for the remote client installer test. Linux only. *********
pwd=`pwd`
hostname=`hostname`
if [ -z $JENKINS_HOME ]; then
workdir="${pwd}/cluster"
echo $workdir
else
workdir="${JENKINS_HOME}/workspace/cluster"
echo $workdir
fi
name="taos"
if command -v prodb ;then
name="prodb"
fi
# Stop all taosd processes
for(( i=0; i<3; i++))
do
pid=$(ps -ef | grep ${name}d | grep -v grep | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
done
# Init 3 dnodes workdir and config file
rm -rf ${workdir}
mkdir ${workdir}
mkdir ${workdir}/output
mkdir ${workdir}/dnode1
mkdir ${workdir}/dnode1/data
mkdir ${workdir}/dnode1/log
mkdir ${workdir}/dnode1/cfg
touch ${workdir}/dnode1/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6031\nlogDir ${workdir}/dnode1/log\ndataDir ${workdir}/dnode1/data\n" >> ${workdir}/dnode1/cfg/${name}.cfg
# Start first node
nohup ${name}d -c ${workdir}/dnode1/cfg/${name}.cfg > /dev/null 2>&1 &
sleep 5
${name} -P 6031 -s "CREATE DNODE \`${hostname}:6032\`;CREATE DNODE \`${hostname}:6033\`"
mkdir ${workdir}/dnode2
mkdir ${workdir}/dnode2/data
mkdir ${workdir}/dnode2/log
mkdir ${workdir}/dnode2/cfg
touch ${workdir}/dnode2/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6032\nlogDir ${workdir}/dnode2/log\ndataDir ${workdir}/dnode2/data\n" >> ${workdir}/dnode2/cfg/${name}.cfg
nohup ${name}d -c ${workdir}/dnode2/cfg/${name}.cfg > /dev/null 2>&1 &
sleep 5
mkdir ${workdir}/dnode3
mkdir ${workdir}/dnode3/data
mkdir ${workdir}/dnode3/log
mkdir ${workdir}/dnode3/cfg
touch ${workdir}/dnode3/cfg/${name}.cfg
echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6033\nlogDir ${workdir}/dnode3/log\ndataDir ${workdir}/dnode3/data\n" >> ${workdir}/dnode3/cfg/${name}.cfg
nohup ${name}d -c ${workdir}/dnode3/cfg/${name}.cfg > /dev/null 2>&1 &
sleep 5
${name} -P 6031 -s "CREATE MNODE ON DNODE 2;CREATE MNODE ON DNODE 3;"

View File

@ -0,0 +1,137 @@
import pytest
import subprocess
import os
import sys
import platform
import getopt
import re
import time
import taos
from versionCheckAndUninstallforPytest import UninstallTaos
# python3 smokeTestClient.py -h 192.168.0.22 -P 6031 -v ${version} -u
OEM = ["ProDB"]
@pytest.fixture(scope="module")
def get_config(request):
verMode = request.config.getoption("--verMode")
taosVersion = request.config.getoption("--tVersion")
baseVersion = request.config.getoption("--baseVersion")
sourcePath = request.config.getoption("--sourcePath")
config = {
"verMode": verMode,
"taosVersion": taosVersion,
"baseVersion": baseVersion,
"sourcePath": sourcePath,
"system": platform.system(),
"arch": platform.machine(),
"serverHost": "192.168.0.22",
"serverPort": 6031,
"databaseName": re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
}
return config
@pytest.fixture(scope="module")
def setup_module(get_config):
config = get_config
# install taospy
if config["system"] == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
def get_connect(host, port, database=None):
conn = taos.connect(host=host,
user="root",
password="taosdata",
database=database,
port=port,
timezone="Asia/Shanghai") # default your host's timezone
return conn
def run_cmd(command):
print("CMD: %s" % command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
assert result.returncode == 0
return result
class TestClient:
@pytest.mark.all
def test_basic(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
if config["baseVersion"] in OEM and config["system"] == 'Windows':
cmd = f'{name} -s "create database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}'
run_cmd(cmd)
cmd = f'{name} -s "CREATE STABLE {config["databaseName"]}.meters (`ts` TIMESTAMP,`current` FLOAT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(24));" -h {config["serverHost"]} -P {config["serverPort"]}'
run_cmd(cmd)
else:
cmd = f'{name}Benchmark -y -a 3 -n 100 -t 100 -d {config["databaseName"]} -h {config["serverHost"]} -P {config["serverPort"]} &'
run_cmd(cmd)
# os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort))
time.sleep(5)
conn = get_connect(config["serverHost"], config["serverPort"], config["databaseName"])
sql = "SELECT count(*) from meters"
result: taos.TaosResult = conn.query(sql)
data = result.fetch_all()
print("SQL: %s" % sql)
print("Result: %s" % data)
if config["system"] == 'Windows' and config["baseVersion"] in OEM:
pass
elif data[0][0] != 10000:
raise f"{name}Benchmark work not as expected "
# drop database of test
cmd = f'{name} -s "drop database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}'
result = run_cmd(cmd)
assert "Drop OK" in result.stdout
conn.close()
@pytest.mark.all
def test_version(self, get_config, setup_module):
config = get_config
conn = get_connect(config["serverHost"], config["serverPort"])
server_version = conn.server_info
print("server_version: ", server_version)
client_version = conn.client_info
print("client_version: ", client_version)
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
if config["system"] == "Windows":
taos_V_output = subprocess.getoutput(f"{name} -V | findstr version")
else:
taos_V_output = subprocess.getoutput(f"{name} -V | grep version")
assert config["taosVersion"] in taos_V_output
assert config["taosVersion"] in client_version
if config["taosVersion"] not in server_version:
print("warning: client version is not same as server version")
conn.close()
@pytest.mark.all
def test_uninstall(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
subprocess.getoutput("rm /usr/local/bin/taos")
subprocess.getoutput("pkill taosd")
UninstallTaos(config["taosVersion"], config["verMode"], True, name)

View File

@ -0,0 +1,240 @@
import pytest
import subprocess
import os
from versionCheckAndUninstallforPytest import UninstallTaos
import platform
import re
import time
import signal
system = platform.system()
current_path = os.path.abspath(os.path.dirname(__file__))
if system == 'Windows':
with open(r"%s\test_server_windows_case" % current_path) as f:
cases = f.read().splitlines()
else:
with open("%s/test_server_unix_case" % current_path) as f:
cases = f.read().splitlines()
OEM = ["ProDB"]
@pytest.fixture(scope="module")
def get_config(request):
verMode = request.config.getoption("--verMode")
taosVersion = request.config.getoption("--tVersion")
baseVersion = request.config.getoption("--baseVersion")
sourcePath = request.config.getoption("--sourcePath")
config = {
"verMode": verMode,
"taosVersion": taosVersion,
"baseVersion": baseVersion,
"sourcePath": sourcePath,
"system": platform.system(),
"arch": platform.machine()
}
return config
@pytest.fixture(scope="module")
def setup_module(get_config):
def run_cmd(command):
print("CMD:", command)
result = subprocess.run(command, capture_output=True, text=True, shell=True)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
print("Return Code:", result.returncode)
assert result.returncode == 0
return result
# setup before module tests
config = get_config
# bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
# t = "tar"
# if config["system"] == "Darwin":
# t = "pkg"
# cmd = "bash getAndRunInstaller.sh -m %s -f server -l false -c x64 -v %s -o %s -s %s -t %s" % (
# config["verMode"], config["taosVersion"], config["baseVersion"], config["sourcePath"], t)
# run_cmd(cmd)
if config["system"] == "Windows":
cmd = r"mkdir ..\..\debug\build\bin"
else:
cmd = "mkdir -p ../../debug/build/bin/"
subprocess.getoutput(cmd)
if config["system"] == "Linux" or config["system"] == "Darwin" : # add tmq_sim
cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/."
subprocess.getoutput(cmd)
if config["system"] == "Darwin":
cmd = "sudo cp -rf /usr/local/bin/taos* ../../debug/build/bin/"
elif config["system"] == "Windows":
cmd = r"xcopy C:\TDengine\taos*.exe ..\..\debug\build\bin /Y"
else:
if config["baseVersion"] in OEM:
cmd = '''sudo find /usr/bin -name 'prodb*' -exec sh -c 'for file; do cp "$file" "../../debug/build/bin/taos${file##/usr/bin/%s}"; done' sh {} +''' % (
config["baseVersion"].lower())
else:
cmd = "sudo cp /usr/bin/taos* ../../debug/build/bin/"
run_cmd(cmd)
if config["baseVersion"] in OEM: # mock OEM
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
run_cmd(cmd)
cmd = "ln -s /usr/bin/prodb /usr/local/bin/taos"
subprocess.getoutput(cmd)
# yield
#
# name = "taos"
# if config["baseVersion"] in OEM:
# name = config["baseVersion"].lower()
# subprocess.getoutput("rm /usr/local/bin/taos")
# subprocess.getoutput("pkill taosd")
# UninstallTaos(config["taosVersion"], config["verMode"], True, name)
# use pytest fixture to exec case
@pytest.fixture(params=cases)
def run_command(request):
commands = request.param
if commands.strip().startswith("#"):
pytest.skip("This case has been marked as skipped")
d, command = commands.strip().split(",")
if system == "Windows":
cmd = r"cd %s\..\..\tests\%s && %s" % (current_path, d, command)
else:
cmd = "cd %s/../../tests/%s&&sudo %s" % (current_path, d, command)
print(cmd)
result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
return {
"command": command,
"stdout": result.stdout,
"stderr": result.stderr,
"returncode": result.returncode
}
class TestServer:
@pytest.mark.all
def test_taosd_up(self, setup_module):
# start process
if system == 'Windows':
subprocess.getoutput("taskkill /IM taosd.exe /F")
cmd = "..\\..\\debug\\build\\bin\\taosd.exe"
else:
subprocess.getoutput("pkill taosd")
cmd = "../../debug/build/bin/taosd"
process = subprocess.Popen(
[cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
# monitor output
while True:
line = process.stdout.readline()
if line:
print(line.strip())
if "succeed to write dnode" in line:
time.sleep(5)
# send a kill signal; process.kill() works on both Windows and Unix
process.kill()
# Waiting for the process to be completely killed
time.sleep(5)
break
@pytest.mark.all
def test_execute_cases(self, setup_module, run_command):
# assert the result
if run_command['returncode'] != 0:
print(f"Running command: {run_command['command']}")
print("STDOUT:", run_command['stdout'])
print("STDERR:", run_command['stderr'])
print("Return Code:", run_command['returncode'])
else:
print(f"Running command: {run_command['command']}")
if len(run_command['stdout']) > 1000:
print("STDOUT:", run_command['stdout'][:1000] + "...")
else:
print("STDOUT:", run_command['stdout'])
print("STDERR:", run_command['stderr'])
print("Return Code:", run_command['returncode'])
assert run_command[
'returncode'] == 0, f"Command '{run_command['command']}' failed with return code {run_command['returncode']}"
@pytest.mark.all
@pytest.mark.check_version
def test_check_version(self, get_config, setup_module):
config = get_config
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
system = config["system"]
version = config["taosVersion"]
verMode = config["verMode"]
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# start taosd server
if system == 'Windows':
cmd = ["C:\\TDengine\\start-all.bat"]
# elif system == 'Linux':
# cmd = "systemctl start taosd".split(' ')
else:
# cmd = "sudo launchctl start com.tdengine.taosd".split(' ')
cmd = "start-all.sh"
process_out = subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
print(cmd)
time.sleep(5)
import taos
conn = taos.connect()
check_list = {}
check_list["server_version"] = conn.server_info
check_list["client_version"] = conn.client_info
# Execute sql get version info
result: taos.TaosResult = conn.query("SELECT server_version()")
check_list["select_server"] = result.fetch_all()[0][0]
result: taos.TaosResult = conn.query("SELECT client_version()")
check_list["select_client"] = result.fetch_all()[0][0]
conn.close()
binary_files = ["taos", "taosd", "taosadapter", "taoskeeper", "taosBenchmark"]
if verMode.lower() == "enterprise":
binary_files.append("taosx")
if config["baseVersion"] in OEM:
binary_files = [i.replace("taos", config["baseVersion"].lower()) for i in binary_files]
if system == "Windows":
for i in binary_files:
check_list[i] = subprocess.getoutput("%s -V | findstr version" % i)
else:
for i in binary_files:
check_list[i] = subprocess.getoutput("%s -V | grep version | awk -F ' ' '{print $3}'" % i)
for i in check_list:
print("%s version is: %s" % (i, check_list[i]))
assert version in check_list[i]
@pytest.mark.all
def test_uninstall(self, get_config, setup_module):
config = get_config
name = "taos"
if config["baseVersion"] in OEM:
name = config["baseVersion"].lower()
subprocess.getoutput("rm /usr/local/bin/taos")
subprocess.getoutput("pkill taosd")
UninstallTaos(config["taosVersion"], config["verMode"], True, name)

View File

@ -0,0 +1,10 @@
system-test,python3 ./test.py -f 2-query/join.py
system-test,python3 ./test.py -f 1-insert/insert_column_value.py
system-test,python3 ./test.py -f 2-query/primary_ts_base_5.py
system-test,python3 ./test.py -f 2-query/case_when.py
system-test,python3 ./test.py -f 2-query/partition_limit_interval.py
system-test,python3 ./test.py -f 2-query/fill.py
army,python3 ./test.py -f query/query_basic.py -N 3
system-test,python3 ./test.py -f 7-tmq/basic5.py
system-test,python3 ./test.py -f 8-stream/stream_basic.py
system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3

View File

@ -0,0 +1,2 @@
system-test,python3 .\test.py -f 0-others\taosShell.py
system-test,python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3
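
Each line in these case files is "<directory>,<command>"; the run_command fixture splits on the comma and executes the command inside tests/<directory>. A rough illustration of that parsing (the Windows branch additionally rewrites the path separators):

line = "system-test,python3 ./test.py -f 2-query/join.py"
d, command = line.strip().split(",")           # d = "system-test", command = "python3 ./test.py ..."
cmd = "cd tests/%s && sudo %s" % (d, command)  # simplified form of what the fixture builds on Linux
print(cmd)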

View File

@ -0,0 +1,260 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
# input for server
opts, args = getopt.gnu_getopt(sys.argv[1:], 'v:m:u', ['version=', 'verMode='])
serverHost = ""
serverPort = 0
version = ""
uninstall = False
verMode = ""
for key, value in opts:
if key in ['--help']:
print('A collection of test cases written using Python')
print('-v test client version')
print('-u test uninstall process, will uninstall TDengine')
sys.exit(0)
if key in ['-v']:
version = value
if key in ['-u']:
uninstall = True
if key in ['-m']:
verMode = value
if not version:
print("No version specified, will not run version check.")
system = platform.system()
arch = platform.machine()
databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower()
# install taospy
taospy_version = ""
if system == 'Windows':
taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version")
else:
taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print("taospy version %s " % taospy_version)
if taospy_version == "":
subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
subprocess.getoutput("pip3 install taospy")
# start taosd server
if system == 'Windows':
cmd = ["C:\\TDengine\\start-all.bat"]
elif system == 'Linux':
cmd = "systemctl start taosd".split(' ')
else:
cmd = "sudo launchctl start com.tdengine.taosd".split(' ')
process_out = subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
print(cmd)
time.sleep(5)
#get taosc version info
version_test_result = False
if version:
import taos
conn = taos.connect()
server_version = conn.server_info
print("server_version", server_version)
client_version = conn.client_info
print("client_version", client_version)
# Execute sql get version info
result: taos.TaosResult = conn.query("SELECT server_version()")
select_server = result.fetch_all()[0][0]
print("SELECT server_version():" + select_server)
result: taos.TaosResult = conn.query("SELECT client_version()")
select_client = result.fetch_all()[0][0]
print("SELECT client_version():" + select_client)
conn.close()
taos_V_output = ""
taosd_V_output = ""
taosadapter_V_output = ""
taoskeeper_V_output = ""
taosx_V_output = ""
taosB_V_output = ""
taosxVersion = False
if system == "Windows":
taos_V_output = subprocess.getoutput("taos -V | findstr version")
taosd_V_output = subprocess.getoutput("taosd -V | findstr version")
taosadapter_V_output = subprocess.getoutput("taosadapter -V | findstr version")
taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | findstr version")
taosB_V_output = subprocess.getoutput("taosBenchmark -V | findstr version")
if verMode == "Enterprise":
taosx_V_output = subprocess.getoutput("taosx -V | findstr version")
else:
taos_V_output = subprocess.getoutput("taos -V | grep version | awk -F ' ' '{print $3}'")
taosd_V_output = subprocess.getoutput("taosd -V | grep version | awk -F ' ' '{print $3}'")
taosadapter_V_output = subprocess.getoutput("taosadapter -V | grep version | awk -F ' ' '{print $3}'")
taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | grep version | awk -F ' ' '{print $3}'")
taosB_V_output = subprocess.getoutput("taosBenchmark -V | grep version | awk -F ' ' '{print $3}'")
if verMode == "Enterprise":
taosx_V_output = subprocess.getoutput("taosx -V | grep version | awk -F ' ' '{print $3}'")
print("taos -V output is: %s" % taos_V_output)
print("taosd -V output is: %s" % taosd_V_output)
print("taosadapter -V output is: %s" % taosadapter_V_output)
print("taoskeeper -V output is: %s" % taoskeeper_V_output)
print("taosBenchmark -V output is: %s" % taosB_V_output)
if verMode == "Enterprise":
print("taosx -V output is: %s" % taosx_V_output)
taosxVersion = version in taosx_V_output
else:
taosxVersion = True
if (version in client_version
and version in server_version
and version in select_server
and version in select_client
and version in taos_V_output
and version in taosd_V_output
and version in taosadapter_V_output
and version in taoskeeper_V_output
and version in taosB_V_output
and taosxVersion
):
version_test_result = True
leftFile = False
if uninstall:
print("Start to run rmtaos")
print("Platform: ", system)
# stop taosd server
if system == 'Windows':
cmd = "C:\\TDengine\\stop_all.bat"
elif system == 'Linux':
cmd = "systemctl stop taosd"
else:
cmd = "sudo launchctl stop com.tdengine.taosd"
process_out = subprocess.getoutput(cmd)
print(cmd)
time.sleep(10)
if system == "Linux":
# create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# send input to the child process
process.stdin.write("y\n")
process.stdin.flush() # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush() # make sure the input reaches the child process
# close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# wait for the child process to exit
process.wait()
# check whether the directories were cleaned up
out = subprocess.getoutput("ls /etc/systemd/system/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/taos")
#print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/taos%s" % out)
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['sudo', 'rmtaos'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# send input to the child process
process.stdin.write("y\n")
process.stdin.flush() # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush() # make sure the input reaches the child process
# close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# wait for the child process to exit
process.wait()
# check whether the directories were cleaned up
out = subprocess.getoutput("ls /usr/local/bin/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/libtaos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/taos*")
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
#out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
#print(out)
#if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
#if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000','/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
out = subprocess.getoutput("ls C:\TDengine")
print(out)
if len(out.split("\n")) > 3:
leftFile = True
print("Uninstall left some files: %s" % out)
if version_test_result:
print("**********Test Result: version test passed! **********")
else:
print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!")
if not leftFile:
print("**********Test Result: uninstall test passed! **********")
else:
print("!!!!!!!!!!!Test Result: uninstall test failed! !!!!!!!!!!")
if version_test_result and not leftFile:
sys.exit(0)
else:
sys.exit(1)
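
Invocation sketch for this standalone checker, based on the getopt flags parsed above (-v version, -m verMode, -u uninstall); the script name and version string are placeholders since the diff header is not shown here:

import subprocess

# check that every binary reports version 3.3.4.0, then uninstall (Enterprise layout assumed)
subprocess.run(["python3", "versionCheckAndUninstall.py",
                "-v", "3.3.4.0", "-m", "Enterprise", "-u"], check=True)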

View File

@ -0,0 +1,137 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys, os
import re
import platform
import getopt
import subprocess
# from this import d
import time
from lib import run_cmd
# input for server
def UninstallTaos(version, verMode, uninstall, name):
if not version:
raise "No version specified, will not run version check."
system = platform.system()
arch = platform.machine()
leftFile = False
if uninstall:
print("Start to run rm%s" % name)
print("Platform: ", system)
# stop taosd server
if system == 'Windows':
cmd = "C:\\TDengine\\stop_all.bat"
else:
cmd = "stop_all.sh"
process_out = subprocess.getoutput(cmd)
print(cmd)
time.sleep(5)
print("start to rm%s" % name)
if system == "Linux":
# launch the uninstall command
process = subprocess.Popen(['rm%s' % name], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, text=True)
# send the interactive confirmation input
stdout, stderr = process.communicate(
input="y\nI confirm that I would like to delete all data, log and configuration files\n")
# print the output (optional)
print(stdout)
print(stderr)
# check whether the directories were cleaned up
out = subprocess.getoutput("ls /etc/systemd/system/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/lib64/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/include/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/%s" % name)
# print(out)
if "No such file or directory" not in out:
print("Uninstall left some files in /usr/local/%s%s" % (name, out))
leftFile = True
if not leftFile:
print("*******Test Result: uninstall test passed ************")
elif system == "Darwin":
# create a subprocess.Popen object and interact with it via stdin/stdout
process = subprocess.Popen(['sudo', 'rm%s' % name],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
# send input to the child process
process.stdin.write("y\n")
process.stdin.flush() # make sure the input reaches the child process
process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n")
process.stdin.flush() # make sure the input reaches the child process
# close the child's stdin so it does not wait indefinitely for more input
process.stdin.close()
# wait for the child process to exit
process.wait()
# check whether the directories were cleaned up
out = subprocess.getoutput("ls /usr/local/bin/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/lib/lib%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
out = subprocess.getoutput("ls /usr/local/include/%s*" % name)
if "No such file or directory" not in out:
print("Uninstall left some files: %s" % out)
leftFile = True
# out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/")
# print(out)
# if out:
# print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out)
# leftFile = True
# if not leftFile:
# print("*******Test Result: uninstall test passed ************")
elif system == "Windows":
process = subprocess.Popen(['unins000', '/silent'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
process.wait()
time.sleep(10)
for file in ["C:\TDengine\\taos.exe", "C:\TDengine\\unins000.exe", "C:\ProDB\prodb.exe",
"C:\ProDB\\unins000.exe"]:
if os.path.exists(file):
leftFile = True
if leftFile:
raise "uninstall %s fail, please check" % name
else:
print("**********Test Result: uninstall test passed! **********")

View File

@ -145,7 +145,14 @@ function kill_taosd() {
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}rm -rf ${install_main_dir}/cfg || :
${csudo}rm -rf ${install_main_dir}/bin || :
${csudo}rm -rf ${install_main_dir}/driver || :
${csudo}rm -rf ${install_main_dir}/examples || :
${csudo}rm -rf ${install_main_dir}/include || :
${csudo}rm -rf ${install_main_dir}/share || :
${csudo}rm -rf ${install_main_dir}/log || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin

View File

@ -47,10 +47,11 @@ enum {
RES_TYPE__TMQ_BATCH_META,
};
#define SHOW_VARIABLES_RESULT_COLS 3
#define SHOW_VARIABLES_RESULT_COLS 4
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)(res) == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)(res) == RES_TYPE__TMQ)

View File

@ -983,6 +983,7 @@ void taos_init_imp(void) {
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
ENV_ERR_RET(catalogInit(&cfg), "failed to init catalog");
ENV_ERR_RET(schedulerInit(), "failed to init scheduler");
ENV_ERR_RET(initClientId(), "failed to init clientId");
tscDebug("starting to initialize TAOS driver");

View File

@ -1803,7 +1803,7 @@ int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
if (bind->num > 1) {
tscError("invalid bind number %d for %s", bind->num, __FUNCTION__);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1819,7 +1819,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
if (bind->num <= 0 || bind->num > INT16_MAX) {
tscError("invalid bind num %d", bind->num);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1831,7 +1831,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
}
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -1859,7 +1859,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, in
}
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -2019,7 +2019,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
if (bind->num <= 0 || bind->num > INT16_MAX) {
tscError("invalid bind num %d", bind->num);
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}
@ -2027,7 +2027,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
(void)stmtIsInsert2(stmt, &insert);
if (0 == insert && bind->num > 1) {
tscError("only one row data allowed for query");
terrno = TSDB_CODE_INVALID_PARA;
terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR;
return terrno;
}

View File

@ -541,6 +541,10 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) {
infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD3_LEN;
TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno);
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD4_LEN;
TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno);
int32_t numOfCfg = taosArrayGetSize(pVars);
code = blockDataEnsureCapacity(pBlock, numOfCfg);
TSDB_CHECK_CODE(code, line, END);
@ -569,6 +573,13 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) {
TSDB_CHECK_NULL(pColInfo, code, line, END, terrno);
code = colDataSetVal(pColInfo, i, scope, false);
TSDB_CHECK_CODE(code, line, END);
char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(info, pInfo->info, TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE);
pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
TSDB_CHECK_NULL(pColInfo, code, line, END, terrno);
code = colDataSetVal(pColInfo, i, info, false);
TSDB_CHECK_CODE(code, line, END);
}
pBlock->info.rows = numOfCfg;
@ -825,7 +836,7 @@ int32_t processCompactDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
tscError("failed to post semaphore");
}
}
return code;
return code;
}
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
@ -845,7 +856,7 @@ __async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
case TDMT_MND_SHOW_VARIABLES:
return processShowVariablesRsp;
case TDMT_MND_COMPACT_DB:
return processCompactDbRsp;
return processCompactDbRsp;
default:
return genericRspCallback;
}

View File

@ -327,8 +327,9 @@ static const SSysDbTableSchema configSchema[] = {
static const SSysDbTableSchema variablesSchema[] = {
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "value", .bytes = TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "scope", .bytes = TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "info", .bytes = TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema topicSchema[] = {
@ -437,6 +438,7 @@ static const SSysDbTableSchema userGrantsLogsSchema[] = {
{.name = "state", .bytes = 1536 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "active", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "machine", .bytes = TSDB_GRANT_LOG_COL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "active_info", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema userMachinesSchema[] = {

View File

@ -166,6 +166,7 @@ const char* columnCompressStr(uint16_t type) {
}
uint8_t columnLevelVal(const char* level) {
if (level == NULL) return TSDB_COLVAL_LEVEL_NOCHANGE;
uint8_t l = TSDB_COLVAL_LEVEL_MEDIUM;
if (0 == strcmp(level, "h") || 0 == strcmp(level, TSDB_COLUMN_LEVEL_HIGH)) {
l = TSDB_COLVAL_LEVEL_HIGH;
@ -180,6 +181,7 @@ uint8_t columnLevelVal(const char* level) {
}
uint16_t columnCompressVal(const char* compress) {
if (compress == NULL) return TSDB_COLVAL_COMPRESS_NOCHANGE;
uint16_t c = TSDB_COLVAL_COMPRESS_LZ4;
if (0 == strcmp(compress, TSDB_COLUMN_COMPRESS_LZ4)) {
c = TSDB_COLVAL_COMPRESS_LZ4;
@ -200,6 +202,7 @@ uint16_t columnCompressVal(const char* compress) {
}
uint8_t columnEncodeVal(const char* encode) {
if (encode == NULL) return TSDB_COLVAL_ENCODE_NOCHANGE;
uint8_t e = TSDB_COLVAL_ENCODE_SIMPLE8B;
if (0 == strcmp(encode, TSDB_COLUMN_ENCODE_SIMPLE8B)) {
e = TSDB_COLVAL_ENCODE_SIMPLE8B;
@ -311,6 +314,7 @@ void setColLevel(uint32_t* compress, uint8_t level) {
int32_t setColCompressByOption(uint8_t type, uint8_t encode, uint16_t compressType, uint8_t level, bool check,
uint32_t* compress) {
if(compress == NULL) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR;
if (check && !validColEncode(type, encode)) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR;
setColEncode(compress, encode);

View File

@ -59,7 +59,6 @@ int32_t tsNumOfRpcSessions = 30000;
int32_t tsShareConnLimit = 10;
int32_t tsReadTimeout = 900;
int32_t tsTimeToGetAvailableConn = 500000;
int32_t tsKeepAliveIdle = 60;
int32_t tsNumOfCommitThreads = 2;
int32_t tsNumOfTaskQueueThreads = 16;
@ -523,7 +522,7 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input
int32_t taosAddClientLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "configDir", configDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "scriptDir", configDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "scriptDir", configDir, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "logDir", tsLogDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "minimalLogDirGB", 1.0f, 0.001f, 10000000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(
@ -531,13 +530,14 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "asyncLog", tsAsyncLog, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "logKeepDays", 0, -365000, 365000, CFG_SCOPE_BOTH, CFG_DYN_ENT_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "cDebugFlag", cDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_SERVER));
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@ -550,7 +550,6 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@ -591,17 +590,18 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlChildTableName", tsSmlChildTableName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlAutoChildTableNameDelimiter", tsSmlAutoChildTableNameDelimiter,
CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlTagName", tsSmlTagName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlTsDefaultName", tsSmlTsDefaultName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "smlDot2Underline", tsSmlDot2Underline, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxInsertBatchRows", tsMaxInsertBatchRows, 1, INT32_MAX, CFG_SCOPE_CLIENT,
CFG_DYN_CLIENT) != 0);
TAOS_CHECK_RETURN(
cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_SERVER, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX,
@ -631,15 +631,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000);
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
tsNumOfTaskQueueThreads = tsNumOfCores * 2;
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 16);
TAOS_CHECK_RETURN(
cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "experimental", tsExperimental, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "experimental", tsExperimental, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "multiResultFunctionStarReturnTags", tsMultiResultFunctionStarReturnTags,
CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
@ -728,8 +725,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "encryptScope", tsEncryptScope, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
@ -747,7 +743,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
@ -784,12 +780,12 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "telemetryServer", tsTelemServer, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "telemetryServer", tsTelemServer, CFG_SCOPE_SERVER, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rsyncPort", tsRsyncPort, 1, 65535, CFG_SCOPE_BOTH, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rsyncPort", tsRsyncPort, 1, 65535, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "snodeAddress", tsSnodeAddress, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "checkpointBackupDir", tsCheckpointBackupDir, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@ -1295,9 +1291,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "timeToGetAvailableConn");
tsTimeToGetAvailableConn = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "keepAliveIdle");
tsKeepAliveIdle = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "experimental");
tsExperimental = pItem->bval;
@ -2036,7 +2029,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"cacheLazyLoadThreshold", &tsCacheLazyLoadThreshold},
{"checkpointInterval", &tsStreamCheckpointInterval},
{"keepAliveIdle", &tsKeepAliveIdle},
{"logKeepDays", &tsLogKeepDays},
{"maxStreamBackendCache", &tsMaxStreamBackendCache},
{"mqRebalanceInterval", &tsMqRebalanceInterval},
@ -2294,7 +2286,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"crashReporting", &tsEnableCrashReport},
{"enableQueryHb", &tsEnableQueryHb},
{"keepColumnName", &tsKeepColumnName},
{"keepAliveIdle", &tsKeepAliveIdle},
{"logKeepDays", &tsLogKeepDays},
{"maxInsertBatchRows", &tsMaxInsertBatchRows},
{"maxRetryWaitTime", &tsMaxRetryWaitTime},

View File

@ -267,7 +267,14 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
int8_t locked = 0;
TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf)), NULL, _exit);
size_t exSize = 0;
size_t index = 0;
SConfigItem* pDataDirItem = cfgGetItem(pConf, "dataDir");
if (pDataDirItem) {
exSize = TMAX(taosArrayGetSize(pDataDirItem->array), 1) - 1;
}
TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf) + exSize), NULL, _exit);
TAOS_CHECK_GOTO(cfgCreateIter(pConf, &pIter), NULL, _exit);
@ -275,6 +282,7 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
locked = 1;
while ((pItem = cfgNextIter(pIter)) != NULL) {
_start:
col = startCol;
// GRANT_CFG_SKIP;
@ -289,9 +297,18 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, name, false), NULL, _exit);
char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
char value[TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t valueLen = 0;
TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen), NULL, _exit);
SDiskCfg* pDiskCfg = NULL;
if (strcasecmp(pItem->name, "dataDir") == 0 && exSize > 0) {
char* buf = &value[VARSTR_HEADER_SIZE];
pDiskCfg = taosArrayGet(pItem->array, index);
valueLen = tsnprintf(buf, TSDB_CONFIG_PATH_LEN, "%s", pDiskCfg->dir);
index++;
} else {
TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_PATH_LEN, &valueLen), NULL,
_exit);
}
varDataSetLen(value, valueLen);
pColInfo = taosArrayGet(pBlock->pDataBlock, col++);
@ -313,8 +330,28 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
}
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, scope, false), NULL, _exit);
char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0};
if (strcasecmp(pItem->name, "dataDir") == 0 && pDiskCfg) {
char* buf = &info[VARSTR_HEADER_SIZE];
valueLen = tsnprintf(buf, TSDB_CONFIG_INFO_LEN, "level %d primary %d disabled %" PRIi8, pDiskCfg->level,
pDiskCfg->primary, pDiskCfg->disable);
} else {
valueLen = 0;
}
varDataSetLen(info, valueLen);
pColInfo = taosArrayGet(pBlock->pDataBlock, col++);
if (pColInfo == NULL) {
code = terrno;
TAOS_CHECK_GOTO(code, NULL, _exit);
}
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, info, false), NULL, _exit);
numOfRows++;
}
if (index > 0 && index <= exSize) {
goto _start;
}
}
pBlock->info.rows = numOfRows;
_exit:
if (locked) cfgUnLock(pConf);

View File

@ -5642,6 +5642,12 @@ int32_t tSerializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesRsp
SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
TAOS_CHECK_EXIT(tEncodeSVariablesInfo(&encoder, pInfo));
}
for (int32_t i = 0; i < varNum; ++i) {
SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
TAOS_CHECK_RETURN(tEncodeCStr(&encoder, pInfo->info));
}
tEndEncode(&encoder);
_exit:
@ -5675,6 +5681,13 @@ int32_t tDeserializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesR
TAOS_CHECK_EXIT(terrno);
}
}
if (!tDecodeIsEnd(&decoder)) {
for (int32_t i = 0; i < varNum; ++i) {
SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pInfo->info));
}
}
}
tEndDecode(&decoder);
@ -8717,6 +8730,7 @@ int32_t tSerializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq) {
TAOS_CHECK_EXIT(tEncodeCStrWithLen(&encoder, pReq->sql, pReq->sqlLen));
TAOS_CHECK_EXIT(tEncodeU32(&encoder, pReq->msgLen));
TAOS_CHECK_EXIT(tEncodeBinary(&encoder, (uint8_t *)pReq->msg, pReq->msgLen));
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@ -8765,6 +8779,11 @@ int32_t tDeserializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq)
TAOS_CHECK_EXIT(tDecodeCStrAlloc(&decoder, &pReq->sql));
TAOS_CHECK_EXIT(tDecodeU32(&decoder, &pReq->msgLen));
TAOS_CHECK_EXIT(tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, NULL));
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
} else {
pReq->clientId = 0;
}
tEndDecode(&decoder);
@ -8894,6 +8913,7 @@ int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
} else {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, 0));
}
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@ -8943,6 +8963,11 @@ int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq)
}
TAOS_CHECK_EXIT(tDeserializeSOperatorParam(&decoder, pReq->pOpParam));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
} else {
pReq->clientId = 0;
}
tEndDecode(&decoder);
@ -9055,6 +9080,7 @@ int32_t tSerializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq) {
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->taskId));
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->refId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->execId));
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@ -9095,6 +9121,11 @@ int32_t tDeserializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq)
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->taskId));
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->refId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->execId));
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
} else {
pReq->clientId = 0;
}
tEndDecode(&decoder);
@ -9123,6 +9154,7 @@ int32_t tSerializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pReq
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->refId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->execId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->type));
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@ -9164,6 +9196,11 @@ int32_t tDeserializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pR
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->refId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->execId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, (int32_t *)&pReq->type));
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
} else {
pReq->clientId = 0;
}
tEndDecode(&decoder);
@ -9353,6 +9390,10 @@ int32_t tSerializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *pR
TAOS_CHECK_EXIT(tEncodeI32(&encoder, status->execId));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, status->status));
}
for (int32_t i = 0; i < num; ++i) {
STaskStatus *status = taosArrayGet(pRsp->taskStatus, i);
TAOS_CHECK_EXIT(tEncodeU64(&encoder, status->clientId));
}
} else {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, 0));
}
@ -9396,6 +9437,12 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *
TAOS_CHECK_EXIT(terrno);
}
}
if (!tDecodeIsEnd(&decoder)) {
for (int32_t i = 0; i < num; ++i) {
STaskStatus *status = taosArrayGet(pRsp->taskStatus, i);
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &status->clientId));
}
}
} else {
pRsp->taskStatus = NULL;
}
@ -9560,6 +9607,7 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->sql));
TAOS_CHECK_EXIT(tEncodeBinary(&encoder, pReq->msg, pReq->phyLen));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->source));
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
_exit:
@ -9608,6 +9656,11 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->source));
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
} else {
pReq->clientId = 0;
}
tEndDecode(&decoder);
_exit:

View File

@ -30,7 +30,7 @@ static int64_t m_deltaUtc = 0;
void deltaToUtcInitOnce() {
struct tm tm = {0};
if (taosStrpTime("1970-01-01 00:00:00", (const char*)("%Y-%m-%d %H:%M:%S"), &tm) != 0) {
if (taosStrpTime("1970-01-01 00:00:00", (const char*)("%Y-%m-%d %H:%M:%S"), &tm) == NULL) {
uError("failed to parse time string");
}
m_deltaUtc = (int64_t)taosMktime(&tm);

View File

@ -37,7 +37,9 @@ typedef struct SVnodeMgmt {
SSingleWorker mgmtMultiWorker;
SHashObj *hash;
SHashObj *closedHash;
SHashObj *creatingHash;
TdThreadRwlock lock;
TdThreadMutex mutex;
SVnodesStat state;
STfs *pTfs;
TdThread thread;
@ -96,6 +98,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict);
void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed);
void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId);
// vmHandle.c
SArray *vmGetMsgHandles();
@ -113,6 +116,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt);
int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
// vmWorker.c
int32_t vmStartWorker(SVnodeMgmt *pMgmt);

View File

@ -67,6 +67,54 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod
return 0;
}
int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
(void)taosThreadRwlockRdlock(&pMgmt->lock);
int32_t num = 0;
int32_t size = taosHashGetSize(pMgmt->hash);
int32_t creatingSize = taosHashGetSize(pMgmt->creatingHash);
size += creatingSize;
SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *));
if (pVnodes == NULL) {
(void)taosThreadRwlockUnlock(&pMgmt->lock);
return terrno;
}
void *pIter = taosHashIterate(pMgmt->hash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
SVnodeObj *pVnode = *ppVnode;
if (pVnode && num < size) {
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
pVnodes[num++] = (*ppVnode);
pIter = taosHashIterate(pMgmt->hash, pIter);
} else {
taosHashCancelIterate(pMgmt->hash, pIter);
}
}
pIter = taosHashIterate(pMgmt->creatingHash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
SVnodeObj *pVnode = *ppVnode;
if (pVnode && num < size) {
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
pVnodes[num++] = (*ppVnode);
pIter = taosHashIterate(pMgmt->creatingHash, pIter);
} else {
taosHashCancelIterate(pMgmt->creatingHash, pIter);
}
}
(void)taosThreadRwlockUnlock(&pMgmt->lock);
*numOfVnodes = num;
*ppVnodes = pVnodes;
return 0;
}
int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
(void)taosThreadRwlockRdlock(&pMgmt->lock);

View File

@ -381,6 +381,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
if (vnodeCreate(path, &vnodeCfg, diskPrimary, pMgmt->pTfs) < 0) {
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
vmReleaseVnode(pMgmt, pVnode);
vmRemoveFromCreatingHash(pMgmt, req.vgId);
(void)tFreeSCreateVnodeReq(&req);
code = terrno != 0 ? terrno : -1;
return code;
@ -422,6 +423,8 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
_OVER:
vmRemoveFromCreatingHash(pMgmt, req.vgId);
if (code != 0) {
int32_t r = 0;
r = taosThreadRwlockWrlock(&pMgmt->lock);

View File

@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "libs/function/tudf.h"
#include "osMemory.h"
#include "tfs.h"
#include "vnd.h"
@ -62,10 +63,20 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
int32_t numOfVnodes = 0;
SVnodeObj **ppVnodes = NULL;
code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
code = taosThreadMutexLock(&pMgmt->mutex);
if (code != 0) {
return code;
}
code = vmGetAllVnodeListFromHashWithCreating(pMgmt, &numOfVnodes, &ppVnodes);
if (code != 0) {
int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
if (r != 0) {
dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
}
return code;
}
for (int32_t v = 0; v < numOfVnodes; v++) {
SVnodeObj *pVnode = ppVnodes[v];
disks[pVnode->diskPrimary] += 1;
@ -81,6 +92,51 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
}
}
SVnodeObj *pCreatingVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
if (pCreatingVnode == NULL) {
code = -1;
if (terrno != 0) code = terrno;
dError("failed to alloc vnode since %s", tstrerror(code));
int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
if (r != 0) {
dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
}
goto _OVER;
}
(void)memset(pCreatingVnode, 0, sizeof(SVnodeObj));
pCreatingVnode->vgId = vgId;
pCreatingVnode->diskPrimary = diskId;
code = taosThreadRwlockWrlock(&pMgmt->lock);
if (code != 0) {
int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
if (r != 0) {
dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
}
taosMemoryFree(pCreatingVnode);
goto _OVER;
}
dTrace("vgId:%d, put vnode into creating hash, pCreatingVnode:%p", vgId, pCreatingVnode);
code = taosHashPut(pMgmt->creatingHash, &vgId, sizeof(int32_t), &pCreatingVnode, sizeof(SVnodeObj *));
if (code != 0) {
dError("vgId:%d, failed to put vnode to creatingHash", vgId);
taosMemoryFree(pCreatingVnode);
}
int32_t r = taosThreadRwlockUnlock(&pMgmt->lock);
if (r != 0) {
dError("vgId:%d, failed to unlock since %s", vgId, tstrerror(r));
}
code = taosThreadMutexUnlock(&pMgmt->mutex);
if (code != 0) {
goto _OVER;
}
_OVER:
for (int32_t i = 0; i < numOfVnodes; ++i) {
if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
vmReleaseVnode(pMgmt, ppVnodes[i]);
@ -89,8 +145,13 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
taosMemoryFree(ppVnodes);
}
dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
return diskId;
if (code != 0) {
dError("vgId:%d, failed to alloc disk since %s", vgId, tstrerror(code));
return code;
} else {
dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
return diskId;
}
}
SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) {
@ -216,12 +277,12 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
}
if (keepClosed) {
SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
(void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
if (pVnode == NULL) {
dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr());
if (pClosedVnode == NULL) {
dError("failed to alloc vnode since %s", terrstr());
(void)taosThreadRwlockUnlock(&pMgmt->lock);
return;
}
(void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
pClosedVnode->vgId = pVnode->vgId;
pClosedVnode->dropped = pVnode->dropped;
@ -427,11 +488,18 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
pMgmt->closedHash =
taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (pMgmt->hash == NULL) {
if (pMgmt->closedHash == NULL) {
dError("failed to init vnode closed hash since %s", terrstr());
return TSDB_CODE_OUT_OF_MEMORY;
}
pMgmt->creatingHash =
taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (pMgmt->creatingHash == NULL) {
dError("failed to init vnode creatingHash hash since %s", terrstr());
return TSDB_CODE_OUT_OF_MEMORY;
}
SWrapperCfg *pCfgs = NULL;
int32_t numOfVnodes = 0;
if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {
@ -509,6 +577,30 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
return 0;
}
void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId) {
(void)taosThreadRwlockWrlock(&pMgmt->lock);
SVnodeObj *pOld = NULL;
int32_t r = taosHashGetDup(pMgmt->creatingHash, &vgId, sizeof(int32_t), (void *)&pOld);
if (r != 0) {
dError("vgId:%d, failed to get vnode from creating Hash", vgId);
}
if (pOld) {
dTrace("vgId:%d, free vnode pOld:%p", vgId, &pOld);
vmFreeVnodeObj(&pOld);
}
dTrace("vgId:%d, remove from creating Hash", vgId);
r = taosHashRemove(pMgmt->creatingHash, &vgId, sizeof(int32_t));
if (r != 0) {
dError("vgId:%d, failed to remove vnode from hash", vgId);
}
(void)taosThreadRwlockUnlock(&pMgmt->lock);
_OVER:
if (r != 0) {
dError("vgId:%d, failed to remove vnode from creatingHash since %s", vgId, tstrerror(r));
}
}
static void *vmCloseVnodeInThread(void *param) {
SVnodeThread *pThread = param;
SVnodeMgmt *pMgmt = pThread->pMgmt;
@ -614,6 +706,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
pMgmt->closedHash = NULL;
}
pIter = taosHashIterate(pMgmt->creatingHash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
vmFreeVnodeObj(ppVnode);
pIter = taosHashIterate(pMgmt->creatingHash, pIter);
}
if (pMgmt->creatingHash != NULL) {
taosHashCleanup(pMgmt->creatingHash);
pMgmt->creatingHash = NULL;
}
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
@ -622,6 +726,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) {
vmStopWorker(pMgmt);
vnodeCleanup();
(void)taosThreadRwlockDestroy(&pMgmt->lock);
(void)taosThreadMutexDestroy(&pMgmt->mutex);
(void)taosThreadMutexDestroy(&pMgmt->fileLock);
taosMemoryFree(pMgmt);
}
@ -714,6 +819,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
goto _OVER;
}
code = taosThreadMutexInit(&pMgmt->mutex, NULL);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _OVER;
}
code = taosThreadMutexInit(&pMgmt->fileLock, NULL);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);

View File

@ -1104,6 +1104,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) {
(void)strcpy(info.name, "statusInterval");
(void)snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval);
(void)strcpy(info.scope, "server");
// fill info.info
if (taosArrayPush(rsp.variables, &info) == NULL) {
code = terrno;
goto _OVER;

View File

@ -172,7 +172,7 @@ void tsdbReleaseDataBlock2(STsdbReader *pReader);
int32_t tsdbRetrieveDataBlock2(STsdbReader *pReader, SSDataBlock **pBlock, SArray *pIdList);
int32_t tsdbReaderReset2(STsdbReader *pReader, SQueryTableDataCond *pCond);
int32_t tsdbGetFileBlocksDistInfo2(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader *pHandle);
int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader *pHandle, uint32_t *rows);
void *tsdbGetIdx2(SMeta *pMeta);
void *tsdbGetIvtIdx2(SMeta *pMeta);
uint64_t tsdbGetReaderMaxVersion2(STsdbReader *pReader);

View File

@ -324,7 +324,11 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
SMetaEntry me = {0};
tDecoderInit(&dc, value, valueSize);
if (metaDecodeEntry(&dc, &me) == 0) {
if (metaHandleEntry(pNewMeta, &me) != 0) {
if (me.type == TSDB_CHILD_TABLE &&
tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) {
metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64,
TD_VID(pVnode), me.ctbEntry.suid, uid);
} else if (metaHandleEntry(pNewMeta, &me) != 0) {
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
}
}

View File

@ -25,82 +25,109 @@
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
static int32_t setFirstLastResColToNull(SColumnInfoData* pCol, int32_t row) {
char* buf = taosMemoryCalloc(1, pCol->info.bytes);
if (buf == NULL) {
return terrno;
}
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
char* buf = NULL;
SFirstLastRes* pRes = NULL;
SFirstLastRes* pRes = (SFirstLastRes*)((char*)buf + VARSTR_HEADER_SIZE);
TSDB_CHECK_NULL(pCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
buf = taosMemoryCalloc(1, pCol->info.bytes);
TSDB_CHECK_NULL(buf, code, lino, _end, terrno);
pRes = (SFirstLastRes*)((char*)buf + VARSTR_HEADER_SIZE);
pRes->bytes = 0;
pRes->hasResult = true;
pRes->isNull = true;
varDataSetLen(buf, pCol->info.bytes - VARSTR_HEADER_SIZE);
int32_t code = colDataSetVal(pCol, row, buf, false);
taosMemoryFree(buf);
code = colDataSetVal(pCol, row, buf, false);
TSDB_CHECK_CODE(code, lino, _end);
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (buf != NULL) {
taosMemoryFreeClear(buf);
}
return code;
}
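Editor's note: the rewrite above funnels every failure through TSDB_CHECK_NULL / TSDB_CHECK_CODE into a single _end label so the buffer is freed on every path. A minimal sketch of how such check-and-jump macros are commonly defined and used; these are simplified stand-ins, not the actual TDengine definitions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the check macros used in the diff. */
#define CHECK_NULL(p, c, l, label, e) do { if ((p) == NULL) { (c) = (e); (l) = __LINE__; goto label; } } while (0)
#define CHECK_CODE(c, l, label)       do { if ((c) != 0)    { (l) = __LINE__; goto label; } } while (0)

static int fillBuffer(char **out, size_t n) {
  int   code = 0;
  int   lino = 0;
  char *buf  = NULL;

  CHECK_NULL(out, code, lino, _end, -1);    /* argument validation, as in the diff */
  buf = calloc(1, n);
  CHECK_NULL(buf, code, lino, _end, -1);    /* allocation failure jumps to cleanup */

  code = (n < 2) ? -2 : 0;                  /* pretend a downstream call can fail */
  CHECK_CODE(code, lino, _end);

  memset(buf, 'x', n - 1);
  *out = buf;
  buf = NULL;                               /* ownership transferred to the caller */

_end:
  if (code != 0) fprintf(stderr, "fillBuffer failed at line %d (code %d)\n", lino, code);
  free(buf);                                /* only non-NULL when an error path kept it */
  return code;
}

int main(void) {
  char *s = NULL;
  if (fillBuffer(&s, 8) == 0) { printf("%s\n", s); free(s); }
  return 0;
}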
static int32_t saveOneRowForLastRaw(SLastCol* pColVal, SCacheRowsReader* pReader, const int32_t slotId,
SColumnInfoData* pColInfoData, int32_t numOfRows) {
SColVal* pVal = &pColVal->colVal;
int32_t code = 0;
SColumnInfoData* pColInfoData, int32_t numOfRows) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SColVal* pVal = NULL;
TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
pVal = &pColVal->colVal;
// allNullRow = false;
if (IS_VAR_DATA_TYPE(pColVal->colVal.value.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
code = colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
TSDB_CHECK_CODE(code, lino, _end);
}
} else {
code = colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
TSDB_CHECK_CODE(code, lino, _end);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
const int32_t* dstSlotIds, void** pRes, const char* idStr) {
int32_t numOfRows = pBlock->info.rows;
int32_t code = 0;
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t numOfRows = 0;
SArray* funcTypeBlockArray = NULL;
TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (pReader->numOfCols > 0) {
TSDB_CHECK_NULL(slotIds, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(dstSlotIds, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pRes, code, lino, _end, TSDB_CODE_INVALID_PARA);
}
numOfRows = pBlock->info.rows;
if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) {
uint64_t ts = TSKEY_MIN;
SFirstLastRes* p = NULL;
col_id_t colId = -1;
SArray* funcTypeBlockArray = taosArrayInit(pReader->numOfCols, sizeof(int32_t));
if (funcTypeBlockArray == NULL) {
return terrno;
}
funcTypeBlockArray = taosArrayInit(pReader->numOfCols, sizeof(int32_t));
TSDB_CHECK_NULL(funcTypeBlockArray, code, lino, _end, terrno);
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
if (pColInfoData == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t funcType = FUNCTION_TYPE_CACHE_LAST;
if (pReader->pFuncTypeList != NULL && taosArrayGetSize(pReader->pFuncTypeList) > i) {
void* pVal = taosArrayGet(pReader->pFuncTypeList, i);
if (pVal == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
funcType = *(int32_t*) pVal;
funcType = *(int32_t*)pVal;
pVal = taosArrayGet(pReader->pFuncTypeList, i);
if (pVal == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
void* px = taosArrayInsert(funcTypeBlockArray, dstSlotIds[i], pVal);
if (px == NULL) {
return terrno;
}
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
if (slotIds[i] == -1) {
@ -110,24 +137,18 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
}
code = setFirstLastResColToNull(pColInfoData, numOfRows);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
continue;
}
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
if (pColVal == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
colId = pColVal->colVal.cid;
if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) {
code = saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
continue;
}
@ -154,22 +175,16 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
p->hasResult = true;
varDataSetLen(pRes[i], pColInfoData->info.bytes - VARSTR_HEADER_SIZE);
code = colDataSetVal(pColInfoData, numOfRows, (const char*)pRes[i], false);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
}
for (int32_t idx = 0; idx < taosArrayGetSize(pBlock->pDataBlock); ++idx) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, idx);
if (pCol == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (idx < funcTypeBlockArray->size) {
void* pVal = taosArrayGet(funcTypeBlockArray, idx);
if (pVal == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t funcType = *(int32_t*)pVal;
if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) {
@ -182,17 +197,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
colDataSetNULL(pCol, numOfRows);
} else {
code = colDataSetVal(pCol, numOfRows, (const char*)&ts, false);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
}
continue;
} else if (pReader->numOfCols == 1 && idx != dstSlotIds[0] && (pCol->info.colId == colId || colId == -1)) {
if (p && !p->isNull) {
code = colDataSetVal(pCol, numOfRows, p->buf, false);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
} else {
colDataSetNULL(pCol, numOfRows);
}
@ -201,13 +212,10 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
// pBlock->info.rows += allNullRow ? 0 : 1;
++pBlock->info.rows;
taosArrayDestroy(funcTypeBlockArray);
} else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
if (pColInfoData == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t slotId = slotIds[i];
if (slotId == -1) {
@ -216,47 +224,53 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
if (pColVal == NULL) {
return TSDB_CODE_INVALID_PARA;
}
TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
code = saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows);
if (code) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
}
// pBlock->info.rows += allNullRow ? 0 : 1;
++pBlock->info.rows;
} else {
tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr);
return TSDB_CODE_INVALID_PARA;
code = TSDB_CODE_INVALID_PARA;
TSDB_CHECK_CODE(code, lino, _end);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (funcTypeBlockArray != NULL) {
taosArrayDestroy(funcTypeBlockArray);
}
return code;
}
static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* idstr) {
int32_t numOfTables = p->numOfTables;
int32_t code = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t numOfTables = 0;
TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
numOfTables = p->numOfTables;
if (suid != 0) {
code = metaGetTbTSchemaNotNull(p->pVnode->pMeta, suid, -1, 1, &p->pSchema);
if (TSDB_CODE_SUCCESS != code) {
tsdbWarn("stable:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", suid, idstr);
if(code == TSDB_CODE_NOT_FOUND) {
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
} else {
return code;
if (code == TSDB_CODE_NOT_FOUND) {
code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
TSDB_CHECK_CODE(code, lino, _end);
}
} else {
for (int32_t i = 0; i < numOfTables; ++i) {
uint64_t uid = p->pTableList[i].uid;
code = metaGetTbTSchemaMaybeNull(p->pVnode->pMeta, uid, -1, 1, &p->pSchema);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
if (p->pSchema != NULL) {
break;
}
@ -267,33 +281,52 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id
// all queried tables have been dropped already, return immediately.
if (p->pSchema == NULL) {
tsdbWarn("all queried tables has been dropped, try next group, %s", idstr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
TSDB_CHECK_CODE(code, lino, _end);
}
}
return TSDB_CODE_SUCCESS;
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
int32_t tsdbReuseCacherowsReader(void* reader, void* pTableIdList, int32_t numOfTables) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SCacheRowsReader* pReader = (SCacheRowsReader*)reader;
TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
pReader->pTableList = pTableIdList;
pReader->numOfTables = numOfTables;
pReader->lastTs = INT64_MIN;
destroySttBlockReader(pReader->pLDataIterArray, NULL);
pReader->pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
TSDB_CHECK_NULL(pReader->pLDataIterArray, code, lino, _end, terrno);
return (pReader->pLDataIterArray != NULL) ? TSDB_CODE_SUCCESS : terrno;
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr,
SArray* pFuncTypeList, SColumnInfo* pPkCol, int32_t numOfPks) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SCacheRowsReader* p = NULL;
TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pReader = NULL;
SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
return terrno;
}
p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
TSDB_CHECK_NULL(p, code, lino, _end, terrno);
p->type = type;
p->pVnode = pVnode;
@ -307,12 +340,13 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
p->rowKey.numOfPKs = numOfPks;
if (numOfPks > 0) {
TSDB_CHECK_NULL(pPkCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
p->rowKey.pks[0].type = pPkCol->type;
if (IS_VAR_DATA_TYPE(pPkCol->type)) {
p->rowKey.pks[0].pData = taosMemoryCalloc(1, pPkCol->bytes);
if (p->rowKey.pks[0].pData == NULL) {
taosMemoryFree(p);
return terrno;
taosMemoryFreeClear(p);
TSDB_CHECK_NULL(p->rowKey.pks[0].pData, code, lino, _end, terrno);
}
}
@ -321,48 +355,46 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
if (numOfTables == 0) {
*pReader = p;
return TSDB_CODE_SUCCESS;
p = NULL;
goto _end;
}
p->pTableList = pTableIdList;
p->numOfTables = numOfTables;
int32_t code = setTableSchema(p, suid, idstr);
if (code != TSDB_CODE_SUCCESS) {
tsdbCacherowsReaderClose(p);
return code;
}
code = setTableSchema(p, suid, idstr);
TSDB_CHECK_CODE(code, lino, _end);
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
if (p->transferBuf == NULL) {
tsdbCacherowsReaderClose(p);
return terrno;
}
TSDB_CHECK_NULL(p->transferBuf, code, lino, _end, terrno);
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
if (p->transferBuf[i] == NULL) {
tsdbCacherowsReaderClose(p);
return terrno;
}
TSDB_CHECK_NULL(p->transferBuf[i], code, lino, _end, terrno);
}
}
p->idstr = taosStrdup(idstr);
if (idstr != NULL && p->idstr == NULL) {
tsdbCacherowsReaderClose(p);
return terrno;
if (idstr != NULL) {
p->idstr = taosStrdup(idstr);
TSDB_CHECK_NULL(p->idstr, code, lino, _end, terrno);
}
code = taosThreadMutexInit(&p->readerMutex, NULL);
if (code) {
tsdbCacherowsReaderClose(p);
return code;
}
TSDB_CHECK_CODE(code, lino, _end);
p->lastTs = INT64_MIN;
*pReader = p;
p = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
*pReader = NULL;
}
if (p != NULL) {
tsdbCacherowsReaderClose(p);
}
return code;
}
@ -393,6 +425,7 @@ void tsdbCacherowsReaderClose(void* pReader) {
if (p->pLDataIterArray) {
destroySttBlockReader(p->pLDataIterArray, NULL);
p->pLDataIterArray = NULL;
}
if (p->pFileReader) {
@ -401,7 +434,7 @@ void tsdbCacherowsReaderClose(void* pReader) {
}
taosMemoryFree((void*)p->idstr);
(void) taosThreadMutexDestroy(&p->readerMutex);
(void)taosThreadMutexDestroy(&p->readerMutex);
if (p->pTableMap) {
void* pe = NULL;
@ -443,39 +476,32 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) {
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds,
SArray* pTableUidList, bool* pGotAll) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
bool hasRes = false;
SArray* pRow = NULL;
void** pRes = NULL;
SCacheRowsReader* pr = pReader;
SCacheRowsReader* pr = NULL;
int32_t pkBufLen = 0;
TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
pr = pReader;
pr->pReadSnap = NULL;
pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
if (pRow == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(pRow, code, lino, _end, terrno);
pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
if (pRes == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(pRes, code, lino, _end, terrno);
pkBufLen = (pr->rowKey.numOfPKs > 0) ? pr->pkColumn.bytes : 0;
for (int32_t j = 0; j < pr->numOfCols; ++j) {
int32_t bytes = (slotIds[j] == -1) ? 1 : pr->pSchema->columns[slotIds[j]].bytes;
pRes[j] = taosMemoryCalloc(1, sizeof(SFirstLastRes) + bytes + pkBufLen + VARSTR_HEADER_SIZE);
if (pRes[j] == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(pRes[j], code, lino, _end, terrno);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
p->ts = INT64_MIN;
@ -483,9 +509,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
(void)taosThreadMutexLock(&pr->readerMutex);
code = tsdbTakeReadSnap2((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap, pr->idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
TSDB_CHECK_CODE(code, lino, _end);
int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
@ -494,20 +518,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
SArray* pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
if (pLastCols == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(pLastCols, code, lino, _end, terrno);
for (int32_t i = 0; i < pr->numOfCols; ++i) {
int32_t slotId = slotIds[i];
if (slotId == -1) {
SLastCol p = {.rowKey.ts = INT64_MIN, .colVal.value.type = TSDB_DATA_TYPE_BOOL, .colVal.flag = CV_FLAG_NULL};
void* px = taosArrayPush(pLastCols, &p);
if (px == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
continue;
}
struct STColumn* pCol = &pr->pSchema->columns[slotId];
@ -518,29 +536,19 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t j = 0; j < pr->rowKey.numOfPKs; j++) {
p.rowKey.pks[j].type = pr->pkColumn.type;
if (IS_VAR_DATA_TYPE(pr->pkColumn.type)) {
p.rowKey.pks[j].pData = taosMemoryCalloc(1, pr->pkColumn.bytes);
if (p.rowKey.pks[j].pData == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(p.rowKey.pks[j].pData, code, lino, _end, terrno);
}
}
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char));
if (p.colVal.value.pData == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(p.colVal.value.pData, code, lino, _end, terrno);
}
void* px = taosArrayPush(pLastCols, &p);
if (px == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
int64_t st = taosGetTimestampUs();
@ -549,11 +557,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tb_uid_t uid = pTableList[i].uid;
code = tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
if (code == -1) {// fix the invalid return code
if (code == -1) { // fix the invalid return code
code = 0;
} else if (code != 0) {
goto _end;
}
TSDB_CHECK_CODE(code, lino, _end);
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem);
@ -600,10 +607,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
if (k == 0) {
if (TARRAY_SIZE(pTableUidList) == 0) {
void* px = taosArrayPush(pTableUidList, &uid);
if (px == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
} else {
taosArraySet(pTableUidList, 0, &uid);
}
@ -613,6 +617,16 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
singleTableLastTs = pColVal->rowKey.ts;
}
if (p->colVal.value.type != pColVal->colVal.value.type) {
// check for type/cid mismatch
tsdbError("last cache type mismatch, uid:%" PRIu64
", schema-type:%d, slotId:%d, cache-type:%d, cache-col:%d",
uid, p->colVal.value.type, slotIds[k], pColVal->colVal.value.type, pColVal->colVal.cid);
taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem);
code = TSDB_CODE_INVALID_PARA;
goto _end;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.value.type)) {
p->colVal = pColVal->colVal;
} else {
@ -644,9 +658,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
if (hasRes) {
code = saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
if (code) {
goto _end;
}
TSDB_CHECK_CODE(code, lino, _end);
}
taosArrayDestroyEx(pLastCols, tsdbCacheFreeSLastColItem);
@ -656,11 +668,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tb_uid_t uid = pTableList[i].uid;
if ((code = tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype)) != 0) {
if (code == -1) {// fix the invalid return code
if (code == -1) { // fix the invalid return code
code = 0;
} else if (code != 0) {
goto _end;
}
TSDB_CHECK_CODE(code, lino, _end);
}
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
@ -669,17 +680,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
code = saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
if (code) {
goto _end;
}
TSDB_CHECK_CODE(code, lino, _end);
taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem);
void* px = taosArrayPush(pTableUidList, &uid);
if (px == NULL) {
code = terrno;
goto _end;
}
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
++pr->tableIndex;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -692,6 +698,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
} else {
code = TSDB_CODE_INVALID_PARA;
TSDB_CHECK_CODE(code, lino, _end);
}
_end:
@ -713,5 +720,8 @@ _end:
taosMemoryFree(pRes);
taosArrayDestroy(pRow);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -30,13 +30,13 @@ extern "C" {
do { \
(_w)->skey = INT64_MAX; \
(_w)->ekey = INT64_MIN; \
} while (0);
} while (0)
#define INIT_KEYRANGE(_k) \
do { \
(_k)->skey.ts = INT64_MAX; \
(_k)->ekey.ts = INT64_MIN; \
} while (0);
} while (0)
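Editor's note: dropping the trailing semicolon from the do { ... } while (0) wrapper matters for macros used as statements: with the semicolon baked in, MACRO(x); expands to two statements and breaks an if/else written without braces. A small illustration with generic macro names, not the project's:

#include <stdio.h>

/* Correct form: no trailing semicolon, the caller supplies it. */
#define RESET_RANGE(lo, hi) do { (lo) = 0; (hi) = 0; } while (0)

int main(void) {
  int lo = 5, hi = 9, flag = 1;
  if (flag)
    RESET_RANGE(lo, hi);   /* works inside if/else because this is one statement */
  else
    printf("not reset\n");
  printf("lo=%d hi=%d\n", lo, hi);
  return 0;
}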
#define tRowGetKeyEx(_pRow, _pKey) \
{ \
@ -72,7 +72,6 @@ typedef struct STsdbReaderInfo {
} STsdbReaderInfo;
typedef struct SBlockInfoBuf {
int32_t currentIndex;
SArray* pData;
int32_t numPerBucket;
int32_t numOfTables;
@ -241,7 +240,6 @@ typedef struct SDataBlockIter {
int32_t index;
SArray* blockList; // SArray<SFileDataBlockInfo>
int32_t order;
SDataBlk block; // current SDataBlk data
} SDataBlockIter;
typedef struct SFileBlockDumpInfo {
@ -321,7 +319,7 @@ int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, c
int32_t initTableBlockScanInfo(STableBlockScanInfo* pScanInfo, uint64_t uid, SSHashObj* pTableMap,
STsdbReader* pReader);
void clearBlockScanInfo(STableBlockScanInfo* p);
void destroyAllBlockScanInfo(SSHashObj* pTableMap);
void destroyAllBlockScanInfo(SSHashObj** pTableMap);
void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step);
void cleanupInfoForNextFileset(SSHashObj* pTableMap);
int32_t ensureBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables);
@ -335,7 +333,7 @@ void clearBrinBlockIter(SBrinRecordIter* pIter);
// initialize block iterator API
int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks, SArray* pTableList);
bool blockIteratorNext(SDataBlockIter* pBlockIter, const char* idStr);
bool blockIteratorNext(SDataBlockIter* pBlockIter);
// load tomb data API (stt/mem only for one table each, tomb data from data files are load for all tables at one time)
int32_t loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piMemTbData, int64_t ver);

View File

@ -35,6 +35,9 @@
extern SConfig* tsCfg;
static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) {
if (NULL == pBlock || NULL == pRsp) {
return TSDB_CODE_INVALID_PARA;
}
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
@ -53,6 +56,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols);
if(len < 0) {
taosMemoryFree(*pRsp);
*pRsp = NULL;
return terrno;
}
SET_PAYLOAD_LEN((*pRsp)->data, len, len);
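Editor's note: resetting *pRsp to NULL after freeing it matters because the caller may inspect or free the out-parameter on error; leaving it pointing at freed memory invites a double free. A short illustration of the idiom with a hypothetical helper, not the actual response builder:

#include <stdlib.h>
#include <string.h>

/* Hypothetical builder: on failure, free and NULL the out-parameter. */
static int buildRsp(const char *payload, char **pRsp) {
  *pRsp = malloc(64);
  if (*pRsp == NULL) return -1;
  if (strlen(payload) >= 64) {   /* encoding failed: undo the allocation */
    free(*pRsp);
    *pRsp = NULL;                /* caller sees NULL, cannot double free */
    return -1;
  }
  strcpy(*pRsp, payload);
  return 0;
}

int main(void) {
  char *rsp = NULL;
  if (buildRsp("hello", &rsp) == 0) free(rsp);
  return 0;
}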
@ -216,6 +220,9 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp, int8_t biMode) {
SDescribeStmt* pDesc = (SDescribeStmt*)pStmt;
if (NULL == pDesc || NULL == pDesc->pMeta) {
return TSDB_CODE_INVALID_PARA;
}
int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta);
SSDataBlock* pBlock = NULL;
@ -505,7 +512,7 @@ static int32_t buildCreateViewResultDataBlock(SSDataBlock** pOutput) {
return code;
}
void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
static void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfColumns; ++i) {
SSchema* pSchema = pCfg->pSchemas + i;
#define LTYPE_LEN (32 + 60) // 60 byte for compress info
@ -539,7 +546,7 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
static void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfTags; ++i) {
SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i;
char type[32];
@ -558,7 +565,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
static void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfTags; ++i) {
SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i;
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
@ -566,7 +573,7 @@ void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
static int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pTagVals = NULL;
STag* pTag = (STag*)pCfg->pTags;
@ -643,7 +650,7 @@ _exit:
return code;
}
void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) {
static void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) {
if (pCfg->commentLen > 0) {
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
" COMMENT '%s'", pCfg->pComment);
@ -947,12 +954,18 @@ static int32_t buildLocalVariablesResultDataBlock(SSDataBlock** pOutput) {
goto _exit;
}
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN;
if (taosArrayPush(pBlock->pDataBlock, &infoData) == NULL) {
goto _exit;
}
*pOutput = pBlock;
_exit:
if (terrno != TSDB_CODE_SUCCESS) {
taosMemoryFree(pBlock);
taosArrayDestroy(pBlock->pDataBlock);
taosMemoryFree(pBlock);
}
return terrno;
}
@ -997,7 +1010,7 @@ static int32_t createSelectResultDataBlock(SNodeList* pProjects, SSDataBlock** p
return code;
}
int32_t buildSelectResultDataBlock(SNodeList* pProjects, SSDataBlock* pBlock) {
static int32_t buildSelectResultDataBlock(SNodeList* pProjects, SSDataBlock* pBlock) {
QRY_ERR_RET(blockDataEnsureCapacity(pBlock, 1));
int32_t index = 0;

View File

@ -30,8 +30,8 @@ char *gJoinTypeStr[JOIN_TYPE_MAX_VALUE][JOIN_STYPE_MAX_VALUE] = {
/*FULL*/ {"Full Join", "Full Join", NULL, NULL, NULL, NULL},
};
int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel);
static int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
static int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel);
char *qExplainGetDynQryCtrlType(EDynQueryType type) {
switch (type) {
@ -118,7 +118,7 @@ void qExplainFreeCtx(SExplainCtx *pCtx) {
taosMemoryFree(pCtx);
}
int32_t qExplainInitCtx(SExplainCtx **pCtx, SHashObj *groupHash, bool verbose, double ratio, EExplainMode mode) {
static int32_t qExplainInitCtx(SExplainCtx **pCtx, SHashObj *groupHash, bool verbose, double ratio, EExplainMode mode) {
int32_t code = 0;
SExplainCtx *ctx = taosMemoryCalloc(1, sizeof(SExplainCtx));
if (NULL == ctx) {
@ -158,7 +158,7 @@ _return:
QRY_RET(code);
}
int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNodeList **pChildren) {
static int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNodeList **pChildren) {
int32_t tlen = 0;
SNodeList *pPhysiChildren = pNode->pChildren;
@ -180,7 +180,7 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
return TSDB_CODE_SUCCESS;
}
int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, SExplainGroup *group) {
static int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, SExplainGroup *group) {
*pExecInfo = taosArrayInit(group->nodeNum, sizeof(SExplainExecInfo));
if (NULL == (*pExecInfo)) {
qError("taosArrayInit %d explainExecInfo failed", group->nodeNum);
@ -217,7 +217,7 @@ int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, S
return TSDB_CODE_SUCCESS;
}
int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pResNode) {
static int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pResNode) {
if (NULL == pNode) {
*pResNode = NULL;
qError("physical node is NULL");
@ -250,7 +250,7 @@ _return:
QRY_RET(code);
}
int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
static int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
int32_t tlen = *len;
int32_t nodeNum = taosArrayGetSize(pExecInfo);
SExplainExecInfo maxExecInfo = {0};
@ -275,7 +275,7 @@ int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
return TSDB_CODE_SUCCESS;
}
int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
static int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
int32_t tlen = 0;
bool gotVerbose = false;
int32_t nodeNum = taosArrayGetSize(pExecInfo);
@ -297,7 +297,7 @@ int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t
return TSDB_CODE_SUCCESS;
}
int32_t qExplainResAppendRow(SExplainCtx *ctx, char *tbuf, int32_t len, int32_t level) {
static int32_t qExplainResAppendRow(SExplainCtx *ctx, char *tbuf, int32_t len, int32_t level) {
SQueryExplainRowInfo row = {0};
row.buf = taosMemoryMalloc(len);
if (NULL == row.buf) {
@ -362,7 +362,7 @@ static char* qExplainGetScanDataLoad(STableScanPhysiNode* pScan) {
return "unknown";
}
int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
static int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
int32_t tlen = 0;
bool isVerboseLine = false;
char *tbuf = ctx->tbuf;
@ -1900,7 +1900,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
return TSDB_CODE_SUCCESS;
}
int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
static int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
if (NULL == pResNode) {
qError("explain res node is NULL");
QRY_ERR_RET(TSDB_CODE_APP_ERROR);
@ -1915,7 +1915,7 @@ int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32
return TSDB_CODE_SUCCESS;
}
int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel) {
static int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel) {
SExplainResNode *node = NULL;
int32_t code = 0;
SExplainCtx *ctx = (SExplainCtx *)pCtx;
@ -1940,7 +1940,7 @@ _return:
QRY_RET(code);
}
int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
static int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
int32_t code = 0;
SSDataBlock *pBlock = NULL;
SExplainCtx *pCtx = (SExplainCtx *)ctx;
@ -1997,7 +1997,7 @@ _return:
QRY_RET(code);
}
int32_t qExplainPrepareCtx(SQueryPlan *pDag, SExplainCtx **pCtx) {
static int32_t qExplainPrepareCtx(SQueryPlan *pDag, SExplainCtx **pCtx) {
int32_t code = 0;
SNodeListNode *plans = NULL;
int32_t taskNum = 0;
@ -2080,7 +2080,7 @@ _return:
QRY_RET(code);
}
int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
static int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
if (EXPLAIN_MODE_ANALYZE != pCtx->mode) {
return TSDB_CODE_SUCCESS;
}
@ -2103,7 +2103,7 @@ int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
return TSDB_CODE_SUCCESS;
}
int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
static int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
QRY_ERR_RET(qExplainAppendGroupResRows(pCtx, pCtx->rootGroupId, 0, false));
QRY_ERR_RET(qExplainAppendPlanRows(pCtx));
QRY_ERR_RET(qExplainGetRspFromCtx(pCtx, pRsp));
@ -2112,6 +2112,7 @@ int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
}
int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp) {
if(!pCtx || !pRspMsg || !pRsp) return TSDB_CODE_INVALID_PARA;
SExplainResNode *node = NULL;
int32_t code = 0;
bool groupDone = false;
@ -2176,6 +2177,7 @@ _exit:
}
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp) {
if (!pDag || !pRsp) return TSDB_CODE_INVALID_PARA;
int32_t code = 0;
SExplainCtx *pCtx = NULL;
@ -2188,6 +2190,7 @@ _return:
}
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs) {
if(!pDag || !pCtx) return TSDB_CODE_INVALID_PARA;
QRY_ERR_RET(qExplainPrepareCtx(pDag, pCtx));
(*pCtx)->reqStartTs = startTs;
@ -2197,6 +2200,7 @@ int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs)
}
int32_t qExecExplainEnd(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
if(!pCtx || !pRsp) return TSDB_CODE_INVALID_PARA;
int32_t code = 0;
pCtx->jobDoneTs = taosGetTimestampUs();
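Editor's note: marking these explain helpers static gives them internal linkage: they no longer leak into the global symbol table, cannot collide with same-named functions in other translation units, and the compiler can warn when one goes unused. A tiny illustration with generic names:

#include <stdio.h>

/* Forward declaration with internal linkage, mirroring the pattern in the diff. */
static int appendRow(int level);

/* Only this translation unit can call appendRow. */
static int appendRow(int level) {
  printf("row at level %d\n", level);
  return 0;
}

int main(void) { return appendRow(0); }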

View File

@ -121,10 +121,10 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
}
} else {
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pDataInfo->totalRows,
pExchangeInfo->loadInfo.totalRows, i + 1, totalSources);
qDebug("%s vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
" execId:%d index:%d completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId, i,
pDataInfo->totalRows, pExchangeInfo->loadInfo.totalRows, i + 1, totalSources);
taosMemoryFreeClear(pDataInfo->pRsp);
}
break;
@ -141,17 +141,17 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
if (pRsp->completed == 1) {
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64
qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
" execId:%d index:%d completed, blocks:%d, numOfRows:%" PRId64 ", rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", total:%.2f Kb, try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks,
pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0, i + 1,
totalSources);
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId, i,
pRsp->numOfBlocks, pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows,
pLoadInfo->totalSize / 1024.0, i + 1, totalSources);
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%" PRId64
", totalRows:%" PRIu64 ", total:%.2f Kb",
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRsp->numOfBlocks,
pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0);
qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
" execId:%d blocks:%d, numOfRows:%" PRId64 ", totalRows:%" PRIu64 ", total:%.2f Kb",
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
pRsp->numOfBlocks, pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0);
}
taosMemoryFreeClear(pDataInfo->pRsp);
@ -640,9 +640,9 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
if (pSource->localExec) {
SDataBuf pBuf = {0};
int32_t code =
(*pTaskInfo->localFetch.fp)(pTaskInfo->localFetch.handle, pSource->schedId, pTaskInfo->id.queryId,
pSource->taskId, 0, pSource->execId, &pBuf.pData, pTaskInfo->localFetch.explainRes);
int32_t code = (*pTaskInfo->localFetch.fp)(pTaskInfo->localFetch.handle, pSource->schedId, pTaskInfo->id.queryId,
pSource->clientId, pSource->taskId, 0, pSource->execId, &pBuf.pData,
pTaskInfo->localFetch.explainRes);
code = loadRemoteDataCallback(pWrapper, &pBuf, code);
QUERY_CHECK_CODE(code, lino, _end);
taosMemoryFree(pWrapper);
@ -650,6 +650,7 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
SResFetchReq req = {0};
req.header.vgId = pSource->addr.nodeId;
req.sId = pSource->schedId;
req.clientId = pSource->clientId;
req.taskId = pSource->taskId;
req.queryId = pTaskInfo->id.queryId;
req.execId = pSource->execId;
@ -691,9 +692,10 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
freeOperatorParam(req.pOpParam, OP_GET_PARAM);
qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %p, %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId,
pSource->execId, pExchangeInfo, sourceIndex, totalSources);
qDebug("%s build fetch msg and send to vgId:%d, ep:%s, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
", execId:%d, %p, %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->clientId,
pSource->taskId, pSource->execId, pExchangeInfo, sourceIndex, totalSources);
// send the fetch remote task result request
SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
@ -974,8 +976,9 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
}
if (pDataInfo->code != TSDB_CODE_SUCCESS) {
qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo),
pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code));
qError("%s vgId:%d, clientId:0x%" PRIx64 " taskID:0x%" PRIx64 " execId:%d error happens, code:%s",
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
tstrerror(pDataInfo->code));
pOperator->pTaskInfo->code = pDataInfo->code;
return pOperator->pTaskInfo->code;
}
@ -984,10 +987,10 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pRsp->numOfRows == 0) {
qDebug("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d %d of total completed, rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 " try next",
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pExchangeInfo->current + 1,
pDataInfo->totalRows, pLoadInfo->totalRows);
qDebug("%s vgId:%d, clientId:0x%" PRIx64 " taskID:0x%" PRIx64
" execId:%d %d of total completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 " try next",
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
pExchangeInfo->current + 1, pDataInfo->totalRows, pLoadInfo->totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
@ -1002,19 +1005,19 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
", rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1,
totalSources);
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
pRetrieveRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize,
pExchangeInfo->current + 1, totalSources);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64 ", totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
", totalRows:%" PRIu64 ", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
pRetrieveRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize);
}
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);

View File

@ -3468,11 +3468,6 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
goto _end;
}
void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
if (!pUpInfo) {
lino = __LINE__;
goto _end;
}
SDecoder decoder = {0};
pDeCoder = &decoder;
tDecoderInit(pDeCoder, buf, tlen);
@ -3481,14 +3476,21 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
goto _end;
}
code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
if (code == TSDB_CODE_SUCCESS) {
pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
pInfo->pUpdateInfo = pUpInfo;
} else {
taosMemoryFree(pUpInfo);
lino = __LINE__;
goto _end;
if (pInfo->pUpdateInfo != NULL) {
void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
if (!pUpInfo) {
lino = __LINE__;
goto _end;
}
code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
if (code == TSDB_CODE_SUCCESS) {
pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
pInfo->pUpdateInfo = pUpInfo;
} else {
taosMemoryFree(pUpInfo);
lino = __LINE__;
goto _end;
}
}
if (tDecodeIsEnd(pDeCoder)) {

View File

@ -685,10 +685,10 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
break;
}
} else {
code = sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, pDataBlock, tableName, schemaRow, typeName);
QUERY_CHECK_CODE(code, lino, _end);
}
// if pInfo->pRes->info.rows == 0, also need to add the meta to pDataBlock
code = sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, pDataBlock, tableName, schemaRow, typeName);
QUERY_CHECK_CODE(code, lino, _end);
}
if (numOfRows > 0) {
@ -761,7 +761,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
SMetaReader smrChildTable = {0};
pAPI->metaReaderFn.initReader(&smrChildTable, pInfo->readHandle.vnode, META_READER_LOCK, &pAPI->metaFn);
int32_t code = pAPI->metaReaderFn.getTableEntryByName(&smrChildTable, condTableName);
code = pAPI->metaReaderFn.getTableEntryByName(&smrChildTable, condTableName);
if (code != TSDB_CODE_SUCCESS) {
// terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
pAPI->metaReaderFn.clearReader(&smrChildTable);
@ -847,18 +847,18 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
pAPI->metaReaderFn.clearReader(&smrSuperTable);
break;
}
} else {
code = sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
dataBlock);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
pAPI->metaReaderFn.clearReader(&smrSuperTable);
pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
blockDataDestroy(dataBlock);
dataBlock = NULL;
T_LONG_JMP(pTaskInfo->env, terrno);
}
}
// if pInfo->pRes->info.rows == 0, also need to add this meta into datablock.
code = sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
dataBlock);
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
pAPI->metaReaderFn.clearReader(&smrSuperTable);
pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
blockDataDestroy(dataBlock);
dataBlock = NULL;
T_LONG_JMP(pTaskInfo->env, terrno);
}
pAPI->metaReaderFn.clearReader(&smrSuperTable);
}
@ -2792,7 +2792,9 @@ static int32_t doBlockInfoScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes
code = pAPI->tsdReader.tsdReaderGetDataBlockDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
QUERY_CHECK_CODE(code, lino, _end);
blockDistInfo.numOfInmemRows = (int32_t)pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle);
blockDistInfo.numOfInmemRows = 0;
code = pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle, &blockDistInfo.numOfInmemRows);
QUERY_CHECK_CODE(code, lino, _end);
SSDataBlock* pBlock = pBlockScanInfo->pResBlock;

View File

@ -26,7 +26,7 @@ extern "C" {
struct tMemBucket;
int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval, bool hasWindowOrGroup,
struct tMemBucket **pBucket);
struct tMemBucket **pBucket, int32_t numOfElements);
void tMemBucketDestroy(struct tMemBucket **pBucket);

View File

@ -188,7 +188,11 @@ static int32_t countTrailingSpaces(const SValueNode* pVal, bool isLtrim) {
static int32_t addTimezoneParam(SNodeList* pList) {
char buf[TD_TIME_STR_LEN] = {0};
time_t t = taosTime(NULL);
time_t t;
int32_t code = taosTime(&t);
if (code != 0) {
return code;
}
struct tm tmInfo;
if (taosLocalTime(&t, &tmInfo, buf, sizeof(buf)) != NULL) {
(void)strftime(buf, sizeof(buf), "%z", &tmInfo);
@ -196,7 +200,7 @@ static int32_t addTimezoneParam(SNodeList* pList) {
int32_t len = (int32_t)strlen(buf);
SValueNode* pVal = NULL;
int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal);
code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal);
if (pVal == NULL) {
return code;
}
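Editor's note: the first hunk above stops assuming the wall-clock read always succeeds: taosTime now reports failure through a return code and the caller returns early instead of formatting an uninitialized time_t. A rough sketch of the same defensive pattern built on the standard time() and localtime_r calls, not the taos wrappers:

#include <stdio.h>
#include <time.h>

/* Return 0 on success and write the current time through the out-parameter. */
static int getWallClock(time_t *t) {
  if (t == NULL) return -1;
  if (time(t) == (time_t)-1) return -1;   /* time() can fail; propagate it */
  return 0;
}

int main(void) {
  time_t now;
  if (getWallClock(&now) != 0) {
    fprintf(stderr, "failed to read wall clock\n");
    return 1;
  }
  char buf[64];
  struct tm tmInfo;
  if (localtime_r(&now, &tmInfo) != NULL && strftime(buf, sizeof(buf), "%z", &tmInfo) > 0) {
    printf("timezone offset: %s\n", buf);
  }
  return 0;
}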

View File

@ -1805,7 +1805,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
pResInfo->complete = true;
return TSDB_CODE_SUCCESS;
} else {
code = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval, pCtx->hasWindowOrGroup, &pInfo->pMemBucket);
code = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval, pCtx->hasWindowOrGroup, &pInfo->pMemBucket, pInfo->numOfElems);
if (TSDB_CODE_SUCCESS != code) {
return code;
}

View File

@ -269,18 +269,16 @@ static void resetSlotInfo(tMemBucket *pBucket) {
}
int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval, bool hasWindowOrGroup,
tMemBucket **pBucket) {
tMemBucket **pBucket, int32_t numOfElements) {
*pBucket = (tMemBucket *)taosMemoryCalloc(1, sizeof(tMemBucket));
if (*pBucket == NULL) {
return terrno;
}
if (hasWindowOrGroup) {
// With window or group by, we need to shrink page size and reduce page num to save memory.
(*pBucket)->numOfSlots = DEFAULT_NUM_OF_SLOT / 8 ; // 128 bucket
// With window or group by, we need to shrink page size to save memory.
(*pBucket)->bufPageSize = 4096; // 4k per page
} else {
(*pBucket)->numOfSlots = DEFAULT_NUM_OF_SLOT;
(*pBucket)->bufPageSize = 16384 * 4; // 16k per page
}
@ -302,6 +300,8 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou
}
(*pBucket)->elemPerPage = ((*pBucket)->bufPageSize - sizeof(SFilePage)) / (*pBucket)->bytes;
(*pBucket)->numOfSlots = TMIN((int16_t)(numOfElements / ((*pBucket)->elemPerPage * 6)) + 1, DEFAULT_NUM_OF_SLOT);
(*pBucket)->comparFn = getKeyComparFunc((*pBucket)->type, TSDB_ORDER_ASC);
(*pBucket)->hashFunc = getHashFunc((*pBucket)->type);
@ -587,7 +587,7 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
// try next round
tMemBucket *tmpBucket = NULL;
int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal,
false, &tmpBucket);
false, &tmpBucket, pSlot->info.size);
if (TSDB_CODE_SUCCESS != code) {
tMemBucketDestroy(&tmpBucket);
return code;
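Editor's note: passing numOfElements into tMemBucketCreate lets the bucket derive its slot count from the expected input instead of always allocating DEFAULT_NUM_OF_SLOT slots, which is what the numOfSlots assignment above computes. A small arithmetic sketch of that sizing; the constants are illustrative and the real page header size and slot cap may differ:

#include <stdio.h>

#define DEFAULT_NUM_OF_SLOT 1024   /* assumed cap, for illustration only */

static int computeSlots(int numOfElements, int bufPageSize, int elemBytes, int headerBytes) {
  int elemPerPage = (bufPageSize - headerBytes) / elemBytes;
  int slots = numOfElements / (elemPerPage * 6) + 1;     /* grow with the input size */
  return slots < DEFAULT_NUM_OF_SLOT ? slots : DEFAULT_NUM_OF_SLOT;
}

int main(void) {
  /* e.g. 100k 8-byte values, 4 KB pages, 16-byte page header -> a few dozen slots */
  printf("slots: %d\n", computeSlots(100000, 4096, 8, 16));
  return 0;
}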

View File

@ -62,8 +62,13 @@ static void udfUdfdStopAsyncCb(uv_async_t *async);
static void udfWatchUdfd(void *args);
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal) {
TAOS_UDF_CHECK_PTR_RVOID(process);
fnInfo("udfd process exited with status %" PRId64 ", signal %d", exitStatus, termSignal);
SUdfdData *pData = process->data;
if(pData == NULL) {
fnError("udfd process data is NULL");
return;
}
if (exitStatus == 0 && termSignal == 0 || atomic_load_32(&pData->stopCalled)) {
fnInfo("udfd process exit due to SIGINT or dnode-mgmt called stop");
} else {
@ -77,6 +82,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal)
static int32_t udfSpawnUdfd(SUdfdData *pData) {
fnInfo("start to init udfd");
TAOS_UDF_CHECK_PTR_RCODE(pData);
int32_t err = 0;
uv_process_options_t options = {0};
@ -267,17 +273,20 @@ _OVER:
}
static void udfUdfdCloseWalkCb(uv_handle_t *handle, void *arg) {
TAOS_UDF_CHECK_PTR_RVOID(handle);
if (!uv_is_closing(handle)) {
uv_close(handle, NULL);
}
}
static void udfUdfdStopAsyncCb(uv_async_t *async) {
TAOS_UDF_CHECK_PTR_RVOID(async);
SUdfdData *pData = async->data;
uv_stop(&pData->loop);
}
static void udfWatchUdfd(void *args) {
TAOS_UDF_CHECK_PTR_RVOID(args);
SUdfdData *pData = args;
TAOS_UV_CHECK_ERRNO(uv_loop_init(&pData->loop));
TAOS_UV_CHECK_ERRNO(uv_async_init(&pData->loop, &pData->stopAsync, udfUdfdStopAsyncCb));
@ -873,6 +882,7 @@ void *decodeUdfResponse(const void *buf, SUdfResponse *rsp) {
}
void freeUdfColumnData(SUdfColumnData *data, SUdfColumnMeta *meta) {
TAOS_UDF_CHECK_PTR_RVOID(data, meta);
if (IS_VAR_DATA_TYPE(meta->type)) {
taosMemoryFree(data->varLenCol.varOffsets);
data->varLenCol.varOffsets = NULL;
@ -886,9 +896,13 @@ void freeUdfColumnData(SUdfColumnData *data, SUdfColumnMeta *meta) {
}
}
void freeUdfColumn(SUdfColumn *col) { freeUdfColumnData(&col->colData, &col->colMeta); }
void freeUdfColumn(SUdfColumn *col) {
TAOS_UDF_CHECK_PTR_RVOID(col);
freeUdfColumnData(&col->colData, &col->colMeta);
}
void freeUdfDataDataBlock(SUdfDataBlock *block) {
TAOS_UDF_CHECK_PTR_RVOID(block);
for (int32_t i = 0; i < block->numOfCols; ++i) {
freeUdfColumn(block->udfCols[i]);
taosMemoryFree(block->udfCols[i]);
@ -899,11 +913,17 @@ void freeUdfDataDataBlock(SUdfDataBlock *block) {
}
void freeUdfInterBuf(SUdfInterBuf *buf) {
TAOS_UDF_CHECK_PTR_RVOID(buf);
taosMemoryFree(buf->buf);
buf->buf = NULL;
}
int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlock) {
TAOS_UDF_CHECK_PTR_RCODE(block, udfBlock);
int32_t code = blockDataCheck(block);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
udfBlock->numOfRows = block->info.rows;
udfBlock->numOfCols = taosArrayGetSize(block->pDataBlock);
udfBlock->udfCols = taosMemoryCalloc(taosArrayGetSize(block->pDataBlock), sizeof(SUdfColumn *));
@ -973,6 +993,7 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
}
int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
TAOS_UDF_CHECK_PTR_RCODE(udfCol, block);
int32_t code = 0, lino = 0;
SUdfColumnMeta *meta = &udfCol->colMeta;
@ -998,6 +1019,8 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
}
block->info.rows = udfCol->colData.numOfRows;
code = blockDataCheck(block);
TAOS_CHECK_GOTO(code, &lino, _exit);
_exit:
if (code != 0) {
fnError("failed to convert udf column to data block, code:%d, line:%d", code, lino);
@ -1006,6 +1029,7 @@ _exit:
}
int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SSDataBlock *output) {
TAOS_UDF_CHECK_PTR_RCODE(input, output);
int32_t code = 0, lino = 0;
int32_t numOfRows = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
@ -1053,6 +1077,7 @@ _exit:
}
int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) {
TAOS_UDF_CHECK_PTR_RCODE(input, output);
if (taosArrayGetSize(input->pDataBlock) != 1) {
fnError("scalar function only support one column");
return 0;
@ -1131,6 +1156,7 @@ int32_t compareUdfcFuncSub(const void *elem1, const void *elem2) {
}
int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
TAOS_UDF_CHECK_PTR_RCODE(udfName, pHandle);
int32_t code = 0, line = 0;
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
SUdfcFuncStub key = {0};
@ -1189,6 +1215,7 @@ _exit:
}
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) {
TAOS_UDF_CHECK_PTR_RVOID(udfName);
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
SUdfcFuncStub key = {0};
tstrncpy(key.udfName, udfName, TSDB_FUNC_NAME_LEN);
@ -1291,6 +1318,7 @@ int32_t cleanUpUdfs() {
}
int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output) {
TAOS_UDF_CHECK_PTR_RCODE(udfName, input, output);
UdfcFuncHandle handle = NULL;
int32_t code = acquireUdfFuncHandle(udfName, &handle);
if (code != 0) {
@ -1320,6 +1348,10 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
}
bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv) {
if (pFunc == NULL || pEnv == NULL) {
fnError("udfAggGetEnv: invalid input lint: %d", __LINE__);
return false;
}
if (fmIsScalarFunc(pFunc->funcId)) {
return false;
}
@ -1328,6 +1360,7 @@ bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv) {
}
int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo) {
TAOS_UDF_CHECK_PTR_RCODE(pCtx, pResultCellInfo);
if (pResultCellInfo->initialized) {
return TSDB_CODE_SUCCESS;
}
@ -1369,6 +1402,7 @@ int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes
}
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
TAOS_UDF_CHECK_PTR_RCODE(pCtx);
int32_t udfCode = 0;
UdfcFuncHandle handle = 0;
if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) {
@ -1440,6 +1474,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
}
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) {
TAOS_UDF_CHECK_PTR_RCODE(pCtx, pBlock);
int32_t udfCode = 0;
UdfcFuncHandle handle = 0;
if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) {
@ -1534,7 +1569,7 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *
}
// TODO: the call buffer is setup and freed by udf invocation
taosMemoryFree(uvTask->rspBuf.base);
taosMemoryFreeClear(uvTask->rspBuf.base);
} else {
code = uvTask->errCode;
if (code != 0) {
@ -1759,7 +1794,7 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT
}
if (uv_sem_init(&uvTask->taskSem, 0) != 0) {
if (uvTaskType == UV_TASK_REQ_RSP) {
taosMemoryFree(uvTask->reqBuf.base);
taosMemoryFreeClear(uvTask->reqBuf.base);
}
fnError("udfc create uv task, init semaphore failed.");
return TSDB_CODE_UDF_UV_EXEC_FAILURE;
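Editor's note: most of the entry points in this file gained TAOS_UDF_CHECK_PTR_RCODE / TAOS_UDF_CHECK_PTR_RVOID guards so NULL arguments are rejected before anything is dereferenced. A simplified, self-contained sketch of how such a variadic pointer-check macro can be built; the names and error code are stand-ins, not the actual TDengine definitions:

#include <stddef.h>
#include <stdio.h>

#define ERR_INVALID_PTR (-1)

/* Stand-in for a "return an error code if any argument is NULL" guard. */
#define CHECK_PTR_RCODE(...)                                             \
  do {                                                                   \
    const void *ptrs_[] = { __VA_ARGS__ };                               \
    for (size_t i_ = 0; i_ < sizeof(ptrs_) / sizeof(ptrs_[0]); ++i_) {   \
      if (ptrs_[i_] == NULL) {                                           \
        fprintf(stderr, "NULL argument at position %zu\n", i_);          \
        return ERR_INVALID_PTR;                                          \
      }                                                                  \
    }                                                                    \
  } while (0)

static int scalarProc(const char *block, char *resultCol) {
  CHECK_PTR_RCODE(block, resultCol);   /* bail out before any dereference */
  resultCol[0] = block[0];
  return 0;
}

int main(void) {
  char out[4] = {0};
  printf("ok: %d\n", scalarProc("x", out));       /* 0  */
  printf("null: %d\n", scalarProc(NULL, out));    /* -1 */
  return 0;
}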

View File

@ -55,6 +55,7 @@ int32_t udfdCPluginOpen(SScriptUdfEnvItem *items, int numItems) { return 0; }
int32_t udfdCPluginClose() { return 0; }
int32_t udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
TAOS_UDF_CHECK_PTR_RCODE(udfCtx, udfName);
char initFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *initSuffix = "_init";
snprintf(initFuncName, sizeof(initFuncName), "%s%s", udfName, initSuffix);
@ -68,6 +69,7 @@ int32_t udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const cha
}
int32_t udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
TAOS_UDF_CHECK_PTR_RCODE(udfCtx, udfName);
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
TAOS_CHECK_RETURN(uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->aggProcFunc)));
@ -93,6 +95,7 @@ int32_t udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfNa
}
int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(udf, pUdfCtx);
int32_t err = 0;
SUdfCPluginCtx *udfCtx = taosMemoryCalloc(1, sizeof(SUdfCPluginCtx));
if (NULL == udfCtx) {
@ -146,6 +149,7 @@ _exit:
}
int32_t udfdCPluginUdfDestroy(void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
int32_t code = 0;
if (ctx->destroyFunc) {
@ -157,6 +161,7 @@ int32_t udfdCPluginUdfDestroy(void *udfCtx) {
}
int32_t udfdCPluginUdfScalarProc(SUdfDataBlock *block, SUdfColumn *resultCol, void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(block, resultCol, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->scalarProcFunc) {
return ctx->scalarProcFunc(block, resultCol);
@ -167,6 +172,7 @@ int32_t udfdCPluginUdfScalarProc(SUdfDataBlock *block, SUdfColumn *resultCol, vo
}
int32_t udfdCPluginUdfAggStart(SUdfInterBuf *buf, void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(buf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggStartFunc) {
return ctx->aggStartFunc(buf);
@ -178,6 +184,7 @@ int32_t udfdCPluginUdfAggStart(SUdfInterBuf *buf, void *udfCtx) {
}
int32_t udfdCPluginUdfAggProc(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf, void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(block, interBuf, newInterBuf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggProcFunc) {
return ctx->aggProcFunc(block, interBuf, newInterBuf);
@ -189,6 +196,7 @@ int32_t udfdCPluginUdfAggProc(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdf
int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf,
void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(inputBuf1, inputBuf2, outputBuf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggMergeFunc) {
return ctx->aggMergeFunc(inputBuf1, inputBuf2, outputBuf);
@ -199,6 +207,7 @@ int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2,
}
int32_t udfdCPluginUdfAggFinish(SUdfInterBuf *buf, SUdfInterBuf *resultData, void *udfCtx) {
TAOS_UDF_CHECK_PTR_RCODE(buf, resultData, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggFinishFunc) {
return ctx->aggFinishFunc(buf, resultData);
@ -360,6 +369,7 @@ int32_t udfdNewUdf(SUdf **pUdf, const char *udfName);
void udfdGetFuncBodyPath(const SUdf *udf, char *path);
int32_t udfdInitializeCPlugin(SUdfScriptPlugin *plugin) {
TAOS_UDF_CHECK_PTR_RCODE(plugin);
plugin->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
plugin->openFunc = udfdCPluginOpen;
plugin->closeFunc = udfdCPluginClose;
@ -378,6 +388,7 @@ int32_t udfdInitializeCPlugin(SUdfScriptPlugin *plugin) {
}
int32_t udfdLoadSharedLib(char *libPath, uv_lib_t *pLib, const char *funcName[], void **func[], int numOfFuncs) {
TAOS_UDF_CHECK_PTR_RCODE(libPath, pLib, funcName, func);
int err = uv_dlopen(libPath, pLib);
if (err != 0) {
fnError("can not load library %s. error: %s", libPath, uv_strerror(err));
@ -394,6 +405,7 @@ int32_t udfdLoadSharedLib(char *libPath, uv_lib_t *pLib, const char *funcName[],
}
int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) {
TAOS_UDF_CHECK_PTR_RCODE(plugin);
plugin->scriptType = TSDB_FUNC_SCRIPT_PYTHON;
// todo: windows support
snprintf(plugin->libPath, PATH_MAX, "%s", "libtaospyudf.so");
@ -439,6 +451,7 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) {
}
void udfdDeinitCPlugin(SUdfScriptPlugin *plugin) {
TAOS_UDF_CHECK_PTR_RVOID(plugin);
if (plugin->closeFunc) {
if (plugin->closeFunc() != 0) {
fnError("udf script c plugin close func failed.line:%d", __LINE__);
@ -457,8 +470,9 @@ void udfdDeinitCPlugin(SUdfScriptPlugin *plugin) {
}
void udfdDeinitPythonPlugin(SUdfScriptPlugin *plugin) {
TAOS_UDF_CHECK_PTR_RVOID(plugin);
if (plugin->closeFunc) {
if(plugin->closeFunc() != 0) {
if (plugin->closeFunc() != 0) {
fnError("udf script python plugin close func failed.line:%d", __LINE__);
}
}
@ -517,22 +531,29 @@ void udfdDeinitScriptPlugins() {
if (plugin != NULL) {
udfdDeinitPythonPlugin(plugin);
taosMemoryFree(plugin);
global.scriptPlugins[TSDB_FUNC_SCRIPT_PYTHON] = NULL;
}
plugin = global.scriptPlugins[TSDB_FUNC_SCRIPT_BIN_LIB];
if (plugin != NULL) {
udfdDeinitCPlugin(plugin);
taosMemoryFree(plugin);
global.scriptPlugins[TSDB_FUNC_SCRIPT_BIN_LIB] = NULL;
}
return;
}
void udfdProcessRequest(uv_work_t *req) {
TAOS_UDF_CHECK_PTR_RVOID(req);
SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data);
if (uvUdf == NULL) {
fnError("udf work is NULL");
return;
}
SUdfRequest request = {0};
if(decodeUdfRequest(uvUdf->input.base, &request) == NULL)
{
taosMemoryFree(uvUdf->input.base);
taosMemoryFreeClear(uvUdf->input.base);
fnError("udf request decode failed");
return;
}
@ -557,7 +578,7 @@ void udfdProcessRequest(uv_work_t *req) {
}
}
void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
static void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
udfInfo->bufSize = udf->bufSize;
if (udf->funcType == TSDB_FUNC_TYPE_AGGREGATE) {
udfInfo->funcType = UDF_FUNC_TYPE_AGG;
@ -573,7 +594,8 @@ void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
udfInfo->scriptType = udf->scriptType;
}
int32_t udfdInitUdf(char *udfName, SUdf *udf) {
static int32_t udfdInitUdf(char *udfName, SUdf *udf) {
TAOS_UDF_CHECK_PTR_RCODE(udfName, udf);
int32_t err = 0;
err = udfdFillUdfInfoFromMNode(global.clientRpc, udfName, udf);
if (err != 0) {
@ -611,6 +633,7 @@ int32_t udfdInitUdf(char *udfName, SUdf *udf) {
}
int32_t udfdNewUdf(SUdf **pUdf, const char *udfName) {
TAOS_UDF_CHECK_PTR_RCODE(pUdf, udfName);
SUdf *udfNew = taosMemoryCalloc(1, sizeof(SUdf));
if (NULL == udfNew) {
return terrno;
@ -654,6 +677,7 @@ void udfdFreeUdf(void *pData) {
}
int32_t udfdGetOrCreateUdf(SUdf **ppUdf, const char *udfName) {
TAOS_UDF_CHECK_PTR_RCODE(ppUdf, udfName);
uv_mutex_lock(&global.udfsMutex);
SUdf **pUdfHash = taosHashGet(global.udfsHash, udfName, strlen(udfName));
int64_t currTime = taosGetTimestampMs();
@ -693,6 +717,7 @@ int32_t udfdGetOrCreateUdf(SUdf **ppUdf, const char *udfName) {
}
void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
// TODO: tracable id from client. connect, setup, call, teardown
fnInfo("setup request. seq num: %" PRId64 ", udf name: %s", request->seqNum, request->setup.udfName);
@ -760,11 +785,55 @@ _send:
uvUdf->output = uv_buf_init(bufBegin, len);
taosMemoryFree(uvUdf->input.base);
taosMemoryFreeClear(uvUdf->input.base);
return;
}
static int32_t checkUDFScalaResult(SSDataBlock *block, SUdfColumn *output) {
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
return TSDB_CODE_SUCCESS;
}
if (output->colData.numOfRows != block->info.rows) {
fnError("udf scala result num of rows %d not equal to input rows %" PRId64, output->colData.numOfRows, block->info.rows);
return TSDB_CODE_UDF_FUNC_EXEC_FAILURE;
}
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_BYROW) {
for (int32_t i = 0; i < output->colData.numOfRows; ++i) {
if (!udfColDataIsNull(output, i)) {
if (IS_VAR_DATA_TYPE(output->colMeta.type)) {
TAOS_UDF_CHECK_CONDITION(output->colData.varLenCol.payload != NULL, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
TAOS_UDF_CHECK_CONDITION(output->colData.varLenCol.varOffsets[i] >= 0 &&
output->colData.varLenCol.varOffsets[i] < output->colData.varLenCol.payloadLen,
TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
} else {
TAOS_UDF_CHECK_CONDITION(
output->colMeta.bytes * output->colData.numOfRows <= output->colData.fixLenCol.dataLen,
TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
break;
}
}
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t checkUDFAggResult(SSDataBlock *block, SUdfInterBuf *output) {
if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
return TSDB_CODE_SUCCESS;
}
if (output->numOfResult != 1 && output->numOfResult != 0) {
fnError("udf agg result num of rows %d not equal to 1", output->numOfResult);
return TSDB_CODE_UDF_FUNC_EXEC_FAILURE;
}
TAOS_UDF_CHECK_CONDITION(output->buf != NULL, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
TAOS_UDF_CHECK_CONDITION(output->bufLen > 0, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
return TSDB_CODE_SUCCESS;
}
void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
SUdfCallRequest *call = &request->call;
fnDebug("call request. call type %d, handle: %" PRIx64 ", seq num %" PRId64, call->callType, call->udfHandle,
request->seqNum);
@ -787,6 +856,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
code = convertDataBlockToUdfDataBlock(&call->block, &input);
if (code == TSDB_CODE_SUCCESS) code = udf->scriptPlugin->udfScalarProcFunc(&input, &output, udf->scriptUdfCtx);
freeUdfDataDataBlock(&input);
if (code == TSDB_CODE_SUCCESS) code = checkUDFScalaResult(&call->block, &output);
if (code == TSDB_CODE_SUCCESS) code = convertUdfColumnToDataBlock(&output, &response.callRsp.resultData);
}
freeUdfColumn(&output);
@ -809,6 +879,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
if (outBuf.buf != NULL) {
code = udf->scriptPlugin->udfAggProcFunc(&input, &call->interBuf, &outBuf, udf->scriptUdfCtx);
freeUdfInterBuf(&call->interBuf);
if (code == TSDB_CODE_SUCCESS) code = checkUDFAggResult(&call->block, &outBuf);
subRsp->resultBuf = outBuf;
} else {
code = terrno;
@ -900,11 +971,12 @@ _exit:
break;
}
taosMemoryFree(uvUdf->input.base);
taosMemoryFreeClear(uvUdf->input.base);
return;
}
void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
SUdfTeardownRequest *teardown = &request->teardown;
fnInfo("teardown. seq number: %" PRId64 ", handle:%" PRIx64, request->seqNum, teardown->udfHandle);
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle);
@ -964,6 +1036,7 @@ _send:
}
void udfdGetFuncBodyPath(const SUdf *udf, char *path) {
TAOS_UDF_CHECK_PTR_RVOID(udf, path);
if (udf->scriptType == TSDB_FUNC_SCRIPT_BIN_LIB) {
#ifdef WINDOWS
snprintf(path, PATH_MAX, "%s%s_%d_%" PRIx64 ".dll", global.udfDataDir, udf->name, udf->version, udf->createdTime);
@ -987,6 +1060,7 @@ void udfdGetFuncBodyPath(const SUdf *udf, char *path) {
}
int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) {
TAOS_UDF_CHECK_PTR_RCODE(pFuncInfo, udf);
if (!osDataSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;
fnError("udfd create shared library failed since %s", terrstr());
@ -1022,6 +1096,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) {
}
void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
TAOS_UDF_CHECK_PTR_RVOID(parent, pMsg);
SUdfdRpcSendRecvInfo *msgInfo = (SUdfdRpcSendRecvInfo *)pMsg->info.ahandle;
if (pEpSet) {
@ -1093,6 +1168,7 @@ _return:
}
int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
TAOS_UDF_CHECK_PTR_RCODE(clientRpc, udfName, udf);
SRetrieveFuncReq retrieveReq = {0};
retrieveReq.numOfFuncs = 1;
retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN);
@ -1233,6 +1309,7 @@ void udfdCloseClientRpc() {
}
void udfdOnWrite(uv_write_t *req, int status) {
TAOS_UDF_CHECK_PTR_RVOID(req);
SUvUdfWork *work = (SUvUdfWork *)req->data;
if (status < 0) {
fnError("udfd send response error, length: %zu code: %s", work->output.len, uv_err_name(status));
@ -1254,6 +1331,7 @@ void udfdOnWrite(uv_write_t *req, int status) {
}
void udfdSendResponse(uv_work_t *work, int status) {
TAOS_UDF_CHECK_PTR_RVOID(work);
SUvUdfWork *udfWork = (SUvUdfWork *)(work->data);
if (udfWork->conn != NULL) {
@ -1274,6 +1352,7 @@ void udfdSendResponse(uv_work_t *work, int status) {
}
void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
TAOS_UDF_CHECK_PTR_RVOID(handle, buf);
SUdfdUvConn *ctx = handle->data;
int32_t msgHeadSize = sizeof(int32_t) + sizeof(int64_t);
if (ctx->inputCap == 0) {
@ -1307,6 +1386,10 @@ void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
}
bool isUdfdUvMsgComplete(SUdfdUvConn *pipe) {
if (pipe == NULL) {
fnError("udfd pipe is NULL, LINE:%d", __LINE__);
return false;
}
if (pipe->inputTotal == -1 && pipe->inputLen >= sizeof(int32_t)) {
pipe->inputTotal = *(int32_t *)(pipe->inputBuf);
}
@ -1318,6 +1401,7 @@ bool isUdfdUvMsgComplete(SUdfdUvConn *pipe) {
}
void udfdHandleRequest(SUdfdUvConn *conn) {
TAOS_UDF_CHECK_PTR_RVOID(conn);
char *inputBuf = conn->inputBuf;
int32_t inputLen = conn->inputLen;
@ -1350,6 +1434,7 @@ void udfdHandleRequest(SUdfdUvConn *conn) {
}
void udfdPipeCloseCb(uv_handle_t *pipe) {
TAOS_UDF_CHECK_PTR_RVOID(pipe);
SUdfdUvConn *conn = pipe->data;
SUvUdfWork *pWork = conn->pWorkList;
while (pWork != NULL) {
@ -1363,6 +1448,7 @@ void udfdPipeCloseCb(uv_handle_t *pipe) {
}
void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
TAOS_UDF_CHECK_PTR_RVOID(client, buf);
fnDebug("udfd read %zd bytes from client", nread);
if (nread == 0) return;
@ -1389,6 +1475,7 @@ void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
}
void udfdOnNewConnection(uv_stream_t *server, int status) {
TAOS_UDF_CHECK_PTR_RVOID(server);
if (status < 0) {
fnError("udfd new connection error. code: %s", uv_strerror(status));
return;
@ -1434,6 +1521,7 @@ _exit:
}
void udfdIntrSignalHandler(uv_signal_t *handle, int signum) {
TAOS_UDF_CHECK_PTR_RVOID(handle);
fnInfo("udfd signal received: %d\n", signum);
uv_fs_t req;
int32_t code = uv_fs_unlink(global.loop, &req, global.listenPipeName, NULL);
@ -1482,6 +1570,7 @@ static int32_t udfdInitLog() {
}
void udfdCtrlAllocBufCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
TAOS_UDF_CHECK_PTR_RVOID(buf);
buf->base = taosMemoryMalloc(suggested_size);
if (buf->base == NULL) {
fnError("udfd ctrl pipe alloc buffer failed");
@ -1491,6 +1580,7 @@ void udfdCtrlAllocBufCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *bu
}
void udfdCtrlReadCb(uv_stream_t *q, ssize_t nread, const uv_buf_t *buf) {
TAOS_UDF_CHECK_PTR_RVOID(q, buf);
if (nread < 0) {
fnError("udfd ctrl pipe read error. %s", uv_err_name(nread));
taosMemoryFree(buf->base);
@ -1507,7 +1597,7 @@ static void removeListeningPipe() {
int err = uv_fs_unlink(global.loop, &req, global.listenPipeName, NULL);
uv_fs_req_cleanup(&req);
if(err) {
fnError("remove listening pipe %s failed, reason:%s, lino:%d", global.listenPipeName, uv_strerror(err), __LINE__);
fnInfo("remove listening pipe %s : %s, lino:%d", global.listenPipeName, uv_strerror(err), __LINE__);
}
}
@ -1580,7 +1670,7 @@ static void udfdGlobalDataDeinit() {
taosHashCleanup(global.udfsHash);
uv_mutex_destroy(&global.udfsMutex);
uv_mutex_destroy(&global.scriptPluginsMutex);
taosMemoryFree(global.loop);
taosMemoryFreeClear(global.loop);
fnInfo("udfd global data deinit");
}
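
Editor's note: besides the same pointer-argument guards, udfd now validates what a UDF returns before forwarding it. checkUDFScalaResult verifies that a scalar UDF produced exactly one output row per input row (and, at the per-row level, that variable-length offsets stay inside the payload), checkUDFAggResult verifies that an aggregate produced at most one result row with a non-empty buffer, and udfdDeinitScriptPlugins now clears the global plugin slots after freeing them so a repeated deinit cannot double free. The checks are gated by tsSafetyCheckLevel; the level constants' values are not shown in this diff, so the sketch below uses hypothetical names to illustrate the gating pattern only.

/* Gating pattern sketch (hypothetical level names and values). */
enum { SAFETY_NEVER = 0, SAFETY_BASIC = 1, SAFETY_BYROW = 2 };

static int checkScalarResultSketch(int level, int resultRows, int inputRows) {
  if (level == SAFETY_NEVER) return 0;       /* validation disabled */
  if (resultRows != inputRows) return -1;    /* cheap cardinality check always runs */
  if (level == SAFETY_BYROW) {
    /* per-row checks (offset bounds, payload presence) only at the strictest level */
  }
  return 0;
}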

View File

@ -851,6 +851,7 @@ static int32_t slotDescCopy(const SSlotDescNode* pSrc, SSlotDescNode* pDst) {
static int32_t downstreamSourceCopy(const SDownstreamSourceNode* pSrc, SDownstreamSourceNode* pDst) {
COPY_OBJECT_FIELD(addr, sizeof(SQueryNodeAddr));
COPY_SCALAR_FIELD(clientId);
COPY_SCALAR_FIELD(taskId);
COPY_SCALAR_FIELD(schedId);
COPY_SCALAR_FIELD(execId);

View File

@ -5259,6 +5259,7 @@ static int32_t jsonToColumnDefNode(const SJson* pJson, void* pObj) {
}
static const char* jkDownstreamSourceAddr = "Addr";
static const char* jkDownstreamSourceClientId = "ClientId";
static const char* jkDownstreamSourceTaskId = "TaskId";
static const char* jkDownstreamSourceSchedId = "SchedId";
static const char* jkDownstreamSourceExecId = "ExecId";
@ -5268,6 +5269,9 @@ static int32_t downstreamSourceNodeToJson(const void* pObj, SJson* pJson) {
const SDownstreamSourceNode* pNode = (const SDownstreamSourceNode*)pObj;
int32_t code = tjsonAddObject(pJson, jkDownstreamSourceAddr, queryNodeAddrToJson, &pNode->addr);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDownstreamSourceClientId, pNode->clientId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDownstreamSourceTaskId, pNode->taskId);
}
@ -5288,6 +5292,9 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj;
int32_t code = tjsonToObject(pJson, jkDownstreamSourceAddr, jsonToQueryNodeAddr, &pNode->addr);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetUBigIntValue(pJson, jkDownstreamSourceClientId, &pNode->clientId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetUBigIntValue(pJson, jkDownstreamSourceTaskId, &pNode->taskId);
}

View File

@ -1769,6 +1769,9 @@ static int32_t downstreamSourceNodeInlineToMsg(const void* pObj, STlvEncoder* pE
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeValueI32(pEncoder, pNode->fetchMsgType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeValueU64(pEncoder, pNode->clientId);
}
return code;
}
@ -1793,6 +1796,9 @@ static int32_t msgToDownstreamSourceNodeInlineToMsg(STlvDecoder* pDecoder, void*
if (TSDB_CODE_SUCCESS == code) {
code = tlvDecodeValueI32(pDecoder, &pNode->fetchMsgType);
}
if (TSDB_CODE_SUCCESS == code && !tlvDecodeEnd(pDecoder)) {
code = tlvDecodeValueU64(pDecoder, &pNode->clientId);
}
return code;
}
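
Editor's note: clientId is serialized in three places, the node copy function, the JSON form, and the inline TLV message form above. In the TLV case the new field is appended after the existing fields and the decoder only reads it when bytes remain (!tlvDecodeEnd(pDecoder)), so plans serialized by older nodes, which lack the field, still decode cleanly. The sketch below shows that tail-append idea with a toy decoder; the names are illustrative, not the real TLV API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { const uint8_t *buf; size_t pos, len; } DecSketch;

static int decodeU64(DecSketch *d, uint64_t *v) {
  if (d->len - d->pos < sizeof(*v)) return -1;
  memcpy(v, d->buf + d->pos, sizeof(*v));   /* endianness ignored in this toy */
  d->pos += sizeof(*v);
  return 0;
}

static int decodeEnd(const DecSketch *d) { return d->pos >= d->len; }

static int decodeDownstreamSketch(DecSketch *d, uint64_t *taskId, uint64_t *clientId) {
  int code = decodeU64(d, taskId);        /* mandatory field, always present */
  if (code == 0 && !decodeEnd(d)) {       /* optional tail field: the new clientId */
    code = decodeU64(d, clientId);
  }
  return code;
}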

View File

@ -246,7 +246,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, E
return code;
}
static int parseTimestampOrInterval(const char** end, SToken* pToken, int16_t timePrec, int64_t* ts, int64_t* interval,
static int32_t parseTimestampOrInterval(const char** end, SToken* pToken, int16_t timePrec, int64_t* ts, int64_t* interval,
SMsgBuf* pMsgBuf, bool* isTs) {
if (pToken->type == TK_NOW) {
*isTs = true;

View File

@ -3311,11 +3311,11 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType)
} else {
resultType = gDisplyTypes[type2][type1];
}
if (resultType == -1) {
return TSDB_CODE_SCALAR_CONVERT_ERROR;
}
if (commonType->type == newType->type) {
commonType->bytes = TMAX(commonType->bytes, newType->bytes);
return TSDB_CODE_SUCCESS;
@ -3328,9 +3328,9 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType)
} else {
commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]);
}
commonType->type = resultType;
return TSDB_CODE_SUCCESS;
}
@ -13127,7 +13127,7 @@ static int32_t extractShowCreateViewResultSchema(int32_t* numOfCols, SSchema** p
}
static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) {
*numOfCols = 3;
*numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; // SHOW_VARIABLES_RESULT_COLS
*pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema));
if (NULL == (*pSchema)) {
return terrno;
@ -13138,13 +13138,17 @@ static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pS
strcpy((*pSchema)[0].name, "name");
(*pSchema)[1].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[1].bytes = TSDB_CONFIG_VALUE_LEN;
(*pSchema)[1].bytes = TSDB_CONFIG_PATH_LEN;
strcpy((*pSchema)[1].name, "value");
(*pSchema)[2].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[2].bytes = TSDB_CONFIG_SCOPE_LEN;
strcpy((*pSchema)[2].name, "scope");
(*pSchema)[3].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[3].bytes = TSDB_CONFIG_INFO_LEN;
strcpy((*pSchema)[3].name, "info");
return TSDB_CODE_SUCCESS;
}

View File

@ -3491,37 +3491,77 @@ static void eliminateProjPushdownProjIdx(SNodeList* pParentProjects, SNodeList*
}
}
static int32_t eliminateProjOptFindProjPrefixWithOrderCheck(SProjectLogicNode* pProj, SProjectLogicNode* pChild, SNodeList** pNewChildTargets, bool *orderMatch) {
int32_t code = 0;
SNode* pProjection = NULL, *pChildTarget = NULL;
*orderMatch = true;
FORBOTH(pProjection, pProj->pProjections, pChildTarget, pChild->node.pTargets) {
if (!pProjection) break;
if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) {
*orderMatch = false;
break;
}
if (pNewChildTargets) {
SNode* pNew = NULL;
code = nodesCloneNode(pChildTarget, &pNew);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(pNewChildTargets, pNew);
}
if (TSDB_CODE_SUCCESS != code && pNewChildTargets) {
nodesDestroyList(*pNewChildTargets);
*pNewChildTargets = NULL;
break;
}
}
}
return code;
}
static int32_t eliminateProjOptPushTargetsToSetOpChildren(SProjectLogicNode* pSetOp) {
SNode* pChildProj = NULL;
int32_t code = 0;
bool orderMatch = false;
FOREACH(pChildProj, pSetOp->node.pChildren) {
if (QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChildProj)) {
SProjectLogicNode* pChildLogic = (SProjectLogicNode*)pChildProj;
SNodeList* pNewChildTargetsForChild = NULL;
code = eliminateProjOptFindProjPrefixWithOrderCheck(pSetOp, pChildLogic, &pNewChildTargetsForChild, &orderMatch);
if (TSDB_CODE_SUCCESS != code) break;
nodesDestroyList(pChildLogic->node.pTargets);
pChildLogic->node.pTargets = pNewChildTargetsForChild;
alignProjectionWithTarget((SLogicNode*)pChildLogic);
if (pChildLogic->isSetOpProj) {
code = eliminateProjOptPushTargetsToSetOpChildren(pChildLogic);
if (TSDB_CODE_SUCCESS != code) break;
}
}
}
return code;
}
static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan,
SProjectLogicNode* pProjectNode) {
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProjectNode->node.pChildren, 0);
int32_t code = 0;
bool isSetOpProj = false;
bool orderMatch = false;
bool sizeMatch = LIST_LENGTH(pProjectNode->pProjections) == LIST_LENGTH(pChild->pTargets);
bool needReplaceTargets = true;
if (NULL == pProjectNode->node.pParent) {
SNodeList* pNewChildTargets = NULL;
code = nodesMakeList(&pNewChildTargets);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
SNode * pProjection = NULL, *pChildTarget = NULL;
bool orderMatch = true;
bool needOrderMatch =
isSetOpProj =
QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChild) && ((SProjectLogicNode*)pChild)->isSetOpProj;
if (needOrderMatch) {
if (isSetOpProj) {
// For sql: select ... from (select ... union all select ...);
// When eliminating the outer proj (the outer select), we have to make sure that the outer proj projections and
// union all project targets have same columns in the same order. See detail in TD-30188
FORBOTH(pProjection, pProjectNode->pProjections, pChildTarget, pChild->pTargets) {
if (!pProjection) break;
if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) {
orderMatch = false;
break;
}
SNode* pNew = NULL;
code = nodesCloneNode(pChildTarget, &pNew);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListStrictAppend(pNewChildTargets, pNew);
}
if (TSDB_CODE_SUCCESS != code) break;
code = eliminateProjOptFindProjPrefixWithOrderCheck(pProjectNode, (SProjectLogicNode*)pChild,
sizeMatch ? NULL : &pNewChildTargets, &orderMatch);
if (TSDB_CODE_SUCCESS == code && sizeMatch && orderMatch) {
pNewChildTargets = pChild->pTargets;
needReplaceTargets = false;
}
} else {
FOREACH(pProjection, pProjectNode->pProjections) {
@ -3530,7 +3570,7 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
SNode* pNew = NULL;
code = nodesCloneNode(pChildTarget, &pNew);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListStrictAppend(pNewChildTargets, pNew);
code = nodesListMakeStrictAppend(&pNewChildTargets, pNew);
}
break;
}
@ -3545,12 +3585,13 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
return code;
}
if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) &&
(!needOrderMatch || (needOrderMatch && orderMatch))) {
nodesDestroyList(pChild->pTargets);
pChild->pTargets = pNewChildTargets;
if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) && (!isSetOpProj || orderMatch)) {
if (needReplaceTargets) {
nodesDestroyList(pChild->pTargets);
pChild->pTargets = pNewChildTargets;
}
} else {
nodesDestroyList(pNewChildTargets);
if (needReplaceTargets) nodesDestroyList(pNewChildTargets);
OPTIMIZE_FLAG_SET_MASK(pProjectNode->node.optimizedFlag, OPTIMIZE_FLAG_ELIMINATE_PROJ);
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
@ -3573,7 +3614,11 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
NODES_CLEAR_LIST(pProjectNode->node.pChildren);
nodesDestroyNode((SNode*)pProjectNode);
// if pChild is a project logic node, remove its projection which is not reference by its target.
alignProjectionWithTarget(pChild);
if (needReplaceTargets) {
alignProjectionWithTarget(pChild);
// Since we have eliminated the outer proj, we need to push down the new targets to the children of the set operation.
if (isSetOpProj && orderMatch && !sizeMatch) code = eliminateProjOptPushTargetsToSetOpChildren((SProjectLogicNode*)pChild);
}
}
pCxt->optimized = true;
return code;
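
Editor's note: the projection-elimination rewrite above generalizes the earlier TD-30188 fix. When the child is a set-operation (union all) projection, the outer projections must be an in-order prefix of the child targets; if the lists also have the same length the child targets are reused as-is (needReplaceTargets stays false), otherwise the trimmed target list is pushed down recursively to the set operation's children. The helper below sketches the order-sensitive prefix test on plain string arrays in place of the SNodeList of SColumnNode.

#include <stdbool.h>
#include <string.h>

/* True when proj[0..nProj) equals the leading nProj entries of childTargets,
 * compared by column name and in the same order. */
static bool isOrderedPrefix(const char *const proj[], int nProj,
                            const char *const childTargets[], int nChild) {
  if (nProj > nChild) return false;
  for (int i = 0; i < nProj; ++i) {
    if (strcmp(proj[i], childTargets[i]) != 0) return false;
  }
  return true;
}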

View File

@ -23,7 +23,13 @@ extern "C" {
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
#define QUERY_PARAM_CHECK(_p) \
do { \
if ((_p) == NULL) { \
qError("function:%s, param invalid, line:%d", __FUNCTION__, __LINE__); \
return TSDB_CODE_TSC_INVALID_INPUT; \
} \
} while (0)
#ifdef __cplusplus
}
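
Editor's note: QUERY_PARAM_CHECK centralizes the NULL-argument handling that was previously written as ad-hoc if blocks in querymsg.c (see the replacements further below): log the offending function and line, then return TSDB_CODE_TSC_INVALID_INPUT. A minimal, self-contained usage sketch follows; the stub definitions stand in for the real qError and error-code value.

#include <stdint.h>
#include <stdio.h>

#define TSDB_CODE_TSC_INVALID_INPUT (-1)   /* real value defined elsewhere */
#define qError(...) fprintf(stderr, __VA_ARGS__)

#define QUERY_PARAM_CHECK(_p)                                                    \
  do {                                                                           \
    if ((_p) == NULL) {                                                          \
      qError("function:%s, param invalid, line:%d\n", __FUNCTION__, __LINE__);   \
      return TSDB_CODE_TSC_INVALID_INPUT;                                        \
    }                                                                            \
  } while (0)

static int32_t buildSomeMsgSketch(void *input, char **msg, int32_t *msgLen) {
  QUERY_PARAM_CHECK(input);
  QUERY_PARAM_CHECK(msg);
  QUERY_PARAM_CHECK(msgLen);
  /* ... build and return the serialized request here ... */
  return 0;
}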

View File

@ -59,6 +59,9 @@ const SSchema* tGetTbnameColumnSchema() {
}
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
if (!pSchema) {
return false;
}
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
@ -100,7 +103,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
}
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
if (!pSchema || !VALIDNUMOFCOLS(numOfCols)) {
return false;
}
@ -127,6 +130,7 @@ bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTag
static STaskQueue taskQueue = {0};
static void processTaskQueue(SQueueInfo *pInfo, SSchedMsg *pSchedMsg) {
if(!pSchedMsg || !pSchedMsg->ahandle) return;
__async_exec_fn_t execFn = (__async_exec_fn_t)pSchedMsg->ahandle;
(void)execFn(pSchedMsg->thandle);
taosFreeQitem(pSchedMsg);
@ -205,7 +209,11 @@ void destroyAhandle(void *ahandle) {
}
int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
bool persistHandle, void* rpcCtx) {
bool persistHandle, void* rpcCtx) {
QUERY_PARAM_CHECK(pTransporter);
QUERY_PARAM_CHECK(epSet);
QUERY_PARAM_CHECK(pInfo);
char* pMsg = rpcMallocCont(pInfo->msgInfo.len);
if (NULL == pMsg) {
qError("0x%" PRIx64 " msg:%s malloc failed", pInfo->requestId, TMSG_INFO(pInfo->msgType));
@ -236,6 +244,7 @@ int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransp
return asyncSendMsgToServerExt(pTransporter, epSet, pTransporterId, pInfo, false, NULL);
}
int32_t asyncFreeConnById(void* pTransporter, int64_t pid) {
QUERY_PARAM_CHECK(pTransporter);
return rpcFreeConnById(pTransporter, pid);
}
@ -314,6 +323,8 @@ void destroyQueryExecRes(SExecResult* pRes) {
}
// clang-format on
int32_t dataConverToStr(char* str, int64_t capacity, int type, void* buf, int32_t bufSize, int32_t* len) {
QUERY_PARAM_CHECK(str);
QUERY_PARAM_CHECK(buf);
int32_t n = 0;
switch (type) {
@ -420,6 +431,10 @@ int32_t dataConverToStr(char* str, int64_t capacity, int type, void* buf, int32_
}
void parseTagDatatoJson(void* p, char** jsonStr) {
if (!p || !jsonStr) {
qError("parseTagDatatoJson invalid input, line:%d", __LINE__);
return;
}
char* string = NULL;
SArray* pTagVals = NULL;
cJSON* json = NULL;
@ -520,6 +535,7 @@ end:
}
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@ -553,6 +569,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
}
void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType) {
if(!pMeta || !pName || !pType) return;
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
if (0 == strcmp(pName, pMeta->schema[i].name)) {
@ -576,6 +593,7 @@ void freeVgInfo(SDBVgInfo* vgInfo) {
}
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {
QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@ -617,6 +635,7 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {
}
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst) {
QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@ -674,6 +693,7 @@ int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst) {
_exit:
tdDestroySVCreateTbReq(*pDst);
taosMemoryFree(*pDst);
*pDst = NULL;
return terrno;
}
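
Editor's note: two small hardening patterns recur in this file: the new QUERY_PARAM_CHECK guards on the clone-style helpers, and, in cloneSVreateTbReq, resetting *pDst to NULL after the failure path frees the partially built object so the caller never holds a dangling pointer. A hedged sketch of the second pattern with illustrative names:

#include <stdlib.h>

/* Clone an int into *dst; the failure path frees the partial object and
 * clears the out-parameter, mirroring the _exit path above. */
static int cloneIntSketch(const int *src, int **dst) {
  if (dst == NULL) return -1;               /* parameter guard */
  if (src == NULL) { *dst = NULL; return 0; }
  *dst = malloc(sizeof(**dst));
  if (*dst == NULL) return -1;
  **dst = *src;
  if (**dst < 0) goto _exit;                /* stand-in for a deeper failure */
  return 0;

_exit:
  free(*dst);
  *dst = NULL;                              /* the key addition in this diff */
  return -1;
}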

View File

@ -29,6 +29,8 @@ int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int
int32_t (*queryProcessMsgRsp[TDMT_MAX])(void *output, char *msg, int32_t msgSize) = {0};
int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
QUERY_PARAM_CHECK(pOut);
QUERY_PARAM_CHECK(usedbRsp);
memcpy(pOut->db, usedbRsp->db, TSDB_DB_FNAME_LEN);
pOut->dbId = usedbRsp->uid;
@ -71,10 +73,10 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SBuildTableInput *pInput = input;
if (NULL == input || NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
STableInfoReq infoReq = {0};
infoReq.option = pInput->option;
@ -101,10 +103,10 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3
}
int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SBuildUseDBInput *pInput = input;
if (NULL == pInput || NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
SUseDbReq usedbReq = {0};
tstrncpy(usedbReq.db, pInput->db, TSDB_DB_FNAME_LEN);
@ -131,9 +133,8 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms
}
int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SQnodeListReq qnodeListReq = {0};
qnodeListReq.rowNum = -1;
@ -155,9 +156,8 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SDnodeListReq dnodeListReq = {0};
dnodeListReq.rowNum = -1;
@ -179,9 +179,8 @@ int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetSerVerMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SServerVerReq req = {0};
@ -202,9 +201,9 @@ int32_t queryBuildGetSerVerMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SDbCfgReq dbCfgReq = {0};
tstrncpy(dbCfgReq.db, input, TSDB_DB_FNAME_LEN);
@ -226,9 +225,9 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SUserIndexReq indexReq = {0};
tstrncpy(indexReq.indexFName, input, TSDB_INDEX_FNAME_LEN);
@ -251,9 +250,9 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t
int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SRetrieveFuncReq funcReq = {0};
funcReq.numOfFuncs = 1;
@ -288,9 +287,9 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3
}
int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SGetUserAuthReq req = {0};
tstrncpy(req.user, input, TSDB_USER_LEN);
@ -311,9 +310,9 @@ int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32
}
int32_t queryBuildGetTbIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
STableIndexReq indexReq = {0};
tstrncpy(indexReq.tbFName, input, TSDB_TABLE_FNAME_LEN);
@ -335,9 +334,9 @@ int32_t queryBuildGetTbIndexMsg(void *input, char **msg, int32_t msgSize, int32_
}
int32_t queryBuildGetTbCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SBuildTableInput *pInput = input;
STableCfgReq cfgReq = {0};
@ -362,9 +361,9 @@ int32_t queryBuildGetTbCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetViewMetaMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
SViewMetaReq req = {0};
tstrncpy(req.fullname, input, TSDB_VIEW_FNAME_LEN);
@ -387,9 +386,9 @@ int32_t queryBuildGetViewMetaMsg(void *input, char **msg, int32_t msgSize, int32
int32_t queryBuildGetTableTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
STableTSMAInfoReq req = {0};
tstrncpy(req.name, input, TSDB_TABLE_FNAME_LEN);
@ -411,9 +410,9 @@ int32_t queryBuildGetTableTSMAMsg(void *input, char **msg, int32_t msgSize, int3
int32_t queryBuildGetTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
STableTSMAInfoReq req = {0};
req.fetchingWithTsmaName = true;
@ -436,9 +435,9 @@ int32_t queryBuildGetTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *
}
int32_t queryBuildGetStreamProgressMsg(void* input, char** msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int64_t)) {
if (!msg || !msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
QUERY_PARAM_CHECK(input);
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(msgLen);
int32_t len = tSerializeStreamProgressReq(NULL, 0, input);
void* pBuf = (*mallcFp)(len);
@ -504,6 +503,7 @@ PROCESS_USEDB_OVER:
}
static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
QUERY_PARAM_CHECK(pMetaMsg);
if (pMetaMsg->numOfTags < 0 || pMetaMsg->numOfTags > TSDB_MAX_TAGS) {
qError("invalid numOfTags[%d] in table meta rsp msg", pMetaMsg->numOfTags);
return TSDB_CODE_TSC_INVALID_VALUE;
@ -539,6 +539,8 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
}
int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(pMeta);
pMeta->vgId = msg->vgId;
pMeta->tableType = msg->tableType;
pMeta->uid = msg->tuid;
@ -551,6 +553,8 @@ int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
}
int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(pMeta);
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
int32_t schemaExtSize = (useCompress(msg->tableType) && msg->pSchemaExt) ? sizeof(SSchemaExt) * msg->numOfColumns : 0;
@ -605,6 +609,8 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta *
}
int32_t queryCreateTableMetaExFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
QUERY_PARAM_CHECK(msg);
QUERY_PARAM_CHECK(pMeta);
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
int32_t schemaExtSize = (useCompress(msg->tableType) && msg->pSchemaExt) ? sizeof(SSchemaExt) * msg->numOfColumns : 0;

View File

@ -215,8 +215,8 @@ typedef struct SQWorkerMgmt {
#define QW_CTX_NOT_EXISTS_ERR_CODE(mgmt) \
(atomic_load_8(&(mgmt)->nodeStopped) ? TSDB_CODE_VND_STOPPED : TSDB_CODE_QRY_TASK_CTX_NOT_EXIST)
#define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId
#define QW_IDS() sId, qId, tId, rId, eId
#define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId, int32_t eId
#define QW_IDS() sId, qId, cId, tId, rId, eId
#define QW_FPARAMS() mgmt, QW_IDS()
#define QW_STAT_INC(_item, _n) (void)atomic_add_fetch_64(&(_item), _n)
@ -257,18 +257,20 @@ typedef struct SQWorkerMgmt {
#define QW_FETCH_RUNNING(ctx) ((ctx)->inFetch)
#define QW_QUERY_NOT_STARTED(ctx) (QW_GET_PHASE(ctx) == -1)
#define QW_SET_QTID(id, qId, tId, eId) \
do { \
*(uint64_t *)(id) = (qId); \
*(uint64_t *)((char *)(id) + sizeof(qId)) = (tId); \
*(int32_t *)((char *)(id) + sizeof(qId) + sizeof(tId)) = (eId); \
#define QW_SET_QTID(id, qId, cId, tId, eId) \
do { \
*(uint64_t *)(id) = (qId); \
*(uint64_t *)((char *)(id) + sizeof(qId)) = (cId); \
*(uint64_t *)((char *)(id) + sizeof(qId) + sizeof(cId)) = (tId); \
*(int32_t *)((char *)(id) + sizeof(qId) + sizeof(cId) + sizeof(tId)) = (eId); \
} while (0)
#define QW_GET_QTID(id, qId, tId, eId) \
do { \
(qId) = *(uint64_t *)(id); \
(tId) = *(uint64_t *)((char *)(id) + sizeof(qId)); \
(eId) = *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(tId)); \
#define QW_GET_QTID(id, qId, cId, tId, eId) \
do { \
(qId) = *(uint64_t *)(id); \
(cId) = *(uint64_t *)((char *)(id) + sizeof(qId)); \
(tId) = *(uint64_t *)((char *)(id) + sizeof(qId) + sizeof(cId)); \
(eId) = *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(cId) + sizeof(tId)); \
} while (0)
#define QW_ERR_RET(c) \
@ -310,25 +312,31 @@ typedef struct SQWorkerMgmt {
#define QW_SCH_ELOG(param, ...) qError("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__)
#define QW_SCH_DLOG(param, ...) qDebug("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__)
#define QW_TASK_ELOG(param, ...) qError("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
#define QW_TASK_WLOG(param, ...) qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
#define QW_TASK_DLOG(param, ...) qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
#define QW_TASK_ELOG(param, ...) \
qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
#define QW_TASK_WLOG(param, ...) \
qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
#define QW_TASK_DLOG(param, ...) \
qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
#define QW_TASK_DLOGL(param, ...) \
qDebugL("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
qDebugL("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
#define QW_TASK_ELOG_E(param) qError("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
#define QW_TASK_WLOG_E(param) qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
#define QW_TASK_DLOG_E(param) qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
#define QW_TASK_ELOG_E(param) \
qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
#define QW_TASK_WLOG_E(param) \
qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
#define QW_TASK_DLOG_E(param) \
qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
#define QW_SCH_TASK_ELOG(param, ...) \
qError("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
__VA_ARGS__)
#define QW_SCH_TASK_WLOG(param, ...) \
qWarn("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
__VA_ARGS__)
#define QW_SCH_TASK_DLOG(param, ...) \
qDebug("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
__VA_ARGS__)
#define QW_SCH_TASK_ELOG(param, ...) \
qError("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
qId, cId, tId, eId, __VA_ARGS__)
#define QW_SCH_TASK_WLOG(param, ...) \
qWarn("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \
cId, tId, eId, __VA_ARGS__)
#define QW_SCH_TASK_DLOG(param, ...) \
qDebug("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
qId, cId, tId, eId, __VA_ARGS__)
#define QW_LOCK_DEBUG(...) \
do { \

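Editor's note: the query worker keys its task hash tables by a packed byte string; this change widens that key from qId|tId|eId to qId|cId|tId|eId so tasks issued by different clients for the same query can no longer collide, and all task log macros now print the extra CID. Below is the equivalent packing written with plain memcpy, matching the QW_SET_QTID layout and the id[] buffer size used by the callers in qwUtil.c further down.

#include <stdint.h>
#include <string.h>

typedef struct {
  /* qId + cId + tId + eId, same size as the id[] buffers in qwUtil.c */
  char key[sizeof(uint64_t) * 3 + sizeof(int32_t)];
} TaskKeySketch;

static void packTaskKey(TaskKeySketch *k, uint64_t qId, uint64_t cId,
                        uint64_t tId, int32_t eId) {
  char *p = k->key;
  memcpy(p, &qId, sizeof(qId)); p += sizeof(qId);
  memcpy(p, &cId, sizeof(cId)); p += sizeof(cId);
  memcpy(p, &tId, sizeof(tId)); p += sizeof(tId);
  memcpy(p, &eId, sizeof(eId));
}
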
View File

@ -96,14 +96,14 @@ void qwDbgDumpSchInfo(SQWorker *mgmt, SQWSchStatus *sch, int32_t i) {
int32_t taskNum = taosHashGetSize(sch->tasksHash);
QW_DLOG("***The %dth scheduler status, hbBrokenTs:%" PRId64 ",taskNum:%d", i, sch->hbBrokenTs, taskNum);
uint64_t qId, tId;
uint64_t qId, cId, tId;
int32_t eId;
SQWTaskStatus *pTask = NULL;
void *pIter = taosHashIterate(sch->tasksHash, NULL);
while (pIter) {
pTask = (SQWTaskStatus *)pIter;
void *key = taosHashGetKey(pIter, NULL);
QW_GET_QTID(key, qId, tId, eId);
QW_GET_QTID(key, qId, cId, tId, eId);
QW_TASK_DLOG("job refId:%" PRIx64 ", code:%x, task status:%d", pTask->refId, pTask->code, pTask->status);
@ -118,13 +118,13 @@ void qwDbgDumpTasksInfo(SQWorker *mgmt) {
int32_t i = 0;
SQWTaskCtx *ctx = NULL;
uint64_t qId, tId;
uint64_t qId, cId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
QW_GET_QTID(key, qId, tId, eId);
QW_GET_QTID(key, qId, cId, tId, eId);
QW_TASK_DLOG("%p lock:%x, phase:%d, type:%d, explain:%d, needFetch:%d, localExec:%d, queryMsgType:%d, "
"sId:%" PRId64 ", level:%d, queryGotData:%d, queryRsped:%d, queryEnd:%d, queryContinue:%d, queryInQueue:%d, "

View File

@ -233,6 +233,7 @@ int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
qMsg.header.contLen = 0;
qMsg.sId = sId;
qMsg.queryId = qId;
qMsg.clientId = cId;
qMsg.taskId = tId;
qMsg.refId = rId;
qMsg.execId = eId;
@ -284,6 +285,7 @@ int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
req->header.vgId = mgmt->nodeId;
req->sId = sId;
req->queryId = qId;
req->clientId = cId;
req->taskId = tId;
req->execId = eId;
@ -312,6 +314,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
qMsg.header.contLen = 0;
qMsg.sId = sId;
qMsg.queryId = qId;
qMsg.clientId = cId;
qMsg.taskId = tId;
qMsg.refId = rId;
qMsg.execId = eId;
@ -416,6 +419,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@ -447,6 +451,7 @@ int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) {
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@ -479,6 +484,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@ -524,6 +530,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
uint64_t sId = msg->sId;
uint64_t qId = msg->queryId;
uint64_t cId = msg->clientId;
uint64_t tId = msg->taskId;
int64_t rId = 0;
int32_t eId = msg->execId;
@ -557,6 +564,7 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
uint64_t sId = req.sId;
uint64_t qId = req.queryId;
uint64_t cId = req.clientId;
uint64_t tId = req.taskId;
int64_t rId = 0;
int32_t eId = req.execId;
@ -604,12 +612,14 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
msg->sId = be64toh(msg->sId);
msg->queryId = be64toh(msg->queryId);
msg->clientId = be64toh(msg->clientId);
msg->taskId = be64toh(msg->taskId);
msg->refId = be64toh(msg->refId);
msg->execId = ntohl(msg->execId);
uint64_t sId = msg->sId;
uint64_t qId = msg->queryId;
uint64_t cId = msg->clientId;
uint64_t tId = msg->taskId;
int64_t rId = msg->refId;
int32_t eId = msg->execId;
@ -646,6 +656,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int6
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@ -684,6 +695,7 @@ int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@ -753,6 +765,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SD
uint64_t sId = req.sId;
uint64_t qId = req.queryId;
uint64_t cId = req.clientId;
uint64_t tId = req.taskId;
int64_t rId = 0;
int32_t eId = -1;
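
Editor's note: every task-level request handled here (query, continue-query, fetch, cancel, drop, notify, delete) now carries clientId next to queryId and taskId, and the cancel handler, which reads the raw message, byte-swaps it alongside the other ids. A minimal sketch of that receive-side conversion, assuming glibc's endian.h for be64toh; the struct is illustrative, not the real message layout.

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl */
#include <endian.h>      /* be64toh, glibc-specific */

typedef struct {
  uint64_t sId, queryId, clientId, taskId;
  int64_t  refId;
  int32_t  execId;
} TaskMsgSketch;

static void taskMsgToHost(TaskMsgSketch *m) {
  m->sId      = be64toh(m->sId);
  m->queryId  = be64toh(m->queryId);
  m->clientId = be64toh(m->clientId);            /* the newly added field */
  m->taskId   = be64toh(m->taskId);
  m->refId    = (int64_t)be64toh((uint64_t)m->refId);
  m->execId   = (int32_t)ntohl((uint32_t)m->execId);
}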

View File

@ -137,8 +137,8 @@ int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchS
void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }
int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
QW_LOCK(rwType, &sch->tasksLock);
*task = taosHashGet(sch->tasksHash, id, sizeof(id));
@ -153,8 +153,8 @@ int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, S
int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
int32_t code = 0;
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskStatus ntask = {0};
ntask.status = status;
@ -209,8 +209,8 @@ int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch
void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }
int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
*ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
if (NULL == (*ctx)) {
@ -222,8 +222,8 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
}
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
*ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
if (NULL == (*ctx)) {
@ -235,8 +235,8 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
}
int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx nctx = {0};
@ -347,6 +347,7 @@ int32_t qwSendExplainResponse(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
(void)memcpy(pExec, taosArrayGet(execInfoList, 0), localRsp.rsp.numOfPlans * sizeof(SExplainExecInfo));
localRsp.rsp.subplanInfo = pExec;
localRsp.qId = qId;
localRsp.cId = cId;
localRsp.tId = tId;
localRsp.rId = rId;
localRsp.eId = eId;
@ -376,8 +377,8 @@ _return:
int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx octx;
SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
@ -411,8 +412,8 @@ int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
SQWTaskStatus *task = NULL;
int32_t code = 0;
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
QW_TASK_WLOG_E("scheduler does not exist");
@ -465,8 +466,8 @@ _return:
int32_t qwHandleDynamicTaskEnd(QW_FPARAMS_DEF) {
char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, tId, eId);
char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx octx;
SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
@ -588,14 +589,14 @@ void qwDestroyImpl(void *pMgmt) {
mgmt->hbTimer = NULL;
taosTmrCleanUp(mgmt->timer);
uint64_t qId, tId;
uint64_t qId, cId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
QW_GET_QTID(key, qId, tId, eId);
QW_GET_QTID(key, qId, cId, tId, eId);
qwFreeTaskCtx(ctx);
QW_TASK_DLOG_E("task ctx freed");

Some files were not shown because too many files have changed in this diff.