diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..7d877987ac
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,5 @@
+# Pull Request Checklist
+
+- [ ] Is the user manual updated?
+- [ ] Do all test cases pass, and are they automated?
+- [ ] Is test coverage maintained without a significant decrease?
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index a9c829660a..88806222a0 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -1,9 +1,11 @@
import hudson.model.Result
import hudson.model.*;
import jenkins.model.CauseOfInterruption
-docs_only=0
node {
}
+file_zh_changed = ''
+file_en_changed = ''
+file_no_doc_changed = '1'
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -29,7 +31,7 @@ def abort_previous(){
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
-def check_docs() {
+def check_docs(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
hostname
@@ -40,39 +42,94 @@ def check_docs() {
cd ${WKC}
git reset --hard
git clean -f
- rm -rf examples/rust/
git remote prune origin
git fetch
- '''
- script {
- sh '''
- cd ${WKC}
- git checkout ''' + env.CHANGE_TARGET + '''
- '''
- }
- sh '''
- cd ${WKC}
- git remote prune origin
+ git checkout ''' + env.CHANGE_TARGET + '''
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
- git checkout -qf FETCH_HEAD
+ git checkout -qf FETCH_HEAD
'''
- def file_changed = sh (
+
+ file_zh_changed = sh (
script: '''
cd ${WKC}
- git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || :
+ git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep "^docs/zh/" || :
+ ''',
+ returnStdout: true
+ )
+
+ file_en_changed = sh (
+ script: '''
+ cd ${WKC}
+ git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep "^docs/en/" || :
+ ''',
+ returnStdout: true
+ )
+
+ file_no_doc_changed = sh (
+ script: '''
+ cd ${WKC}
+ git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v "*.md" || :
''',
returnStdout: true
).trim()
- if (file_changed == '') {
- echo "docs PR"
- docs_only=1
- } else {
- echo file_changed
- }
- env.FILE_CHANGED = file_changed
+ echo "file_zh_changed: ${file_zh_changed}"
+ echo "file_en_changed: ${file_en_changed}"
+ echo "file_no_doc_changed: ${file_no_doc_changed}"
}
}
+
+def build_pre_docs(){
+ if (env.CHANGE_URL =~ /\/TDengine\//) {
+ sh '''
+ hostname
+ date
+ env
+ '''
+
+ sh '''
+ cd ${DOC_WKC}/${td_repo}
+ git reset --hard
+ git clean -f
+ git remote prune origin
+ git fetch
+ git checkout ''' + env.CHANGE_TARGET + '''
+ git pull >/dev/null
+ git fetch origin +refs/pull/${CHANGE_ID}/merge
+ git checkout -qf FETCH_HEAD
+ '''
+
+ sh '''
+ cd ${DOC_WKC}/${tools_repo}
+ git reset --hard
+ git clean -f
+ git fetch
+ git remote prune origin
+ git checkout ''' + env.CHANGE_TARGET + '''
+ git pull >/dev/null
+ '''
+ }
+}
+
+def build_zh_docs(){
+ sh '''
+ cd ${DOC_WKC}/${zh_doc_repo}
+ # git pull
+ yarn ass local
+ yarn build
+ '''
+}
+
+def build_en_docs(){
+ sh '''
+ cd ${DOC_WKC}/${en_doc_repo}
+ # git pull
+ yarn ass local
+ yarn build
+ '''
+}
+
+
def pre_test(){
sh '''
hostname
@@ -153,6 +210,7 @@ def pre_test(){
'''
return 1
}
+
def pre_test_build_mac() {
sh '''
hostname
@@ -173,6 +231,7 @@ def pre_test_build_mac() {
date
'''
}
+
def pre_test_win(){
bat '''
hostname
@@ -273,17 +332,8 @@ def pre_test_win(){
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
- bat '''
- cd %WIN_CONNECTOR_ROOT%
- git branch
- git reset --hard
- git pull
- '''
- bat '''
- cd %WIN_CONNECTOR_ROOT%
- git log -5
- '''
}
+
def pre_test_build_win() {
bat '''
echo "building ..."
@@ -303,16 +353,14 @@ def pre_test_build_win() {
time /t
'''
bat '''
- cd %WIN_CONNECTOR_ROOT%
- python.exe -m pip install --upgrade pip
- python -m pip uninstall taospy -y
- python -m pip install taospy==2.7.16
- python -m pip uninstall taos-ws-py -y
- python -m pip install taos-ws-py==0.3.3
+ cd %WIN_COMMUNITY_ROOT%/tests/ci
+ pip3 install taospy==2.7.16
+ pip3 install taos-ws-py==0.3.5
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
+
def run_win_ctest() {
bat '''
echo "windows ctest ..."
@@ -322,10 +370,10 @@ def run_win_ctest() {
time /t
'''
}
+
def run_win_test() {
bat '''
echo "windows test ..."
- cd %WIN_CONNECTOR_ROOT%
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
@@ -344,28 +392,67 @@ pipeline {
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
WKPY = '/var/lib/jenkins/workspace/taos-connector-python'
+ DOC_WKC = '/root/doc_ci_work'
+ td_repo = 'TDengine'
+ zh_doc_repo = 'docs.taosdata.com'
+ en_doc_repo = 'docs.tdengine.com'
+ tools_repo = 'taos-tools'
}
stages {
- stage('check') {
+ stage ('check doc file changed') {
+ agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+ steps {
+ check_docs()
+ }
+ }
+
+ stage ('pre for build docs') {
when {
- allOf {
- not { expression { env.CHANGE_BRANCH =~ /docs\// }}
- }
+ beforeAgent true
+ expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' }
+ }
+ agent{label "doc_build_0_30"}
+ steps {
+ build_pre_docs()
+ }
+ }
+
+ stage('build Docs') {
+ when {
+ beforeAgent true
+ expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' }
}
parallel {
- stage('check docs') {
- agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+ stage('build zh docs') {
+ agent{label "doc_build_0_30"}
+ when {
+ expression { file_zh_changed != '' }
+ }
steps {
- check_docs()
+ build_zh_docs()
+ }
+ }
+ stage('build en docs') {
+ agent{label "doc_build_0_30"}
+ when {
+ expression { file_en_changed != '' }
+ }
+ steps {
+ build_en_docs()
}
}
}
+ post {
+ unsuccessful {
+ error('build docs stage failed, terminating pipeline.')
+ }
+ }
}
+
stage('run test') {
when {
- allOf {
- not { expression { env.CHANGE_BRANCH =~ /docs\// }}
- expression { docs_only == 0 }
+ expression {
+ file_no_doc_changed != '' && env.CHANGE_TARGET != 'docs-cloud'
}
}
parallel {
@@ -375,7 +462,6 @@ pipeline {
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
- WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
@@ -420,7 +506,7 @@ pipeline {
script {
sh '''
mkdir -p ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}
- echo "''' + env.FILE_CHANGED + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt
+ echo "''' + file_no_doc_changed + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt
'''
sh '''
cd ${WKC}/tests/parallel_test
diff --git a/cmake/cmake.define b/cmake/cmake.define
index ff582261b3..eb95feaf82 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -97,10 +97,13 @@ ELSE()
SET(TD_TAOS_TOOLS TRUE)
ENDIF()
+SET(TAOS_LIB taos)
+SET(TAOS_LIB_STATIC taos_static)
+
IF(${TD_WINDOWS})
- SET(TAOS_LIB taos_static)
+ SET(TAOS_LIB_PLATFORM_SPEC taos_static)
ELSE()
- SET(TAOS_LIB taos)
+ SET(TAOS_LIB_PLATFORM_SPEC taos)
ENDIF()
# build TSZ by default
diff --git a/cmake/cmake.version b/cmake/cmake.version
index c600c084fd..710ff87fe0 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.3.4.0.alpha")
+ SET(TD_VER_NUMBER "3.3.4.3.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md
index d2efd668b0..3852783c10 100644
--- a/docs/en/14-reference/03-taos-sql/10-function.md
+++ b/docs/en/14-reference/03-taos-sql/10-function.md
@@ -422,7 +422,7 @@ CAST(expr AS type_name)
TO_ISO8601(expr [, timezone])
```
-**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
+**Description**: The ISO8601 date/time format converted from a timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
**Return value type**: VARCHAR
@@ -466,7 +466,7 @@ return_timestamp: {
}
```
-**Description**: UNIX timestamp converted from a string of date/time format
+**Description**: The timestamp converted from a string in date/time format
**Return value type**: BIGINT, TIMESTAMP
@@ -1149,7 +1149,7 @@ TOP(expr, k)
UNIQUE(expr)
```
-**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. For a table with composite primary key, only the data with the smallest primary key value is returned.
+**Description**: Returns the unique values of this column. The effect is similar to the `distinct` keyword. For duplicate values, the row with the earliest timestamp is returned. For a table with a composite primary key, only the data with the smallest primary key value is returned.
**Return value type**:Same as the data type of the column being operated upon
diff --git a/docs/en/14-reference/03-taos-sql/31-compress.md b/docs/en/14-reference/03-taos-sql/31-compress.md
index 39abfe69bd..f726c8bbe6 100644
--- a/docs/en/14-reference/03-taos-sql/31-compress.md
+++ b/docs/en/14-reference/03-taos-sql/31-compress.md
@@ -30,11 +30,12 @@ In this article, it specifically refers to the level within the secondary compre
| Data Type | Optional Encoding Algorithm | Default Encoding Algorithm | Optional Compression Algorithm|Default Compression Algorithm| Default Compression Level|
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
-| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
+| int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
+| tinyint/utinyint/smallint/usmallint | simple8b| simple8b | lz4/zlib/zstd/xz| zlib| medium|
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
-|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
-|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
+|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| zstd| medium|
+|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| zstd| medium|
## SQL
diff --git a/docs/en/14-reference/05-connectors/30-python.mdx b/docs/en/14-reference/05-connectors/30-python.mdx
index 7263a3caa6..4f17261b33 100644
--- a/docs/en/14-reference/05-connectors/30-python.mdx
+++ b/docs/en/14-reference/05-connectors/30-python.mdx
@@ -41,12 +41,18 @@ We recommend using the latest version of `taospy`, regardless of the version of
|Python Client Library Version|major changes|
|:-------------------:|:----:|
+|2.7.16|add subscription configuration (session.timeout.ms, max.poll.interval.ms)|
+|2.7.15|added support for VARBINARY and GEOMETRY types|
+|2.7.14|fix known issues|
+|2.7.13|add TMQ synchronous submission offset interface|
|2.7.12|1. added support for `varbinary` type (STMT does not yet support)
2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|
|Python Websocket Connection Version|major changes|
|:----------------------------:|:-----:|
+|0.3.5|1. added support for VARBINARY and GEOMETRY types
2. Fix known issues|
+|0.3.2|1. optimize WebSocket SQL query and insertion performance
2. Fix known issues
3. Modify the readme and document|
|0.2.9|bugs fixes|
|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
diff --git a/docs/en/14-reference/05-connectors/35-node.mdx b/docs/en/14-reference/05-connectors/35-node.mdx
index 476f9bab71..2aeef7af1e 100644
--- a/docs/en/14-reference/05-connectors/35-node.mdx
+++ b/docs/en/14-reference/05-connectors/35-node.mdx
@@ -27,6 +27,8 @@ Node.js client library needs to be run with Node.js 14 or higher version.
| Node.js connector version | major changes | TDengine 版本 |
| :-----------------------: | :------------------: | :----------------:|
+| 3.1.2 | Optimized the data protocol and parsing, resulting in a significant improvement in performance | 3.2.0.0 or later |
+| 3.1.1 | Optimized data transmission performance | 3.2.0.0 or later |
| 3.1.0 | new version, supports websocket | 3.2.0.0 or later |
## Supported features
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 77d183a5ef..63aa6ed447 100755
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -773,7 +773,7 @@ lossyColumns float|double
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
```
-### ifAdtFse
+### ifAdtFse
| Attribute | Description |
| -------- | -------------------------------- |
@@ -898,4 +898,4 @@ lossyColumns float|double
| 53 | udf | Yes | Yes | |
| 54 | enableCoreFile | Yes | Yes | |
| 55 | ttlChangeOnWrite | No | Yes | |
-| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | |
+| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | see "KEEP_TIME_OFFSET" |
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index 486fe2c015..b24931b166 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -20,6 +20,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
+## 3.3.4.3
+
+
+
## 3.3.3.0
diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json
index 14303c8f37..d77c96fbb3 100644
--- a/docs/examples/node/package.json
+++ b/docs/examples/node/package.json
@@ -4,6 +4,12 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
- "@tdengine/websocket": "^3.1.1"
- }
+ "@tdengine/websocket": "^3.1.2"
+ },
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "keywords": [],
+ "author": "",
+ "description": ""
}
diff --git a/docs/examples/node/websocketexample/all_type_stmt.js b/docs/examples/node/websocketexample/all_type_stmt.js
old mode 100644
new mode 100755
index f095bee090..2297923e75
--- a/docs/examples/node/websocketexample/all_type_stmt.js
+++ b/docs/examples/node/websocketexample/all_type_stmt.js
@@ -95,8 +95,8 @@ async function all_type_example() {
tagParams.setBoolean([true]);
tagParams.setVarchar(["hello"]);
tagParams.setNchar(["stmt"]);
- tagParams.setGeometry([geometryData]);
tagParams.setVarBinary([vbData]);
+ tagParams.setGeometry([geometryData]);
await stmt.setTags(tagParams);
@@ -108,8 +108,8 @@ async function all_type_example() {
bindParams.setBoolean([true]);
bindParams.setVarchar(["hello"]);
bindParams.setNchar(["stmt"]);
- bindParams.setGeometry([geometryData]);
bindParams.setVarBinary([vbData]);
+ bindParams.setGeometry([geometryData]);
await stmt.bind(bindParams);
await stmt.batch();
diff --git a/docs/zh/06-advanced/02-cache.md b/docs/zh/06-advanced/02-cache.md
index 065adbf50a..875452205b 100644
--- a/docs/zh/06-advanced/02-cache.md
+++ b/docs/zh/06-advanced/02-cache.md
@@ -1,68 +1,44 @@
---
-sidebar_label: 数据缓存
-title: 数据缓存
+sidebar_label: 读缓存
+title: 读缓存
toc_max_heading_level: 4
---
-在工业互联网和物联网大数据应用场景中,时序数据库的性能表现尤为关键。这类应用程序不仅要求数据的实时写入能力,还需求能够迅速获取设备的最新状态或对最新数据进行实时计算。通常,大数据平台会通过部署 Redis 或类似的缓存技术来满足这些需求。然而,这种做法会增加系统的复杂性和运营成本。
+在物联网(IoT)和工业互联网(IIoT)大数据应用场景中,实时数据的价值往往远超历史数据。企业不仅需要数据处理系统具备高效的实时写入能力,更需要能快速获取设备的最新状态,或者对最新数据进行实时计算和分析。无论是工业设备的状态监控、车联网中的车辆位置追踪,还是智能仪表的实时读数,当前值都是业务运行中不可或缺的核心数据。这些数据直接关系到生产安全、运营效率以及用户体验。
-为了解决这一问题,TDengine 采用了针对性的缓存优化策略。通过精心设计的缓存机制,TDengine 实现了数据的实时高效写入和快速查询,从而有效降低整个集群的复杂性和运营成本。这种优化不仅提升了性能,还为用户带来了更简洁、易用的解决方案,使他们能够更专注于核心业务的发展。
+例如,在工业生产中,生产线设备的当前运行状态至关重要。操作员需要实时监控温度、压力、转速等关键指标,一旦设备出现异常,这些数据必须即时呈现,以便迅速调整工艺参数,避免停产或更大的损失。在车联网领域,以滴滴为例,车辆的实时位置数据是滴滴平台优化派单策略、提升运营效率的关键,确保每位乘客快速上车并享受更高质量的出行体验。
-## 写缓存
+同时,看板系统和智能仪表作为现场操作和用户端的窗口,也需要实时数据支撑。无论是工厂管理者通过看板获取的实时生产指标,还是家庭用户随时查询智能水表、电表的用量,实时性不仅影响到运营和决策效率,更直接关系到用户对服务的满意程度。
-TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。
+## 传统缓存方案的局限性
-在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。
+为了满足这些高频实时查询需求,许多企业选择将 Redis 等缓存技术集成到大数据平台中,通过在数据库和应用之间添加一层缓存来提升查询性能。然而,这种方法也带来了不少问题:
+- 系统复杂性增加:需要额外部署和维护缓存集群,对系统架构提出了更高的要求。
+- 运营成本上升:需要额外的硬件资源来支撑缓存,增加了维护和管理的开销。
+- 一致性问题:缓存和数据库之间的数据同步需要额外的机制来保障,否则可能出现数据不一致的情况。
-为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。
+## TDengine 的解决方案:内置读缓存
-创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个
-参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。
+为了解决这些问题,TDengine 针对物联网和工业互联网的高频实时查询场景,设计并实现了读缓存机制。这一机制能够自动将每张表的最后一条记录缓存到内存中,从而在不引入第三方缓存技术的情况下,直接满足用户对当前值的实时查询需求。
-例 如, 下面的 SQL 创建了包含 10 个 vgroup,每个 vnode 占 用 256MB 内存的数据库。
-```sql
-CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16;
-```
+TDengine 采用时间驱动的缓存管理策略,将最新数据优先存储在缓存中,查询时无需访问硬盘即可快速返回结果。当缓存容量达到设定上限时,系统会批量将最早的数据写入硬盘,既提升了查询效率,也有效减少了硬盘的写入负担,延长硬件使用寿命。
-缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助。
+用户可通过设置 cachemodel 参数,自定义缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活设计在物联网场景中尤为重要,使设备状态的实时查询更加高效精准。
-## 读缓存
+这种读缓存机制的内置化设计显著降低了查询延迟,避免了引入 Redis 等外部系统的复杂性和运维成本。同时,减少了频繁查询对存储系统的压力,大幅提升系统的整体吞吐能力,确保在高并发场景下依然稳定高效运行。通过读缓存,TDengine 为用户提供了一种更轻量化的实时数据处理方案,不仅优化了查询性能,还降低了整体运维成本,为物联网和工业互联网用户提供强有力的技术支持。
-在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如
-下 4 种情况:
-- none: 不缓存
-- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
-- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
-- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
+## TDengine 的读缓存配置
+
+在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如下 4 种情况:
+- none:不缓存
+- last_row:缓存子表最近一行数据,这将显著改善 last_row 函数的性能
+- last_value:缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE、ORDER BY、GROUP BY、INTERVAL)时的 last 函数的性能
+- both:同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
当使用数据库读缓存时,可以使用参数 cachesize 来配置每个 vnode 的内存大小。
-- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。需要根据机器内存合理配置。
+- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1,范围是 [1, 65536],单位是 MB。需要根据机器内存合理配置。
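+
+下面给出一个通过 Python 连接器 taospy 在建库时开启读缓存的简单示意(数据库名 power、CACHESIZE 取值 10 均为示例假设,实际请结合业务和内存情况调整):
+
+```python
+import taos  # taospy,TDengine 官方 Python 连接器
+
+# 连接 TDengine(地址、账号请按实际环境修改)
+conn = taos.connect(host="localhost", user="root", password="taosdata")
+
+# 建库时开启读缓存:同时缓存最近一行与每列最近的非 NULL 值,
+# 并为每个 vnode 分配 10MB 的读缓存
+conn.execute("CREATE DATABASE IF NOT EXISTS power CACHEMODEL 'both' CACHESIZE 10")
+
+conn.close()
+```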
-## 元数据缓存
-
-为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存
-
-```sql
-CREATE DATABASE POWER PAGES 128 PAGESIZE 16;
-```
-
-## 文件系统缓存
-
-TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。
-
-TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。
-
-数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。
-- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。
-- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
-
-```sql
-CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
-```
-
-在创建数据库时可以选择不同的参数类型,来选择性能优先或者可靠性优先。
-- 1: 写 WAL 但不执行 fsync ,新写入 WAL 的数据保存在文件系统缓存中但并未写入磁盘,这种方式性能优先
-- 2: 写 WAL 且执行 fsync,新写入 WAL 的数据被立即同步到磁盘上,可靠性更高
+关于数据库的具体创建,相关参数和操作说明请参考[创建数据库](../../reference/taos-sql/database/)。
## 实时数据查询的缓存实践
diff --git a/docs/zh/06-advanced/03-stream.md b/docs/zh/06-advanced/03-stream.md
index c47831dde3..c26924561c 100644
--- a/docs/zh/06-advanced/03-stream.md
+++ b/docs/zh/06-advanced/03-stream.md
@@ -124,7 +124,7 @@ create stream if not exists count_history_s fill_history 1 into count_history as
窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。
-因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算。
+因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式:MAX_DELAY 模式在窗口关闭时会立即触发计算,它的单位可以自行指定,具体单位:a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。此外,当数据写入后,计算触发的时间超过 MAX_DELAY 指定的时间,则立即触发计算。
### 流计算的窗口关闭
@@ -259,4 +259,4 @@ flush database test1;
5.修改 taos.cfg,去掉 disableStream 1,或将 disableStream 改为 0
-6.启动 taosd
\ No newline at end of file
+6.启动 taosd
diff --git a/docs/zh/06-advanced/05-data-in/05-opcua.md b/docs/zh/06-advanced/05-data-in/05-opcua.md
index 5795528d01..5123dacd1b 100644
--- a/docs/zh/06-advanced/05-data-in/05-opcua.md
+++ b/docs/zh/06-advanced/05-data-in/05-opcua.md
@@ -150,7 +150,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
#### 5.2. 选择数据点位
-可以通过配置 **根节点ID**、**命名空间**、**正则匹配** 等条件,对点位进行筛选。
+可以通过配置 **根节点ID**、**命名空间**、**节点ID**、**节点名称** 等条件,对点位进行筛选。
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
diff --git a/docs/zh/06-advanced/05-data-in/06-opcda.md b/docs/zh/06-advanced/05-data-in/06-opcda.md
index 7da5b89fe6..32ac1c1f8a 100644
--- a/docs/zh/06-advanced/05-data-in/06-opcda.md
+++ b/docs/zh/06-advanced/05-data-in/06-opcda.md
@@ -126,7 +126,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
#### 4.2. 选择数据点位
-可以通过配置 **根节点ID** 和 **正则匹配** 作为过滤条件,对点位进行筛选。
+可以通过配置 **根节点ID**、**节点ID**、**节点名称** 作为过滤条件,对点位进行筛选。
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.md
index f54086b61b..8fc69bcaa1 100644
--- a/docs/zh/06-advanced/05-data-in/07-mqtt.md
+++ b/docs/zh/06-advanced/05-data-in/07-mqtt.md
@@ -63,7 +63,7 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
在 **Clean Session** 中,选择是否清除会话。默认值为 true。
-在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。
+在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称和 QoS。使用如下格式设置:`{topic_name}::{qos}`(如:`my_topic::0`)。MQTT 协议 5.0 支持共享订阅,可以通过多个客户端订阅同一个 Topic 实现负载均衡,使用如下格式:`$share/{group_name}/{topic_name}::{qos}`,其中,`$share` 是固定前缀,表示启用共享订阅,`group_name` 是分组名称,类似 Kafka 的消费者组。
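+
+下面用 paho-mqtt(Python,1.x 版本写法)给出一个共享订阅的简单示意,仅用于说明同一分组内多个客户端分摊消息的概念;taosX 连接器内部会自行完成订阅,无需用户编写此类代码,代理地址与分组名均为示例假设:
+
+```python
+import paho.mqtt.client as mqtt
+
+def on_message(client, userdata, msg):
+    # 同一分组 g1 内的多个客户端会分摊收到 my_topic 上的消息
+    print(msg.topic, msg.payload)
+
+client = mqtt.Client(client_id="consumer-1")
+client.on_message = on_message
+client.connect("broker.example.com", 1883)      # MQTT 代理地址,示例假设
+client.subscribe("$share/g1/my_topic", qos=0)   # 等价于界面中填写 $share/g1/my_topic::0
+client.loop_forever()
+```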
点击 **检查连通性** 按钮,检查数据源是否可用。
@@ -146,7 +146,13 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
### 7. 高级选项
-在 **日志级别** 下拉列表中选择日志级别。有五个选项:`TRACE`、`DEBUG`、`INFO`、`WARN`、`ERROR`。 默认值为 INFO。
+在 **消息等待队列大小** 中填写接收 MQTT 消息的缓存队列大小,当队列满时,新到达的数据会直接丢弃。可设置为 0,即不缓存。
+
+在 **处理批次上限** 中填写可以同时进行数据处理流程的批次数量,当到达此上限后,不再从消息缓存队列中获取消息,会导致缓存队列的消息积压,最小值为 1。
+
+在 **批次大小** 中填写每次发送给数据处理流程的消息数量,和 **批次延时** 配合使用,当读取的 MQTT 消息数量达到批次大小时,就算 **批次延时** 没有到达也立即向数据处理流程发送数据,最小值为 1。
+
+在 **批次延时** 中填写每次生成批次消息的超时时间(单位:毫秒),从每批次接收到的第一个消息开始算起,和 **批次大小** 配合使用,当读取消息到达超时时间时,就算 **批次大小** 不满足数量也立即向数据处理流程发送数据,最小值为 1。
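+
+上述 **批次大小** 与 **批次延时** 是“数量或超时任一先满足即发送”的攒批策略,下面的 Python 片段用一个简化模型示意该行为(仅为概念示意,与连接器内部实现无关):
+
+```python
+import time
+
+BATCH_SIZE = 100       # 批次大小(示例取值)
+BATCH_DELAY_MS = 500   # 批次延时,单位毫秒(示例取值)
+
+buf = []
+first_msg_ts = None
+
+def send_to_pipeline(batch):
+    """把一个批次交给数据处理流程(示意函数)"""
+    print(f"flush {len(batch)} messages")
+
+def on_mqtt_message(msg):
+    """每收到一条 MQTT 消息调用一次"""
+    global buf, first_msg_ts
+    if not buf:
+        first_msg_ts = time.monotonic()  # 从本批第一条消息开始计时
+    buf.append(msg)
+    size_reached = len(buf) >= BATCH_SIZE
+    delay_reached = (time.monotonic() - first_msg_ts) * 1000 >= BATCH_DELAY_MS
+    if size_reached or delay_reached:    # 任一条件满足即立即发送
+        send_to_pipeline(buf)
+        buf, first_msg_ts = [], None
+```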
当 **保存原始数据时**,以下2个参数配置生效。
diff --git a/docs/zh/06-advanced/05-data-in/11-csv.md b/docs/zh/06-advanced/05-data-in/11-csv.md
index 79fbb40740..4924ed2fbd 100644
--- a/docs/zh/06-advanced/05-data-in/11-csv.md
+++ b/docs/zh/06-advanced/05-data-in/11-csv.md
@@ -2,86 +2,123 @@
title: "CSV"
sidebar_label: "CSV"
---
-本节讲述如何通过 Explorer 界面创建数据迁移任务, 从 CSV 迁移数据到当前 TDengine 集群。
+本节讲述如何通过 Explorer 界面创建数据迁移任务,从 CSV 迁移数据到当前 TDengine 集群。
## 功能概述
导入一个或多个 CSV 文件数据到 TDengine。
## 创建任务
### 1. 新增数据源
-在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
+在数据写入任务列表页面中,点击 **+新建任务** 按钮,进入新建任务页面。

### 2. 配置基本信息
-在 **名称** 中输入任务名称,如:“test_csv”;
+在 **名称** 中输入任务名称,如:“test_csv”。
在 **类型** 下拉列表中选择 **CSV**。
-在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
+在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮。

### 3. 配置 CSV 选项
-在 **包含表头** 区域点击开启或关闭,如果包含表头,则第一行将被视为列信息。
+在 **包含表头** 区域点击开启或关闭,如果包含表头,则 CSV 文件内容第一行将被视为列信息。
-在 **忽略前 N 行** 区域填写 N,表示忽略 CSV 文件的前 N 行。
+在 **忽略前 N 行** 区域填写数字 N,表示忽略 CSV 文件的前 N 行。
-在 **字段分隔符** 区域进行选择,CSV 字段之间的分隔符,默认是 “,” 。
+在 **字段分隔符** 区域选择 CSV 字段分隔符,用于分隔行内容为多个字段,默认是 `,`。
-在 **字段引用符** 区域进行选择,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 "“"。
+在 **字段引用符** 区域选择 CSV 字段引用符,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 `"`。
-在 **注释前缀符** 区域进行选择,当 CSV 文件中某行以此处指定的字符开头,则忽略该行默认是 “#”。
+在 **注释前缀符** 区域选择 CSV 行注释前缀符,当 CSV 文件中某行以此处指定的字符开头,则忽略该行,默认是 `#`。

### 4. 配置解析 CSV 文件
-在本地上传 CSV 文件,例如:test-json.csv,之后会使用这条示例 csv 文件来配置提取和过滤条件。
-#### 4.1 解析
+#### 4.1 配置数据源
-点击 **选取文件** 后,选择 test-json.csv,点击 **解析** 预览识别的列。
+包含“上传 CSV 文件”与“监听文件目录”两种方式,“上传 CSV 文件”是指将本地文件通过浏览器上传到 taosx 所在服务器作为数据源,“监听文件目录”是指配置一个 taosx 所在服务器的绝对路径作为数据源,以下将分别进行介绍:
+
+##### 4.1.1 上传 CSV 文件
+
+在“上传 CSV 文件”标签页中:
+
+点击 **选取文件** 按钮,选取一个或多个本地文件,上传到服务器作为数据源。
+
+在 **保留已处理文件** 区域点击开启或关闭,如果开启,则文件被处理完成后仍会保留在服务器中,如果关闭,则将被删除。

-**预览解析结果**
+##### 4.1.2 监听文件目录
+
+在“监听文件目录”标签页中:
+
+在 **文件监听目录** 中输入一个 taosx 所在服务器的绝对路径,路径中包含的文件及子目录文件将作为数据源。
+
+在 **匹配模式** 中输入一个正则表达式,用于筛选过滤目录中的文件。
+
+在 **监听新文件** 区域点击开启或关闭,如果开启,则任务永不停止,且持续处理目录中新增的文件,如果关闭,则不处理新增文件,且初始文件处理结束后任务变为完成状态。
+
+在 **监听间隔** 中输入一个数字,用于配置监听新文件的时间间隔。
+
+在 **文件处理顺序** 区域选择“正序”或“倒序”,用于指定文件列表的处理先后顺序,“正序”将按照文件名的字典序正序处理,“倒序”将按照文件名的字典序倒序处理,与此同时,程序总是保持先处理文件后处理同级子目录的顺序。

-#### 4.2 字段拆分
+#### 4.2 解析
-在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 message 字段拆分成 `text_0` 和 `text_1` 这2个字段,选择 split 提取器,seperator 填写 -, number 填写2。
-点击 **删除**,可以删除当前提取规则。
-点击 **新增**,可以添加更多提取规则。
+上传文件或配置监听目录后,点击解析按钮,页面将获取文件中的示例数据,同时得到识别的列与示例数据解析结果:
-
+
-点击 **放大镜图标** 可预览提取或拆分结果。
+#### 4.3 从列中提取或拆分
+
+在 **从列中提取或拆分** 中填写从消息体中提取或拆分的规则,例如:将 `desc` 字段拆分为 `desc_0` 与 `desc_1` 两个字段,可以选择 split 规则,separator 填写 `,`,number 填写 2 即可。
+
+点击 **删除** 可以删除当前提取规则。
+
+点击 **预览** 可以预览提取或拆分结果。
+
+点击 **新增提取/拆分** 可以添加更多提取规则。

-
-
-#### 4.3 表映射
+#### 4.4 映射
在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮
-在 **映射** 中,填写目标超级表中的子表名称,例如:`t_${groupid}`。
+在映射规则中,填写目标超级表中的子表名称,例如:`csv_meter_${id}`,同时配置映射到超级表的列。
-
-
-点击 **预览**,可以预览映射的结果。
-
-
+点击 **预览** 可以预览映射的结果。
+
### 5. 创建完成
-点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
+点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。
+
+
+
+### 6. 查看运行指标
+
+点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/zh/06-advanced/05-data-in/csv-01.png b/docs/zh/06-advanced/05-data-in/csv-01.png
index a183e0dcef..f1494f5c20 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-01.png and b/docs/zh/06-advanced/05-data-in/csv-01.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-02.png b/docs/zh/06-advanced/05-data-in/csv-02.png
index 909e7ff27c..05972c6810 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-02.png and b/docs/zh/06-advanced/05-data-in/csv-02.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-03.png b/docs/zh/06-advanced/05-data-in/csv-03.png
index 1e0bd97a51..bf5a939fe4 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-03.png and b/docs/zh/06-advanced/05-data-in/csv-03.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-04.png b/docs/zh/06-advanced/05-data-in/csv-04.png
index 189bdfa263..2fb0bed47e 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-04.png and b/docs/zh/06-advanced/05-data-in/csv-04.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-05.png b/docs/zh/06-advanced/05-data-in/csv-05.png
index d7e2b51ccb..ec36377c9e 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-05.png and b/docs/zh/06-advanced/05-data-in/csv-05.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-06.png b/docs/zh/06-advanced/05-data-in/csv-06.png
index 398d1dd903..0a3f794b33 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-06.png and b/docs/zh/06-advanced/05-data-in/csv-06.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-07.png b/docs/zh/06-advanced/05-data-in/csv-07.png
index 7fd5ca4a89..ac9a4c1a2c 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-07.png and b/docs/zh/06-advanced/05-data-in/csv-07.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-08.png b/docs/zh/06-advanced/05-data-in/csv-08.png
new file mode 100644
index 0000000000..0cd525dc98
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/csv-08.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-09.png b/docs/zh/06-advanced/05-data-in/csv-09.png
new file mode 100644
index 0000000000..706d959a8d
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/csv-09.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-10.png b/docs/zh/06-advanced/05-data-in/csv-10.png
index c0bb68f373..fcb82af87c 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-10.png and b/docs/zh/06-advanced/05-data-in/csv-10.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-11.png b/docs/zh/06-advanced/05-data-in/csv-11.png
index 268e4788ab..352ae3dcdc 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-11.png and b/docs/zh/06-advanced/05-data-in/csv-11.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-12.png b/docs/zh/06-advanced/05-data-in/csv-12.png
index 5a9f312b01..92fd670adf 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-12.png and b/docs/zh/06-advanced/05-data-in/csv-12.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-13.png b/docs/zh/06-advanced/05-data-in/csv-13.png
index ed5108d3eb..a475bf6660 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-13.png and b/docs/zh/06-advanced/05-data-in/csv-13.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-14.png b/docs/zh/06-advanced/05-data-in/csv-14.png
deleted file mode 100644
index e98d8ce334..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/csv-14.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/mqtt-14.png b/docs/zh/06-advanced/05-data-in/mqtt-14.png
index 0388d8a705..df80108d29 100644
Binary files a/docs/zh/06-advanced/05-data-in/mqtt-14.png and b/docs/zh/06-advanced/05-data-in/mqtt-14.png differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png b/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png
index 9ab69fb386..b47463dfbb 100644
Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png and b/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png b/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png
index 783adae3be..18f01e8885 100644
Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md
new file mode 100644
index 0000000000..9aaa123299
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/02-management.md
@@ -0,0 +1,128 @@
+---
+title: "安装部署"
+sidebar_label: "安装部署"
+---
+
+### 环境准备
+使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/MacOS 等平台上,同时需要 3.10 或以上版本的 Python 环境支持。
+> 部署 Anode 需要 TDengine Enterprise 3.3.4.3 及以后版本,请首先确认搭配 Anode 使用的 TDengine 能够支持 Anode。
+
+### 安装及卸载
+不同操作系统上安装及部署 Anode 有一些差异,主要是卸载操作、安装路径、服务启停等方面。本文以 Linux 系统为例,说明安装部署的流程。
+使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,命令如下:
+
+```bash
+tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
+cd TDengine-enterprise-anode-1.0.0
+sudo ./install.sh
+```
+
+对于已经安装的 Anode,执行命令 `rmtaosanode` 即可完成卸载。
+为了避免影响系统已有的 Python 环境,Anode 使用虚拟环境运行。安装 Anode 会在目录 `/var/lib/taos/taosanode/venv/` 中创建默认的 Python 虚拟环境,Anode 运行所需要的库均安装在该目录下。为了避免反复安装虚拟环境带来的开销,卸载命令 `rmtaosanode` 并不会自动删除该虚拟环境,如果您确认不再需要 Python 的虚拟环境,手动删除该目录即可。
+
+### 启停服务
+在 Linux 系统中,安装 Anode 以后会自动创建 `taosanoded` 服务。可以使用 `systemd` 来管理 Anode 服务,使用如下命令启动/停止/检查 Anode。
+
+```bash
+systemctl start taosanoded
+systemctl stop taosanoded
+systemctl status taosanoded
+```
+
+### 目录及配置说明
+安装完成后,Anode 主体目录结构如下:
+
+|目录/文件|说明|
+|---------------|------|
+|/usr/local/taos/taosanode/bin|可执行文件目录|
+|/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/|
+|/usr/local/taos/taosanode/lib|库文件目录|
+|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model|
+|/var/log/taos/taosanode/|日志文件目录|
+|/etc/taos/taosanode.ini|配置文件|
+
+#### 配置说明
+
+Anode 的服务需要使用 uWSGI 驱动运行,因此 Anode 和 uWSGI 的配置信息共同存放在相同的配置文件 `taosanode.ini` 中,该配置文件默认位于 `/etc/taos/` 目录下。
+具体内容及配置项说明如下:
+
+```ini
+[uwsgi]
+
+# Anode RESTful service ip:port
+http = 127.0.0.1:6090
+
+# base directory for Anode python files, do NOT modify this
+chdir = /usr/local/taos/taosanode/lib
+
+# initialize Anode python file
+wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
+
+# pid file
+pidfile = /usr/local/taos/taosanode/taosanode.pid
+
+# conflict with systemctl, so do NOT uncomment this
+# daemonize = /var/log/taos/taosanode/taosanode.log
+
+# log directory
+logto = /var/log/taos/taosanode/taosanode.log
+
+# uWSGI monitor port
+stats = 127.0.0.1:8387
+
+# python virtual environment directory, used by Anode
+virtualenv = /usr/local/taos/taosanode/venv/
+
+[taosanode]
+# default app log file
+app-log = /var/log/taos/taosanode/taosanode.app.log
+
+# model storage directory
+model-dir = /usr/local/taos/taosanode/model/
+
+# default log level
+log-level = DEBUG
+
+```
+
+**提示**
+请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而导致 Anode 无法正常启动。
+上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGI 官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。
+
+Anode 的运行配置主要包括以下几项:
+- app-log:Anode 服务运行产生的日志,用户可以将其调整到需要的位置
+- model-dir:算法针对已有数据集训练完成后生成的模型的存储位置
+- log-level:app-log 文件的日志级别
+
+
+### Anode 基本操作
+对于 Anode 的管理,用户需要通过 TDengine 的命令行接口 taos 进行。因此下述介绍的管理命令都需要先打开 taos, 连接到 TDengine 运行实例。
+#### 创建 Anode
+```sql
+CREATE ANODE {node_url}
+```
+node_url 是提供服务的 Anode 的 IP 和 PORT 组成的字符串,例如:`create anode '127.0.0.1:6090'`。Anode 启动后还需要注册到 TDengine 集群中才能提供服务。不建议将 Anode 同时注册到两个集群中。
+
+#### 查看 Anode
+列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`等属性。
+```sql
+SHOW ANODES;
+```
+
+#### 查看提供的时序数据分析服务
+
+```SQL
+SHOW ANODES FULL;
+```
+
+#### 刷新集群中的分析算法缓存
+```SQL
+UPDATE ANODE {anode_id}
+UPDATE ALL ANODES
+```
+
+#### 删除 Anode
+```sql
+DROP ANODE {anode_id}
+```
+删除 Anode 只是将 Anode 从 TDengine 集群中删除,管理 Anode 的启停仍然需要使用 `systemctl` 命令。卸载 Anode 则需要使用上面提到的 `rmtaosanode` 命令。
diff --git a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md
new file mode 100644
index 0000000000..9efd2bdf11
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md
@@ -0,0 +1,49 @@
+---
+title: "数据分析预处理"
+sidebar_label: "数据分析预处理"
+---
+
+import activity from './pic/activity.png';
+import wndata from './pic/white-noise-data.png'
+
+### 分析流程
+时序数据分析之前需要有预处理的过程,为减轻分析算法的负担,TDgpt 在将时序数据发给具体分析算法进行分析时,已经对数据做了预处理,整体的流程如下图所示。
+
+
+
+TDgpt 首先对输入数据进行白噪声检查(White Noise Data check),检查通过以后,针对预测分析还要进行输入(历史)数据的重采样和时间戳对齐处理(异常检测会跳过数据重采样和时间戳对齐步骤)。
+预处理完成以后,再进行预测或异常检测操作。预处理过程是预测或异常检测处理逻辑的一部分。
+
+### 白噪声检查
+
+
+
+白噪声时序数据可以简单地认为是随机数构成的时间数据序列(如上图所示的正态分布随机数序列),随机数构成的时间序列没有分析的价值,因此会直接返回。白噪声检查采用经典的 `Ljung-Box` 统计量检验,计算 `Ljung-Box` 统计量需遍历整个输入时间序列。如果用户能够明确输入序列一定不是白噪声序列,那么可以在参数列表中增加参数 `wncheck=0` 强制要求分析平台忽略白噪声检查,从而节省计算资源。
+TDgpt 暂不提供独立的时间序列白噪声检测功能。
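+
+下面用 statsmodels 提供的 Ljung-Box 检验给出一个白噪声判断的简单示意(仅为概念演示,并非 TDgpt 的内部实现,滞后阶数 10 与显著性水平 0.05 均为常用取值的假设):
+
+```python
+import numpy as np
+from statsmodels.stats.diagnostic import acorr_ljungbox
+
+rng = np.random.default_rng(42)
+series = rng.normal(size=200)  # 人工构造的正态随机序列(典型的白噪声)
+
+# 计算 Ljung-Box 统计量及对应的 p 值
+result = acorr_ljungbox(series, lags=[10], return_df=True)
+p_value = result["lb_pvalue"].iloc[0]
+
+if p_value > 0.05:
+    print("无法拒绝白噪声假设,序列不具备分析价值,直接返回")
+else:
+    print("非白噪声,继续预测或异常检测流程")
+```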
+
+
+### 重采样和时间戳对齐
+
+对于进行预测分析的时间序列数据,在进行预测分析前需要进行必要的预处理。预处理主要解决以下两个问题:
+
+- 真实时间序列数据时间戳未对齐。由于数据生成设备的原因或网关赋值时间戳的时候并不能保证按照严格的时间间隔赋值,时间序列数据并不能保证是严格按照采样频率对齐。例如采样频率为 1Hz 的一个时间序列数据序列,其时间戳序列如下:
+
+ > ['20:12:21.143', '20:12:22.187', '20:12:23.032', '20:12:24.384', '20:12:25.033']
+
+ 预测返回的时间序列时间戳会严格对齐,例如返回后续的两个预测结果的时间戳,其时间一定如下:['20:12:26.000', '20:12:27.000']。因此上述的输入时间戳序列要进行时间戳对齐,变换成为如下时间戳序列
+
+ > ['20:12:21.000', '20:12:22.000', '20:12:23.000', '20:12:24.000', '20:12:25.000']
+
+
+- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样时间间隔是 5 sec,但是要求输出预测结果的采样时间间隔是 10sec
+
+ > ['20:12:20.000', '20:12:25.000', '20:12:30.000', '20:12:35.000', '20:12:40.000']
+
+ 重采样为采样间隔为 10sec 的时间戳序列
+
+ > ['20:12:20.000', '20:12:30.000', '20:12:40.000']
+
+ 然后将其作为预测分析的输入, ['20:12:25.000', '20:12:35.000'] 数据被丢弃。
+
+需要注意的是,预处理过程不支持缺失数据补齐操作,如果输入时间序列数据为 ['20:12:10.113', '20:12:21.393', '20:12:29.143', '20:12:51.330'],并且要求的采样时间间隔为 10sec,重整对齐后的时间戳序列是 ['20:12:10.000', '20:12:20.000', '20:12:30.000', '20:12:50.000'],那么对该序列进行预测分析将返回错误。
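+
+下面的 Python 片段用 pandas 示意时间戳对齐与重采样的基本思路(仅为概念演示,时间与数值均为示例,实际预处理由 TDgpt 在服务端自动完成):
+
+```python
+import pandas as pd
+
+# 未按采样间隔对齐的原始时间戳及测量值(示例数据)
+raw_ts = pd.to_datetime([
+    "2020-01-01 20:12:21.143", "2020-01-01 20:12:22.187",
+    "2020-01-01 20:12:23.032", "2020-01-01 20:12:24.384",
+    "2020-01-01 20:12:25.033",
+])
+s = pd.Series([13, 14, 8, 10, 16], index=raw_ts)
+
+# 时间戳对齐:按 1 秒粒度向下取整
+s.index = s.index.floor("1s")
+
+# 重采样:若要求输出间隔为 2 秒,则每个 2 秒窗口只保留第一个值,其余数据被丢弃
+resampled = s.resample("2s").first().dropna()
+print(resampled)
+```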
+
diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
similarity index 74%
rename from docs/zh/06-advanced/06-data-analysis/01-arima.md
rename to docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
index b9d63e924f..469f557984 100644
--- a/docs/zh/06-advanced/06-data-analysis/01-arima.md
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
@@ -3,14 +3,14 @@ title: "ARIMA"
sidebar_label: "ARIMA"
---
-本节讲述 ARIMA 算法模型的使用方法。
+本节说明 ARIMA 算法模型的使用方法。
## 功能概述
-ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
+ARIMA:Autoregressive Integrated Moving Average,即自回归移动平均模型,记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
->平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。
+> 平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。
以下参数可以动态输入,控制预测过程中生成合适的 ARIMA 模型。
@@ -38,6 +38,11 @@ ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的
FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5")
```
+完整的调用 SQL 语句如下:
+```SQL
+SELECT _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") from foo
+```
+
```json5
{
"rows": fc_rows, // 返回结果的行数
@@ -51,4 +56,4 @@ FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5
### 参考文献
- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model
-- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415
+- [https://baike.baidu.com/item/自回归滑动平均模型/5023931](https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931)
diff --git a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md
similarity index 89%
rename from docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
rename to docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md
index 38662ca2b3..7e92a8ae1a 100644
--- a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md
@@ -23,11 +23,16 @@ HoltWinters 有两种不同的季节性组成部分,当季节变化在该时
参数 `trend` 和 `seasonal`的均可以选择 `add` (加法模型)或 `mul`(乘法模型)。
### 示例及结果
-针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势采用乘法模型,季节采用乘法模型
+针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势参数采用乘法模型,季节参数采用乘法模型
```
FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")
```
+完整的调用 SQL 语句如下:
+```SQL
+SELECT _frowts, FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul") from foo
+```
+
```json5
{
"rows": rows, // 返回结果的行数
diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
new file mode 100644
index 0000000000..c7388ab9c0
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
@@ -0,0 +1,93 @@
+---
+title: 预测算法
+description: 预测算法
+---
+
+时序数据预测处理以持续一个时间段的时序数据作为输入,预测接下来一个连续时间区间内时间序列数据趋势。用户可以指定输出的(预测)时间序列数据点的数量,因此其输出的结果行数不确定。为此,TDengine 使用新 SQL 函数 `FORECAST` 提供时序数据预测服务。基础数据(用于预测的历史时间序列数据)是该函数的输入,预测结果是该函数的输出。用户可以通过 `FORECAST` 函数调用 Anode 提供的预测算法提供的服务。
+
+在后续章节中,使用时序数据表`foo`作为示例,介绍预测和异常检测算法的使用方式,`foo` 表的模式如下:
+
+|列名称|类型|说明|
+|---|---|---|
+|ts| timestamp| 主时间戳列|
+|i32| int32| 4字节整数,设备测量值 metric|
+
+```bash
+taos> select * from foo;
+ ts | k |
+========================================
+ 2020-01-01 00:00:12.681 | 13 |
+ 2020-01-01 00:00:13.727 | 14 |
+ 2020-01-01 00:00:14.378 | 8 |
+ 2020-01-01 00:00:15.774 | 10 |
+ 2020-01-01 00:00:16.170 | 16 |
+ 2020-01-01 00:00:17.558 | 26 |
+ 2020-01-01 00:00:18.938 | 32 |
+ 2020-01-01 00:00:19.308 | 27 |
+```
+
+### 语法
+```SQL
+FORECAST(column_expr, option_expr)
+
+option_expr: {"
+algo=expr1
+[,wncheck=1|0]
+[,conf=conf_val]
+[,every=every_val]
+[,rows=rows_val]
+[,start=start_ts_val]
+[,expr2]
+"}
+
+```
+1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。
+2. `options`:预测函数的参数,使用规则与 anomaly_window 相同。预测支持 `conf`、`every`、`rows`、`start` 几个控制参数,其含义如下:
+
+### 参数说明
+
+|参数|含义|默认值|
+|---|---|---|
+|algo|预测分析使用的算法|holtwinters|
+|wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查|
+|conf|预测数据的置信区间范围,取值范围 [0, 100]|95|
+|every|预测数据的采样间隔|输入数据的采样间隔|
+|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样间隔时间区间|
+|rows|预测结果的记录数|10|
+
+1. 预测查询结果新增三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界。对于没有置信区间的预测算法,其置信区间同预测结果。
+2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。
+3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。
+4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。
+
+### 示例
+
+```SQL
+--- 使用 arima 算法进行预测,预测结果是 10 条记录(默认值),数据进行白噪声检查,默认置信区间 95%.
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
+FROM foo;
+
+--- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期,返回置信区间是95%的上下边界,同时忽略白噪声检查
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,wncheck=0")
+FROM foo;
+```
+```
+taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
+ _flow | _fhigh | _frowts | forecast(i32) |
+========================================================================================
+ 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.000 | 26 |
+ -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.000 | 30 |
+ -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.000 | 33 |
+ -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.000 | 37 |
+ -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.000 | 41 |
+ -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.000 | 45 |
+ -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.000 | 53 |
+ -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.000 | 57 |
+ -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.000 | 62 |
+ -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.000 | 69 |
+```
+
+
+## 内置预测算法
+- [arima](./02-arima.md)
+- [holtwinters](./03-holtwinters.md)
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md
new file mode 100644
index 0000000000..d0d6815c25
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md
@@ -0,0 +1,57 @@
+---
+title: "统计学算法"
+sidebar_label: "统计学算法"
+---
+
+- k-sigma[1]:即 ***68–95–99.7 rule***。***k*** 值默认为 3,即以序列均值的 3 倍标准差范围为边界,超过边界的是异常值。k-sigma 要求数据整体上服从正态分布,如果一个点偏离均值 k 倍标准差,则该点被视为异常点。
+
+|参数|说明|是否必选|默认值|
+|---|---|---|---|
+|k|标准差倍数|选填|3|
+
+```SQL
+--- 指定调用的算法为ksigma, 参数 k 为 2
+SELECT _WSTART, COUNT(*)
+FROM foo
+ANOMALY_WINDOW(foo.i32, "algo=ksigma,k=2")
+```
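+
+k-sigma 的判定逻辑可以用几行 NumPy 代码直观示意(仅为概念演示,并非 TDgpt 内置算法的实现):
+
+```python
+import numpy as np
+
+def ksigma_outliers(values, k=3):
+    """返回布尔数组,True 表示偏离均值超过 k 倍标准差的点"""
+    arr = np.asarray(values, dtype=float)
+    mean, std = arr.mean(), arr.std()
+    return np.abs(arr - mean) > k * std
+
+data = [2, 2, 2, 2, 2, 100]        # 示例数据,100 为明显的离群值
+print(ksigma_outliers(data, k=2))  # [False False False False False  True]
+```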
+
+- IQR[2]:Interquartile range(IQR),四分位距是一种衡量变异性的方法。四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。 $IQR=Q3-Q1$,对于 $v$, $Q1-(1.5 \times IQR) \le v \le Q3+(1.5 \times IQR)$ 是正常值,范围之外的是异常值。无输入参数。
+
+```SQL
+--- 指定调用的算法为 iqr, 无参数
+SELECT _WSTART, COUNT(*)
+FROM foo
+ANOMALY_WINDOW(foo.i32, "algo=iqr")
+```
+
+- Grubbs[3]: Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,要求单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。
+
+```SQL
+--- 指定调用的算法为 grubbs, 无参数
+SELECT _WSTART, COUNT(*)
+FROM foo
+ANOMALY_WINDOW(foo.i32, "algo=grubbs")
+```
+
+- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常检测方向('pos' / 'neg' / 'both'),异常值比例的上界***max_anoms***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5%
+
+|参数|说明|是否必选|默认值|
+|---|---|---|---|
+|direction|异常检测方向类型('pos' / 'neg' / 'both')|否|"both"|
+|max_anoms|异常值比例 $0 < K \le 49.9$|否|0.05|
+|period|一个周期包含的数据点|否|0|
+
+
+```SQL
+--- 指定调用的算法为 shesd, 参数 direction 为 both,异常值比例 5%
+SELECT _WSTART, COUNT(*)
+FROM foo
+ANOMALY_WINDOW(foo.i32, "algo=shesd,direction=both,anoms=0.05")
+```
+
+### 参考文献
+1. [https://en.wikipedia.org/wiki/68–95–99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule)
+2. https://en.wikipedia.org/wiki/Interquartile_range
+3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948.
+4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md
new file mode 100644
index 0000000000..7c0998c917
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md
@@ -0,0 +1,20 @@
+---
+title: "数据密度算法"
+sidebar_label: "数据密度算法"
+---
+
+### 基于数据密度的检测方法
+LOF[1]: Local Outlier Factor(LOF),局部离群因子/局部异常因子,
+是 Breunig 在 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,
+该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 $topK$ 个点。
+
+```SQL
+--- 指定调用的算法为LOF,即可调用该算法
+SELECT count(*)
+FROM foo
+ANOMALY_WINDOW(foo.i32, "algo=lof")
+```
+
+### 参考文献
+
+1. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md
new file mode 100644
index 0000000000..d72b8e70a9
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md
@@ -0,0 +1,17 @@
+---
+title: "机器学习算法"
+sidebar_label: "机器学习算法"
+---
+
+Autoencoder[1]: TDgpt 内置使用自编码器(Autoencoder)的异常检测算法,对周期性的时间序列数据具有较好的检测结果。使用该模型需要针对输入时序数据进行预训练,同时将训练完成的模型保存到服务目录 `ad_autoencoder` 中,然后在 SQL 语句中指定调用该算法模型即可使用。
+
+```SQL
+--- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测
+SELECT COUNT(*), _WSTART
+FROM foo
+ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo');
+```
+
+### 参考文献
+
+1. https://en.wikipedia.org/wiki/Autoencoder
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md
new file mode 100644
index 0000000000..632492ce72
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md
@@ -0,0 +1,69 @@
+---
+title: 异常检测算法
+description: 异常检测算法
+---
+
+import ad from '../pic/anomaly-detection.png';
+
+TDengine 中定义了异常(状态)窗口来提供异常检测服务。异常窗口可以视为一种特殊的**事件窗口(Event Window)**,即异常检测算法确定的连续异常时间序列数据所在的时间窗口。与普通事件窗口区别在于——时间窗口的起始时间和结束时间均是分析算法识别确定,不是用户给定的表达式进行判定。因此,在 `WHERE` 子句中使用 `ANOMALY_WINDOW` 关键词即可调用时序数据异常检测服务,同时窗口伪列(`_WSTART`, `_WEND`, `_WDURATION`)也能够像其他时间窗口一样用于描述异常窗口的起始时间(`_WSTART`)、结束时间(`_WEND`)、持续时间(`_WDURATION`)。例如:
+
+```SQL
+--- 使用异常检测算法 IQR 对输入列 col_val 进行异常检测。同时输出异常窗口的起始时间、结束时间、以及异常窗口内 col 列的和。
+SELECT _wstart, _wend, SUM(col)
+FROM foo
+ANOMALY_WINDOW(col_val, "algo=iqr");
+```
+
+如下图所示,Anode 将返回时序数据异常窗口 $[10:51:30, 10:53:40]$
+
+
+
+在此基础上,用户可以针对异常窗口内的时序数据进行查询聚合、变换处理等操作。
+
+### 语法
+
+```SQL
+ANOMALY_WINDOW(column_name, option_expr)
+
+option_expr: {"
+algo=expr1
+[,wncheck=1|0]
+[,expr2]
+"}
+```
+
+1. `column_name`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
+2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
+3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
+4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。
+
+### 参数说明
+|参数|含义|默认值|
+|---|---|---|
+|algo|异常检测调用的算法|iqr|
+|wncheck|对输入数据列是否进行白噪声检查,取值为0或1|1|
+
+
+### 示例
+```SQL
+--- 使用 iqr 算法进行异常检测,检测列 i32 列。
+SELECT _wstart, _wend, SUM(i32)
+FROM foo
+ANOMALY_WINDOW(i32, "algo=iqr");
+
+--- 使用 ksigma 算法进行异常检测,输入参数 k 值为 2,检测列 i32 列
+SELECT _wstart, _wend, SUM(i32)
+FROM foo
+ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
+
+taos> SELECT _wstart, _wend, count(*) FROM foo ANOMALY_WINDOW(i32);
+ _wstart | _wend | count(*) |
+====================================================================
+ 2020-01-01 00:00:16.000 | 2020-01-01 00:00:17.000 | 2 |
+Query OK, 1 row(s) in set (0.028946s)
+```
+
+
+### 内置异常检测算法
+分析平台内置了 6 个异常检测算法,分为 3 个类别,分别是[基于统计学的算法](./02-statistics-approach.md)、[基于数据密度的算法](./03-data-density.md)、以及[基于机器学习的算法](./04-machine-learning.md)。在不指定异常检测使用的方法的情况下,默认调用 IQR 进行异常检测。
+
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
new file mode 100644
index 0000000000..954076c8fd
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
@@ -0,0 +1,110 @@
+---
+title: "预测算法"
+sidebar_label: "预测算法"
+---
+
+### 输入约定
+`execute` 是预测算法处理的核心方法。框架调用该方法之前,在对象属性参数 `self.list` 中已经设置完毕用于预测的历史时间序列数据。
+
+### 输出约定及父类属性说明
+`execute` 方法执行完成后返回一个如下的字典对象,预测返回结果如下:
+```python
+return {
+ "mse": mse, # 预测算法的拟合数据最小均方误差(minimum squared error)
+    "res": res   # 结果数组 [时间戳数组, 预测结果数组, 预测结果置信区间下界数组, 预测结果置信区间上界数组]
+}
+```
+
+
+预测算法的父类 `AbstractForecastService` 包含的对象属性如下:
+
+|属性名称|说明|默认值|
+|---|---|---|
+|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,设置为 0 即可| 0|
+|start_ts|预测结果的开始时间| 0|
+|time_step|预测结果的两个数据点之间时间间隔|0 |
+|fc_rows|预测结果的数量| 0 |
+|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1|
+|conf|置信区间分位数|95|
+
+
+
+### 示例代码
+下面我们开发一个示例预测算法,对于任何输入的时间序列数据,固定返回值 1 作为预测结果。
+
+```python
+import numpy as np
+from service import AbstractForecastService
+
+# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束
+class _MyForecastService(AbstractForecastService):
+ """ 定义类,从 AbstractForecastService 继承并实现其定义的抽象方法 execute """
+
+ # 定义算法调用关键词,全小写ASCII码
+ name = 'myfc'
+
+ # 该算法的描述信息(建议添加)
+ desc = """return the forecast time series data"""
+
+ def __init__(self):
+ """类初始化方法"""
+ super().__init__()
+
+ def execute(self):
+ """ 算法逻辑的核心实现"""
+ res = []
+
+ """这个预测算法固定返回 1 作为预测值,预测值的数量是用户通过 self.fc_rows 指定"""
+ ts_list = [self.start_ts + i * self.time_step for i in range(self.fc_rows)]
+        res.append(ts_list)  # 设置预测结果时间戳列
+
+ """生成全部为 1 的预测结果 """
+ res_list = [1] * self.fc_rows
+ res.append(res_list)
+
+ """检查用户输入,是否要求返回预测置信区间上下界"""
+ if self.return_conf:
+ """对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可"""
+ bound_list = [1] * self.fc_rows
+ res.append(bound_list) # 预测结果置信区间下界
+            res.append(bound_list)  # 预测结果置信区间上界
+
+ """返回结果"""
+ return { "res": res, "mse": 0}
+
+
+ def set_params(self, params):
+ """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
+ pass
+```
+
+将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口中执行 `SHOW ANODES FULL` 能够看到新加入的算法。应用就可以通过 SQL 语句调用该预测算法。
+
+```SQL
+--- 对 col 列进行异常检测,通过指定 algo 参数为 myfc 来调用新添加的预测类
+SELECT _flow, _fhigh, _frowts, FORECAST(col_name, "algo=myfc")
+FROM foo;
+```
+
+如果是第一次启动该 Anode, 请按照 [TDgpt 安装部署](../../management/) 里的步骤先将该 Anode 添加到 TDengine 系统中。
+
+### 单元测试
+
+在测试目录`taosanalytics/test`中的 forecast_test.py 中增加单元测试用例或添加新的测试文件。单元测试依赖 Python Unit test 包。
+
+```python
+def test_myfc(self):
+ """ 测试 myfc 类 """
+ s = loader.get_service("myfc")
+
+ # 设置用于预测分析的数据
+ s.set_input_list(self.get_input_list())
+ # 检查预测结果应该全部为 1
+ r = s.set_params(
+ {"fc_rows": 10, "start_ts": 171000000, "time_step": 86400 * 30, "start_p": 0}
+ )
+ r = s.execute()
+
+ expected_list = [1] * 10
+    self.assertListEqual(r["res"][1], expected_list)
+```
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
new file mode 100644
index 0000000000..dc0a534706
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
@@ -0,0 +1,76 @@
+---
+title: "异常检测"
+sidebar_label: "异常检测"
+---
+
+### 输入约定
+`execute` 是算法处理的核心方法。框架调用该方法之前,在对象属性参数 `self.list` 中已经设置完毕用于异常检测的时间序列数据。
+
+### 输出约定
+`execute` 方法执行完成后的返回值是长度与 `self.list` 相同的数组,数组中取值为 -1 的位置标识异常值点。
+> 例如:对于输入测量值序列 $[2, 2, 2, 2, 100]$, 假设 100 是异常点,那么方法返回的结果数组则为 $[1, 1, 1, 1, -1]$。
+
+
+### 示例代码
+下面我们开发一个示例异常检测算法,在异常检测中,将输入时间序列值的最后一个值设置为异常值,并返回结果。
+
+```python
+import numpy as np
+from service import AbstractAnomalyDetectionService
+
+# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束
+class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):
+ """ 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService 类的抽象方法 """
+
+ # 定义算法调用关键词,全小写ASCII码
+ name = 'myad'
+
+ # 该算法的描述信息(建议添加)
+ desc = """return the last value as the anomaly data"""
+
+ def __init__(self):
+ """类初始化方法"""
+ super().__init__()
+
+ def execute(self):
+ """ 算法逻辑的核心实现"""
+
+ """创建一个长度为 len(self.list),全部值为 1 的结果数组,然后将最后一个值设置为 -1,表示最后一个值是异常值"""
+ res = [1] * len(self.list)
+ res[-1] = -1
+
+ """返回结果数组"""
+ return res
+
+
+ def set_params(self, params):
+ """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
+ pass
+```
+
+将该文件保存在 `./taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后应用就可以通过 SQL 语句调用该检测算法。
+
+```SQL
+--- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类
+SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad')
+```
+如果是第一次启动该 Anode, 请按照 [TDgpt 安装部署](../../management/) 里的步骤先将该 Anode 添加到 TDengine 系统中。
+
+### 单元测试
+
+在测试目录`taosanalytics/test`中的 anomaly_test.py 中增加单元测试用例或添加新的测试文件。框架中使用了 Python Unit test 包。
+
+```python
+def test_myad(self):
+    """ 测试 myad 类 """
+ s = loader.get_service("myad")
+
+ # 设置需要进行检测的输入数据
+ s.set_input_list(AnomalyDetectionTest.input_list)
+
+ r = s.execute()
+
+ # 最后一个点是异常点
+ self.assertEqual(r[-1], -1)
+ self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
+```
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md
new file mode 100644
index 0000000000..b7f048cefc
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md
@@ -0,0 +1,82 @@
+---
+title: "算法开发者指南"
+sidebar_label: "算法开发者指南"
+---
+TDgpt 是一个可扩展的时序数据高级分析平台,用户遵循简易的步骤就能将自己开发的分析算法添加到分析平台,各种应用就可以通过 SQL 语句直接调用,让高级分析算法的使用门槛降到几乎为零。目前 TDgpt 平台只支持使用 Python 语言开发的分析算法。
+Anode 采用类动态加载模式,在启动的时候扫描特定目录内满足约定条件的所有代码文件,并将其加载到系统中。因此,开发者只需要遵循以下几步就能完成新算法的添加工作:
+1. 开发完成符合要求的分析算法类
+2. 将代码文件放入对应目录,然后重启 Anode
+3. 使用SQL命令"CREATE ANODE",将 Anode 添加到 TDengine
+
+此时就完成了新算法的添加工作,之后应用就可以直接使用 SQL 语句调用新算法。得益于 TDgpt 与 TDengine 主进程 `taosd` 的松散耦合,Anode 算法升级对 `taosd` 完全没有影响。应用系统只需要调整对应的 SQL 语句调用新(升级的)算法,就能够快速完成分析功能和分析算法的升级。
+
+这种方式能够按需扩展分析算法,极大地拓展 TDgpt 的适应范围,用户可以按需将更契合业务场景的、更准确的(预测、异常检测)分析算法动态嵌入到 TDgpt,并通过 SQL 语句进行调用。在基本不用更改应用系统代码的前提下,就能够快速完成分析功能的平滑升级。
+
+以下内容将说明如何将分析算法添加到 Anode 中并能够通过SQL语句进行调用。
+
+## 目录结构
+Anode 的主要目录结构如下所示
+
+```bash
+.
+├── cfg
+├── model
+│   └── ad_detection
+├── release
+├── script
+└── taosanalytics
+ ├── algo
+ │ ├── ad
+ │ └── fc
+ ├── misc
+ └── test
+
+```
+
+|目录|说明|
+|---|---|
+|taosanalytics| 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 保存异常检测算法代码,fc 目录保存预测算法代码|
+|script|是安装脚本和发布脚本放置目录|
+|model|放置针对数据集完成的训练模型|
+|cfg|配置文件目录|
+
+## 约定与限制
+
+- 异常检测算法的 Python 代码文件需放在 `./taosanalytics/algo/ad` 目录中
+- 预测算法的 Python 代码文件需要放在 `./taosanalytics/algo/fc` 目录中
+
+
+### 类命名规范
+
+Anode采用算法自动加载模式,因此只识别符合命名约定的 Python 类。需要加载的算法类名称需要以下划线 `_` 开始并以 `Service` 结尾。例如:`_KsigmaService` 是 KSigma 异常检测算法类。
+
+### 类继承约定
+
+- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`
+- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`
+
+### 类属性初始化
+实现的类需要初始化以下两个类属性:
+
+- `name`:识别该算法的关键词,全小写英文字母。通过 `SHOW` 命令查看可用算法显示的名称即为该名称。
+- `desc`:算法的基础描述信息
+
+```SQL
+--- algo 后面的参数 name 即为类属性 `name`
+SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name')
+```
+
+## 添加具有模型的分析算法
+
+基于统计学的分析算法可以直接针对输入时间序列数据进行分析,但是某些深度学习算法对于输入数据需要较长的时间训练,并且生成相应的模型。这种情况下,同一个分析算法对应不同的输入数据集有不同的分析模型。
+将具有模型的分析算法添加到 Anode 中,首先需要在 `model` 目录中建立该算法对应的目录(目录名称可自拟),采用该算法针对不同输入时间序列数据训练得到的模型均需保存在该目录下,同时目录名称要在分析算法中确定,以便能够固定加载该目录下的分析模型。为了确保模型能够正常读取加载,存储的模型使用 `joblib` 库进行序列化保存。
+
+下面以自编码器(Autoencoder)为例,说明如何添加要预先训练的模型进行异常检测。
+首先我们在`model`目录中创建一个目录 -- `ad_detection`,该目录将用来保存所有使用自编码器训练的模型。然后,我们使用自编码器对 foo 表的时间序列数据进行训练,得到模型 ad_autoencoder_foo,使用 `joblib`序列化以后保存在`ad_detection` 目录中。
+
+使用 SQL 调用已经保存的模型,需要在调用参数中指定模型名称 `model=ad_autoencoder_foo`,同时指定 `algo=encoder` 以确定使用自编码器算法(这里的 `encoder` 是添加算法时在代码中定义的算法名称),这样才能正确加载并调用该模型。
+
+```SQL
+--- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测
+SELECT COUNT(*), _WSTART FROM foo ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo');
+```
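+
+下面给出一个使用 `joblib` 序列化并保存模型的简单示意(用 scikit-learn 模型代替自编码器,文件名与路径均为示例假设,实际应与配置文件中的 model-dir 及算法代码中的加载逻辑保持一致):
+
+```python
+import os
+import joblib
+from sklearn.neural_network import MLPRegressor  # 仅作示意,代替真实的自编码器模型
+
+# 假设 model 已针对 foo 表的时间序列数据完成训练(训练过程此处省略)
+model = MLPRegressor(hidden_layer_sizes=(8,))
+
+model_dir = "/usr/local/taos/taosanode/model/ad_detection"  # 对应文中创建的 ad_detection 目录
+os.makedirs(model_dir, exist_ok=True)
+
+# 序列化保存,名称与 SQL 中的 model=ad_autoencoder_foo 对应
+joblib.dump(model, os.path.join(model_dir, "ad_autoencoder_foo"))
+
+# 异常检测算法代码中可用 joblib.load 加载同一模型
+loaded = joblib.load(os.path.join(model_dir, "ad_autoencoder_foo"))
+```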
diff --git a/docs/zh/06-advanced/06-TDgpt/index.md b/docs/zh/06-advanced/06-TDgpt/index.md
new file mode 100644
index 0000000000..3f650b196b
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/index.md
@@ -0,0 +1,25 @@
+---
+sidebar_label: TDgpt
+title: TDgpt
+---
+
+import TDgpt from './pic/data-analysis.png';
+
+
+TDgpt 是 TDengine Enterprise 中针对时序数据提供高级分析功能的企业级组件,通过内置接口向 TDengine 提供运行时动态扩展的时序数据分析服务。TDgpt 能够独立于 TDengine 主进程部署和运行,因此可避免消耗占用 TDengine 集群的主进程资源。
+TDgpt 具有服务无状态、功能易扩展、快速弹性部署、应用轻量化、高安全性等优势。
+TDgpt 运行在集群中的 AI Node (Anode)中,集群中可以部署若干个 Anode 节点,不同的 Anode 节点之间无同步依赖或协同的要求。Anode 注册到 TDengine 集群以后,立即就可以提供服务。TDgpt 提供的高级时序数据分析服务可分为时序数据异常检测和时序数据预测分析两大类。
+
+下图是部署 TDgpt 的 TDengine 集群示意图。
+
+<img src={TDgpt} alt="TDgpt 部署架构图" />
+
+在查询处理过程中,Vnode 中运行的查询引擎会根据查询处理物理执行计划,按需向 Anode 请求高级时序数据分析服务。因此用户可通过 SQL 语句与 Anode 节点交互并使用其提供的全部分析服务。需要注意的是,Anode 不直接接收用户的数据分析请求。同时 Anode 具备分析算法动态注册机制,其算法扩展过程完全不影响 TDengine 集群的服务,仅在非常小的(秒级)时间窗口内影响涉及高级分析的查询服务。
+
+目前 TDgpt 提供如下的高级分析服务:
+- 时序数据异常检测。TDengine 中定义了新的时间窗口——异常(状态)窗口——来提供异常检测服务。异常窗口可以视为一种特殊的**事件窗口(Event Window)**,即异常检测算法确定的连续异常时间序列数据所在的时间窗口。与普通事件窗口的区别在于,异常窗口的起始时间和结束时间由分析算法确定,而不是由用户指定的表达式判定。异常窗口的使用方式与其他类型的时间窗口(例如状态窗口、会话窗口等)类似,因此时间窗口内可使用的查询操作均可应用在异常窗口上。
+- 时序数据预测。定义了一个新函数 `FORECAST`,基于输入的(历史)时间序列数据,调用指定(或默认)预测算法,给出输入时序数据后续时间序列的**预测**数据。
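+
+下面给出这两类分析服务的 SQL 调用示意(其中的库表 ai.atb、ai.ftb、列 i32 与算法参数均为示例,请按实际数据替换):
+
+```SQL
+--- 使用 iqr 算法在异常窗口上进行聚合查询
+SELECT _wstart, _wend, SUM(i32)
+FROM ai.atb
+ANOMALY_WINDOW(i32, "algo=iqr");
+
+--- 使用 holtwinters 算法预测后续数据,并返回置信区间
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=holtwinters")
+FROM ai.ftb;
+```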
+
+TDgpt 还为算法开发者提供了 SDK。任何开发者只需要按照[算法开发者指南](./dev)的步骤,就可以将自己独有的时序数据预测或时序数据异常检测算法无缝集成到 TDgpt,这样 TDengine 用户就可以通过一条 SQL 获得时序数据预测结果或是异常窗口,大幅降低了用户使用新的时序数据分析算法的门槛,而且让 TDengine 成为一个开放的系统。
+
+
+
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/activity.png b/docs/zh/06-advanced/06-TDgpt/pic/activity.png
new file mode 100644
index 0000000000..2d2a403de5
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/activity.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png b/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png
new file mode 100644
index 0000000000..f198ce88b7
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png b/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png
new file mode 100755
index 0000000000..baeb51ad5c
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png b/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png
new file mode 100644
index 0000000000..754c6f3d38
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png differ
diff --git a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
deleted file mode 100644
index bdfa455ae3..0000000000
--- a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: "Anomaly-detection"
-sidebar_label: "Anomaly-detection"
----
-
-本节讲述异常检测算法模型的使用方法。
-
-## 概述
-分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。
-
-
-### 统计学异常检测方法
-
-- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点.
-
-|参数|说明|是否必选|默认值|
-|---|---|---|---|
-|k|标准差倍数|选填|3|
-
-
-- IQR[2]:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5。无输入参数。
-
-- Grubbs[3]: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。
-
-- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5%
-
-|参数|说明|是否必选|默认值|
-|---|---|---|---|
-|k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5|
-
-
-### 基于数据密度的检测方法
-LOF[5]: 局部离群因子(LOF,又叫局部异常因子)算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 top(n) 个点。
-
-
-### 基于深度学习的检测方法
-使用自动编码器的异常检测模型。可以对具有周期性的数据具有较好的检测结果。但是使用该模型需要针对输入的时序数据进行训练,同时将训练完成的模型部署到服务目录中,才能够运行与使用。
-
-
-### 参考文献
-1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
-2. https://en.wikipedia.org/wiki/Interquartile_range
-3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948.
-4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
-5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.
-
diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md
deleted file mode 100644
index c0b8921718..0000000000
--- a/docs/zh/06-advanced/06-data-analysis/addins.md
+++ /dev/null
@@ -1,170 +0,0 @@
----
-title: "addins"
-sidebar_label: "addins"
----
-
-本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台,并能够通过 SQL 语句进行调用。
-
-## 目录结构
-
-
-
-|目录|说明|
-|---|---|
-|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码,fc 放置预测算法代码|
-|script|是安装脚本和发布脚本放置目录|
-|model|放置针对数据集完成的训练模型|
-|cfg|配置文件目录|
-
-## 约定与限制
-
-定义异常检测算法的 Python 代码文件需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。
-
-
-### 类命名规范
-
-算法类的名称需要以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。
-
-### 类继承约定
-
-- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`
-- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`
-
-### 类属性初始化
-每个算法实现的类需要静态初始化两个类属性,分别是:
-
-- `name`:触发调用的关键词,全小写英文字母
-- `desc`:算法的描述信息
-
-### 核心方法输入与输出约定
-
-`execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 已经设置好输入数组。
-
-异常检测输出结果
-
-`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
-
-预测输出结果
-
-对于预测算法,`AbstractForecastService` 的对象属性说明如下:
-
-|属性名称|说明|默认值|
-|---|---|---|
-|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
-|start_ts|预测结果的开始时间| 0|
-|time_step|预测结果的两个数据点之间时间间隔|0 |
-|fc_rows|预测结果的数量| 0 |
-|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1|
-|conf|置信区间分位数 0.05|
-
-
-预测返回结果如下:
-```python
-return {
- "rows": self.fc_rows, # 预测数据行数
- "period": self.period, # 数据周期性,同输入
- "algo": "holtwinters", # 预测使用的算法
- "mse": mse, # 预测算法的 mse
- "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组]
-}
-```
-
-
-## 示例代码
-
-```python
-import numpy as np
-from service import AbstractAnomalyDetectionService
-
-# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束,如下 _IqrService 是 IQR 异常检测算法的实现类。
-class _IqrService(AbstractAnomalyDetectionService):
- """ IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService 类的抽象函数 """
-
- # 定义算法调用关键词,全小写ASCII码(必须添加)
- name = 'iqr'
-
- # 该算法的描述信息(建议添加)
- desc = """found the anomaly data according to the inter-quartile range"""
-
- def __init__(self):
- super().__init__()
-
- def execute(self):
- """ execute 是算法实现逻辑的核心实现,直接修改该实现即可 """
-
- # self.list 是输入数值列,list 类型,例如:[1,2,3,4,5]。设置 self.list 的方法在父类中已经进行了定义。实现自己的算法,修改该文件即可,以下代码使用自己的实现替换即可。
- #lower = np.quantile(self.list, 0.25)
- #upper = np.quantile(self.list, 0.75)
-
- #min_val = lower - 1.5 * (upper - lower)
- #max_val = upper + 1.5 * (upper - lower)
- #threshold = [min_val, max_val]
-
- # 返回值是与输入数值列长度相同的数据列,异常值对应位置是 -1。例如上述输入数据列,返回数值列是 [1, 1, 1, 1, -1],表示 [5] 是异常值。
- return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list]
-
-
- def set_params(self, params):
- """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
- pass
-```
-
-
-## 单元测试
-
-在测试文件目录中的 anomaly_test.py 中增加单元测试用例。
-
-```python
-def test_iqr(self):
- """ 测试 _IqrService 类 """
- s = loader.get_service("iqr")
-
- # 设置需要进行检测的输入数据
- s.set_input_list(AnomalyDetectionTest.input_list)
-
- # 测试 set_params 的处理逻辑
- try:
- s.set_params({"k": 2})
- except ValueError as e:
- self.assertEqual(1, 0)
-
- r = s.execute()
-
- # 绘制异常检测结果
- draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr")
-
- # 检查结果
- self.assertEqual(r[-1], -1)
- self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
-```
-
-## 需要模型的算法
-
-针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立 autoencoder 的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。
-
-训练完成后的模型,使用 joblib 进行保存。
-
-并在 model 目录下建立对应的文件夹存放该模型。
-
-保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 `{"model": "ad_encoder_keras"}` 的方式,可以调用该模型进行计算。
-
-具体的调用方式如下:
-
-```python
-def test_autoencoder_ad(self):
- # 获取特定的算法服务
- s = loader.get_service("ac")
- data = self.__load_remote_data_for_ad()
-
- # 设置异常检查的输入数据
- s.set_input_list(data)
-
- # 指定调用的模型,该模型是之前针对该数据集进行训练获得
- s.set_params({"model": "ad_encoder_keras"})
- # 执行检查动作,并返回结果
- r = s.execute()
-
- num_of_error = -(sum(filter(lambda x: x == -1, r)))
- self.assertEqual(num_of_error, 109)
-```
-
diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md
deleted file mode 100644
index 2cbea1caba..0000000000
--- a/docs/zh/06-advanced/06-data-analysis/index.md
+++ /dev/null
@@ -1,322 +0,0 @@
----
-sidebar_label: 数据分析
-title: 数据分析功能
----
-
-## 概述
-
-ANode(Analysis Node)是 TDengine 提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,拓展 TDengine 的功能,支持时间序列高级分析。
-ANode 是无状态的数据分析节点,集群中可以存在多个 ANode 节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。
-下图是数据分析的技术架构示意图。
-
-
-
-## 安装部署
-### 环境准备
-ANode 要求节点上准备有 Python 3.10 及以上版本,以及相应的 Python 包自动安装组件 Pip,同时请确保能够正常连接互联网。
-
-### 安装及卸载
-使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengine 的安装流程一致。
-
-```bash
-tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
-cd TDengine-enterprise-anode-1.0.0
-sudo ./install.sh
-```
-
-卸载 ANode,执行命令 `rmtaosanode` 即可。
-
-### 其他
-为了避免 ANode 安装后影响目标节点现有的 Python 库。 ANode 使用 Python 虚拟环境运行,安装后的默认 Python 目录处于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 ANode 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,可以手动删除。
-
-## 启动及停止服务
-安装 ANode 以后,可以使用 `systemctl` 来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。
-
-```bash
-systemctl start taosanoded
-systemctl stop taosanoded
-systemctl status taosanoded
-```
-
-## 目录及配置说明
-|目录/文件|说明|
-|---------------|------|
-|/usr/local/taos/taosanode/bin|可执行文件目录|
-|/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/|
-|/usr/local/taos/taosanode/lib|库文件目录|
-|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model|
-|/var/log/taos/taosanode/|日志文件目录|
-|/etc/taos/taosanode.ini|配置文件|
-
-### 配置说明
-
-Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 和 uWSGI 的配置信息存放在同一个配置文件中,具体如下:
-
-```ini
-[uwsgi]
-# charset
-env = LC_ALL = en_US.UTF-8
-
-# ip:port
-http = 127.0.0.1:6050
-
-# the local unix socket file than communicate to Nginx
-#socket = 127.0.0.1:8001
-#socket-timeout = 10
-
-# base directory
-chdir = /usr/local/taos/taosanode/lib
-
-# initialize python file
-wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
-
-# call module of uWSGI
-callable = app
-
-# auto remove unix Socket and pid file when stopping
-vacuum = true
-
-# socket exec model
-#chmod-socket = 664
-
-# uWSGI pid
-uid = root
-
-# uWSGI gid
-gid = root
-
-# main process
-master = true
-
-# the number of worker processes
-processes = 2
-
-# pid file
-pidfile = /usr/local/taos/taosanode/taosanode.pid
-
-# enable threads
-enable-threads = true
-
-# the number of threads for each process
-threads = 4
-
-# memory useage report
-memory-report = true
-
-# smooth restart
-reload-mercy = 10
-
-# conflict with systemctl, so do NOT uncomment this
-# daemonize = /var/log/taos/taosanode/taosanode.log
-
-# log directory
-logto = /var/log/taos/taosanode/taosanode.log
-
-# wWSGI monitor port
-stats = 127.0.0.1:8387
-
-# python virtual environment directory
-virtualenv = /usr/local/taos/taosanode/venv/
-
-[taosanode]
-# default app log file
-app-log = /var/log/taos/taosanode/taosanode.app.log
-
-# model storage directory
-model-dir = /usr/local/taos/taosanode/model/
-
-# default log level
-log-level = DEBUG
-
-# draw the query results
-draw-result = 0
-```
-
-**提示**
-请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而无法正常启动。
-
-
-## ANode 基本操作
-### 管理 ANode
-#### 创建 ANode
-```sql
-CREATE ANODE {node_url}
-```
-node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,则无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
-
-#### 查看 ANode
-列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。
-```sql
-SHOW ANODES;
-```
-
-#### 查看提供的时序数据分析服务
-
-```SQL
-SHOW ANODES FULL;
-```
-
-#### 强制刷新集群中的分析算法缓存
-```SQL
-UPDATE ANODE {node_id}
-UPDATE ALL ANODES
-```
-
-#### 删除 ANode
-```sql
-DROP ANODE {anode_id}
-```
-删除 ANode 只是将 ANode 从 TDengine 集群中删除,管理 ANode 的启停仍然需要使用`systemctl`命令。
-
-### 时序数据分析功能
-
-#### 白噪声检查
-
-分析平台提供的 Restful 服务要求输入的时间序列不能是白噪声时间序列(White Noise Data, WND)和随机数序列 , 因此针对所有数据均默认进行白噪声检查。当前白噪声检查采用通行的 `Ljung-Box` 检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
-如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。
-同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。
-
-
-#### 数据重采样和时间戳对齐
-
-分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
-
-- 数据时间戳对齐。由于真实数据可能并非严格按照查询指定的时间戳输入。此时分析平台会自动将数据的时间间隔按照指定的时间间隔进行对齐。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
-- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0, 5, 10, 15, 20, 25, 30] 将被重采用为间隔 为 10 的序列 [0, 10, 20,30],[5, 15, 25] 处的数据将被丢弃。
-
-需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。
-
-
-#### 时序数据异常检测
-异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常的时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
-异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。
-对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。
-
-
-##### 语法
-
-```SQL
-ANOMALY_WINDOW(column_name, option_expr)
-
-option_expr: {"
-algo=expr1
-[,wncheck=1|0]
-[,expr2]
-"}
-```
-
-1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
-2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
-3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
-4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。
-
-**参数说明**
-|参数|含义|默认值|
-|---|---|---|
-|algo|异常检测调用的算法|iqr|
-|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1,默认值为 1,表示进行白噪声检查|
-
-异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
-1. `_WSTART`: 异常窗口开始时间戳
-2. `_WEND`:异常窗口结束时间戳
-3. `_WDURATION`:异常窗口持续时间
-
-**示例**
-```SQL
---- 使用 iqr 算法进行异常检测,检测列 i32 列。
-SELECT _wstart, _wend, SUM(i32)
-FROM ai.atb
-ANOMALY_WINDOW(i32, "algo=iqr");
-
---- 使用 ksigma 算法进行异常检测,输入参数 k 值为 2,检测列 i32 列
-SELECT _wstart, _wend, SUM(i32)
-FROM ai.atb
-ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
-```
-
-```
-taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMAYL_WINDOW(i32);
- _wstart | _wend | count(*) |
-====================================================================
- 2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
-Query OK, 1 row(s) in set (0.028946s)
-```
-
-
-**可用异常检测算法**
-- iqr
-- ksigma
-- grubbs
-- lof
-- shesd
-- tac
-
-
-#### 时序数据预测
-数据预测以一段训练数据作为输入,预测接下来一个连续时间区间内,时序数据的趋势。
-
-##### 语法
-```SQL
-FORECAST(column_expr, option_expr)
-
-option_expr: {"
-algo=expr1
-[,wncheck=1|0]
-[,conf=conf_val]
-[,every=every_val]
-[,rows=rows_val]
-[,start=start_ts_val]
-[,expr2]
-"}
-
-```
-1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型输入。
-2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持 `conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下:
-
-**参数说明**
-
-|参数|含义|默认值|
-|---|---|---|
-|algo|预测分析使用的算法|holtwinters|
-|wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查|
-|conf|预测数据的置信区间范围 ,取值范围 [0, 100]|95|
-|every|预测数据的采样间隔|输入数据的采样间隔|
-|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样时间段|
-|rows|预测结果的记录数|10|
-
-1. 预测查询结果新增了三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
-2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。
-3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。
-4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。
-
-**示例**
-
-```SQL
---- 使用 arima 算法进行预测,预测结果是 10 条记录(默认值),数据进行白噪声检查,默认置信区间 95%.
-SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
-FROM ai.ftb;
-
---- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期。返回置信区间是 95%.
-SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
-FROM ai.ftb;
-```
-```
-taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
- _flow | _fhigh | _frowts | forecast(i32) |
-========================================================================================
- 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
- -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 |
- -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 |
- -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 |
- -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 |
- -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 |
- -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 |
- -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 |
- -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 |
- -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 |
-```
-
-
-**可用预测算法**
-- arima
-- holtwinters
diff --git a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png
deleted file mode 100644
index 44fd82832f..0000000000
Binary files a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png and /dev/null differ
diff --git a/docs/zh/06-advanced/06-data-analysis/pic/dir.png b/docs/zh/06-advanced/06-data-analysis/pic/dir.png
deleted file mode 100644
index d5aafb4427..0000000000
Binary files a/docs/zh/06-advanced/06-data-analysis/pic/dir.png and /dev/null differ
diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md
index a91a764c67..8b272bdfff 100644
--- a/docs/zh/07-develop/07-tmq.md
+++ b/docs/zh/07-develop/07-tmq.md
@@ -16,7 +16,7 @@ TDengine 提供了类似于消息队列产品的数据订阅和消费接口。
**注意**
在 TDengine 连接器实现中,对于订阅查询,有以下限制。
-- 查询语句限制:订阅查询只能使用 select 语句,不支持其他类型的SQL,如 insert、update 或 delete 等。
+- 查询语句限制:订阅查询只能使用 select 语句,不支持其他类型的 SQL,例如订阅库、订阅超级表(非 select 方式)以及 insert、update、delete 等。
- 原始始数据查询:订阅查询只能查询原始数据,而不能查询聚合或计算结果。
- 时间顺序限制:订阅查询只能按照时间正序查询数据。
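+
+例如,订阅所使用的 topic 只能基于 select 语句创建(其中的库名、表名与列名仅为示意):
+
+```sql
+CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage FROM power.meters;
+```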
diff --git a/docs/zh/08-operation/06-gui.md b/docs/zh/08-operation/06-gui.md
index bb99b2cf43..cdf19b3ac1 100644
--- a/docs/zh/08-operation/06-gui.md
+++ b/docs/zh/08-operation/06-gui.md
@@ -165,6 +165,10 @@ toc_max_heading_level: 4
第一步 填写添加新主题需要的信息,点击“创建”按钮;

+如上图,您可以选择是否 “同步 meta”。如果开启“同步 meta”,则可以订阅到 meta 信息,比如超级表的创建或删除。
+
+您需要根据使用场景来选择是否开启。如果您使用 taos 连接器编写业务代码订阅 topic,则不能开启“同步 meta”,只能订阅数据;如果您创建的 topic 是供 Explorer 中配置的同步任务使用,则可以开启“同步 meta”。
+
第二步 页面出现以下记录,则证明创建成功。

diff --git a/docs/zh/08-operation/16-security.md b/docs/zh/08-operation/16-security.md
index 4f47a644f7..e3cd72d9dc 100644
--- a/docs/zh/08-operation/16-security.md
+++ b/docs/zh/08-operation/16-security.md
@@ -26,6 +26,22 @@ SHOW USERS;
```sql
ALTER USER TEST DROP HOST HOST_NAME1
```
+说明
+- 开源版和企业版本都能添加成功,且可以查询到,但是开源版本不会对 IP 做任何限制。
+- `create user u_write pass 'taosdata1' host 'iprange1','iprange2'`,可以一次添加多个 iprange,服务端会自动去重,去重的逻辑是 iprange 需要完全一致
+- 默认会把 127.0.0.1 添加到白名单列表,且在白名单列表可以查询
+- 集群的节点 IP 集合会自动添加到白名单列表,但是查询不到。
+- taosAdapter 和 taosd 不在同一台机器上的时候,需要把 taosAdapter 所在机器的 IP 手动添加到 taosd 的白名单列表中
+- 集群情况下,各个节点的 enableWhiteList 需保持一致,或者全为 false,或者全为 true,否则集群无法启动
+- 白名单变更生效时间约 1s,不超过 2s。每次变更对收发性能有些微影响(多一次判断,可以忽略),变更完成之后影响忽略不计;变更过程中对集群没有影响,对正在访问的客户端也没有影响(假设这些客户端的 IP 包含在白名单内)
+- 如果添加两个 ip range, 192.168.1.1/16(假设为 A), 192.168.1.1/24(假设为 B), 严格来说,A 包含了 B,但是考虑情况太复杂,并不会对 A 和 B 做合并
+- 要删除的时候,必须严格匹配。 也就是如果添加的是 192.168.1.1/24, 要删除也是 192.168.1.1/24
+- 只有 root 才有权限对其他用户增删 ip white list
+- 兼容之前的版本,但是不支持从当前版本回退到之前版本
+- x.x.x.x/32 和 x.x.x.x 属于同一个 iprange, 显示为 x.x.x.x
+- 如果客户端拿到的是 0.0.0.0/0,说明没有开启白名单
+- 如果白名单发生了改变, 客户端会在 heartbeat 里检测到。
+- 针对一个 user, 添加的 IP 个数上限是 2048
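+
+以下为 IP 白名单相关操作的 SQL 示意(用户名、密码与 IP 网段均为示例,请按实际环境替换):
+
+```sql
+-- 创建用户时一次指定多个 iprange
+CREATE USER u_write PASS 'taosdata1' HOST '192.168.1.0/24','10.1.0.0/16';
+
+-- 为已有用户增加或删除 iprange(删除时需与已添加的网段严格匹配)
+ALTER USER u_write ADD HOST '172.16.0.0/16';
+ALTER USER u_write DROP HOST '172.16.0.0/16';
+
+-- 查看用户及其白名单
+SHOW USERS;
+```
+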
## 审计日志
diff --git a/docs/zh/08-operation/19-debug.md b/docs/zh/08-operation/19-debug.md
new file mode 100644
index 0000000000..ebab9410d6
--- /dev/null
+++ b/docs/zh/08-operation/19-debug.md
@@ -0,0 +1,14 @@
+---
+sidebar_label: 分析调试
+title: 分析调试
+toc_max_heading_level: 4
+---
+为了更好地分析调试 TDengine,推荐开发者在操作系统中安装以下分析调试工具:
+## gdb
+GDB(GNU Debugger)是一个功能强大的命令行调试器,广泛用于调试 C、C++ 和其他编程语言的程序。
+## valgrind
+valgrind 是一个用于内存调试、内存泄漏检测和性能分析的工具框架。Valgrind 提供了一组工具,帮助开发者检测和修复程序中的内存错误、线程错误和性能问题。
+## bpftrace
+bpftrace 是一个高级的动态跟踪工具,基于 eBPF(Extended Berkeley Packet Filter)技术,用于在 Linux 系统上进行性能分析和故障排除。
+## perf
+perf 是一个强大的 Linux 性能分析工具。它提供了对系统和应用程序的详细性能分析,帮助开发者和系统管理员识别和解决性能瓶颈。
\ No newline at end of file
diff --git a/docs/zh/08-operation/pic/topic-03-addTopicWizard.jpeg b/docs/zh/08-operation/pic/topic-03-addTopicWizard.jpeg
index e227d161ce..841c0e95ee 100644
Binary files a/docs/zh/08-operation/pic/topic-03-addTopicWizard.jpeg and b/docs/zh/08-operation/pic/topic-03-addTopicWizard.jpeg differ
diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md
index 89a97b108d..101058c2a8 100644
--- a/docs/zh/14-reference/01-components/01-taosd.md
+++ b/docs/zh/14-reference/01-components/01-taosd.md
@@ -26,42 +26,66 @@ taosd 命令行参数如下
:::
### 连接相关
-
-| 参数名称 | 参数说明 |
-| :--------------------: | :-------------------------------------------------------------------------------------: |
-| firstEp | taosd 启动时,主动连接的集群中首个 dnode 的 end point,缺省值:localhost:6030 |
-| secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,缺省值:无 |
-| fqdn | 启动 taosd 后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname |
-| serverPort | 启动 taosd 后所监听的端口,缺省值:6030 |
-| numOfRpcSessions | 允许一个 dnode 能发起的最大连接数,取值范围 100-100000,缺省值:30000 |
-| timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值:500000 |
+|参数名称|支持版本|参数含义|
+|-----------------------|----------|-|
+|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030|
+|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值|
+|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
+|serverPort | |taosd 监听的端口,默认值 6030|
+|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
+|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
+|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000|
+|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半|
+|numOfTaskQueueThreads | |dnode 处理 RPC 消息的线程数|
+|statusInterval | |dnode 与 mnode 之间的心跳间隔|
+|rpcQueueMemoryAllowed | |dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10 |
+|resolveFQDNRetryTime | |FQDN 解析失败时的重试次数|
+|timeToGetAvailableConn | |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000|
+|maxShellConns | |允许创建的最大连接数|
+|maxRetryWaitTime | |重连最大超时时间|
+|shareConnLimit |3.3.4.3 后|内部参数,一个连接可以共享的查询数目,取值范围 1-256,默认值 10|
+|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
### 监控相关
-| 参数名称 | 参数说明 |
-| :----------------: | :------------------------------------------------------------------------------------: |
-| monitor | 是否收集监控数据并上报,0: 关闭;1:打开;缺省值:0 |
-| monitorFqdn | taosKeeper 服务所在服务器的 FQDN,缺省值:无 |
-| monitorPort | taosKeeper 服务所监听的端口号,缺省值:6043 |
-| monitorInternal | 监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,缺省值:30 |
-| telemetryReporting | 是否上传 telemetry,0: 不上传,1:上传,缺省值:1 |
-| crashReporting | 是否上传 crash 信息;0: 不上传,1: 上传;缺省值: 1 |
+|参数名称|支持版本|参数含义|
+|-----------------------|----------|-|
+|monitor | |是否收集监控数据并上报,0:关闭;1:打开;默认值 0|
+|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN,默认值 无|
+|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043|
+|monitorInterval | |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
+|monitorMaxLogs | |缓存的待上报日志条数|
+|monitorComp | |是否采用压缩方式上报监控日志|
+|monitorLogProtocol | |是否打印监控日志|
+|monitorForceV2 | |是否使用 V2 版本协议上报|
+|telemetryReporting | |是否上传 telemetry,0:不上传,1:上传,默认值 1|
+|telemetryServer | |telemetry 服务器地址|
+|telemetryPort | |telemetry 服务器端口编号|
+|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 43200|
+|crashReporting | |是否上传 crash 信息;0:不上传,1:上传;默认值 1|
### 查询相关
-
-| 参数名称 | 参数说明 |
-| :--------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| queryPolicy | 查询策略,1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 ;4: 使用客户端聚合模式;缺省值:1 |
-| maxNumOfDistinctRes | 允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿 |
-| countAlwaysReturnValue | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值,0: 返回空行,1: 返回;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. |
-
+|参数名称|支持版本|参数含义|
+|------------------------|----------|-|
+|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
+|tagFilterCache | |是否缓存标签过滤结果|
+|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿|
+|queryBufferSize | |暂不生效|
+|queryRspPolicy | |查询响应策略|
+|filterScalarMode | |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
+|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
+|queryNodeChunkSize | |内部参数,查询计划的块大小|
+|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
+|queryMaxConcurrentTables| |内部参数,查询计划的并发数目|
+|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
+|enableQueryHb | |内部参数,是否发送查询心跳消息|
+|pqSortMemThreshold | |内部参数,排序使用的内存阈值|
### 区域相关
-
-| 参数名称 | 参数说明 |
-| :------: | :------------------------------------------------------------------------------------------------------: |
-| timezone | 时区,缺省值:当前服务器所配置的时区 |
-| locale | 系统区位信息及编码格式 ,缺省值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过 API 设置 |
-| charset | 字符集编码,缺省值:系统自动获取 |
+|参数名称|支持版本|参数含义|
+|-----------------|----------|-|
+|timezone | |时区;缺省从系统中动态获取当前的时区设置|
+|locale | |系统区位信息及编码格式,缺省从系统中获取|
+|charset | |字符集编码,缺省从系统中获取|
:::info
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
@@ -101,7 +125,7 @@ SELECT count(*) FROM table_name WHERE TS<1554984068000;
客户端的输入的字符均采用操作系统当前默认的编码格式,在 Linux/macOS 系统上多为 UTF-8,部分中文系统编码则可能是 GB18030 或 GBK 等。在 docker 环境中默认的编码是 POSIX。在中文版 Windows 系统中,编码则是 CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证 nchar 中的数据正确转换为 UCS4-LE 编码格式。
-在 Linux/macOS 中 locale 的命名规则为: \<语言>_\<地区>.\<字符集编码> 如:zh_CN.UTF-8,zh 代表中文,CN 代表大陆地区,UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux/macOS 可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux/macOS 中也可以使用 charset 来指定字符编码。
+在 Linux/macOS 中 locale 的命名规则为:\<语言>_\<地区>.\<字符集编码> 如:zh_CN.UTF-8,zh 代表中文,CN 代表大陆地区,UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux/macOS 可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux/macOS 中也可以使用 charset 来指定字符编码。
3. 如果配置文件中不设置 charset,在 Linux/macOS 中,taos 在启动时候,自动读取系统当前的 locale 信息,并从 locale 信息中解析提取 charset 编码格式。如果自动读取 locale 信息失败,则尝试读取 charset 配置,如果读取 charset 配置也失败,则中断启动过程。
@@ -139,73 +163,149 @@ charset 的有效值是 UTF-8。
:::
### 存储相关
-
-| 参数名称 | 参数说明 |
-| :--------------: | :--------------------------------------------------------------------: |
-| dataDir | 数据文件目录,所有的数据文件都将写入该目录,缺省值:/var/lib/taos |
-| tempDir | 指定所有系统运行过程中的临时文件生成的目录,缺省值:/tmp |
-| minimalTmpDirGB | tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值: 1 |
-| minimalDataDirGB | dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,缺省值: 2 |
+|参数名称|支持版本|参数含义|
+|--------------------|----------|-|
+|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
+|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
+|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2|
+|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1|
+|minDiskFreeSize |3.1.1.0 后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数|
+|s3MigrateIntervalSec|3.3.4.3 后|本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数|
+|s3MigrateEnabled |3.3.4.3 后|是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数|
+|s3Accesskey |3.3.4.3 后|冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数|
+|s3Endpoint |3.3.4.3 后|用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
+|s3BucketName |3.3.4.3 后|存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数|
+|s3PageCacheSize |3.3.4.3 后|S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数|
+|s3UploadDelaySec |3.3.4.3 后|data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数|
+|cacheLazyLoadThreshold| |内部参数,缓存的装载策略|
### 集群相关
+|参数名称|支持版本|参数含义|
+|--------------------------|----------|-|
+|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5|
+|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024,默认值为 4|
+|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
+|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
+|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
+|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
+|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
+|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
+|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
+|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
+|ttlUnit | |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
+|ttlPushInterval | |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
+|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
+|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0,默认值 10000|
+|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制|
+|maxTsmaNum | |集群内可创建的TSMA个数;取值范围 0-3;默认值 3|
+|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20|
+|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096|
+|audit | |审计功能开关;企业版参数|
+|auditInterval | |审计数据上报的时间间隔;企业版参数|
+|auditCreateTable | |是否针对创建子表开启审计功能;企业版参数|
+|encryptAlgorithm | |数据加密算法;企业版参数|
+|encryptScope | |加密范围;企业版参数|
+|enableWhiteList | |白名单功能开关;企业版参数|
+|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
+|syncElectInterval | |内部参数,用于同步模块调试|
+|syncHeartbeatInterval | |内部参数,用于同步模块调试|
+|syncHeartbeatTimeout | |内部参数,用于同步模块调试|
+|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
+|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试|
+|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试|
+|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试|
+|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试|
+|mndLogRetention | |内部参数,用于 mnode 模块调试|
+|skipGrant | |内部参数,用于授权检查|
+|trimVDbIntervalSec | |内部参数,用于删除过期数据|
+|ttlFlushThreshold | |内部参数,ttl 定时器的频率|
+|compactPullupInterval | |内部参数,数据重整定时器的频率|
+|walFsyncDataSizeLimit | |内部参数,WAL 进行 FSYNC 的阈值|
+|transPullupInterval | |内部参数,mnode 执行事务的重试间隔|
+|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔|
+|uptimeInterval | |内部参数,用于记录系统启动时间|
+|timeseriesThreshold | |内部参数,用于统计用量|
+|udf | |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 |
+|udfdResFuncs | |内部参数,用于 UDF 结果集设置|
+|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径|
-| 参数名称 | 参数说明 |
-| :-----------: | :-------------------------------------------------------------------------: |
-| supportVnodes | dnode 支持的最大 vnode 数目,取值范围:0-4096,缺省值: CPU 核数的 2 倍 + 5 |
-### 内存相关
-| 参数名称 | 参数说明 |
-| :----------------: | :---------------------------------------------: |
-| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10 |
-| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
-
-### 性能调优
-
-| 参数名称 | 参数说明 |
-| :----------------: | :---------------------------------------------: |
-| numOfCommitThreads | 落盘线程的最大数量,取值范围 0-1024,缺省值为 4 |
+### 流计算参数
+|参数名称|支持版本|参数含义|
+|-----------------------|----------|-|
+|disableStream | |流计算的启动开关|
+|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB|
+|streamAggCnt | |内部参数,并发进行聚合计算的数目|
+|checkpointInterval | |内部参数,checkpoint 同步间隔|
+|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint|
+|maxStreamBackendCache | |内部参数,流计算使用的最大缓存|
+|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度|
### 日志相关
+|参数名称|支持版本|参数含义|
+|----------------|----------|-|
+|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
+|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1|
+|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000|
+|asyncLog | |日志写入模式,0:同步,1:异步,默认值 1|
+|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
+|slowLogThreshold|3.3.3.0 后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
+|slowLogMaxLen |3.3.3.0 后|慢查询日志最大长度,取值范围 1-16384,默认值 4096|
+|slowLogScope |3.3.3.0 后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
+|slowLogExceptDb |3.3.3.0 后|指定的数据库不上报慢查询,仅支持配置一个数据库|
+|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
+|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
+|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
+|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
+|qDebugFlag | |query 模块的日志开关,取值范围同上|
+|dDebugFlag | |dnode 模块的日志开关,取值范围同上|
+|vDebugFlag | |vnode 模块的日志开关,取值范围同上|
+|mDebugFlag | |mnode 模块的日志开关,取值范围同上|
+|azDebugFlag |3.3.4.3 后|S3 模块的日志开关,取值范围同上|
+|sDebugFlag | |sync 模块的日志开关,取值范围同上|
+|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上|
+|tqDebugFlag | |tq 模块的日志开关,取值范围同上|
+|fsDebugFlag | |fs 模块的日志开关,取值范围同上|
+|udfDebugFlag | |udf 模块的日志开关,取值范围同上|
+|smaDebugFlag | |sma 模块的日志开关,取值范围同上|
+|idxDebugFlag | |index 模块的日志开关,取值范围同上|
+|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上|
+|metaDebugFlag | |meta 模块的日志开关,取值范围同上|
+|stDebugFlag | |stream 模块的日志开关,取值范围同上|
+|sndDebugFlag | |snode 模块的日志开关,取值范围同上|
-| 参数名称 | 参数说明 |
-| :--------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| logDir | 日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos |
-| minimalLogDirGB | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位GB,缺省值:1 |
-| numOfLogLines | 单个日志文件允许的最大行数,缺省值:10,000,000 |
-| asyncLog | 日志写入模式,0: 同步,1: 异步,缺省值: 1 |
-| logKeepDays | 日志文件的最长保存时间 ,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳,并滚动产生新的日志文件 |
-| slowLogThreshold | 慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值: 3 |
-| slowLogScope | 定启动记录哪些类型的慢查询,可选值:ALL, QUERY, INSERT, OHTERS, NONE; 默认值:ALL |
-| debugFlag | 运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志); 默认值:131 或 135 (取决于不同模块) |
-| tmrDebugFlag | 定时器模块的日志开关,取值范围同上 |
-| uDebugFlag | 共用功能模块的日志开关,取值范围同上 |
-| rpcDebugFlag | rpc 模块的日志开关,取值范围同上 |
-| cDebugFlag | 客户端模块的日志开关,取值范围同上 |
-| jniDebugFlag | jni 模块的日志开关,取值范围同上 |
-| qDebugFlag | query 模块的日志开关,取值范围同上 |
-| dDebugFlag | dnode 模块的日志开关,取值范围同上,缺省值 135 |
-| vDebugFlag | vnode 模块的日志开关,取值范围同上 |
-| mDebugFlag | mnode 模块的日志开关,取值范围同上 |
-| wDebugFlag | wal 模块的日志开关,取值范围同上 |
-| sDebugFlag | sync 模块的日志开关,取值范围同上 |
-| tsdbDebugFlag | tsdb 模块的日志开关,取值范围同上 |
-| tqDebugFlag | tq 模块的日志开关,取值范围同上 |
-| fsDebugFlag | fs 模块的日志开关,取值范围同上 |
-| udfDebugFlag | udf 模块的日志开关,取值范围同上 |
-| smaDebugFlag | sma 模块的日志开关,取值范围同上 |
-| idxDebugFlag | index 模块的日志开关,取值范围同上 |
-| tdbDebugFlag | tdb 模块的日志开关,取值范围同上 |
+### 调试相关
+|参数名称|支持版本|参数含义|
+|--------------------|----------|-|
+|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1|
+|configDir | |配置文件所在目录|
+|scriptDir | |内部测试工具的脚本目录|
+|assert | |断言控制开关,默认值 0|
+|randErrorChance | |内部参数,用于随机失败测试|
+|randErrorDivisor | |内部参数,用于随机失败测试|
+|randErrorScope | |内部参数,用于随机失败测试|
+|safetyCheckLevel | |内部参数,用于随机失败测试|
+|experimental | |内部参数,用于一些实验特性|
+|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
+|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
+|rsyncPort | |内部参数,用于调试流计算|
+|snodeAddress | |内部参数,用于调试流计算|
+|checkpointBackupDir | |内部参数,用于恢复 snode 数据|
+|enableAuditDelete | |内部参数,用于测试审计功能|
+|slowLogThresholdTest| |内部参数,用于测试慢日志|
+|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;默认值 0|
### 压缩参数
-
-| 参数名称 | 参数说明 |
-|:-------------:|:----------------------------------------------------------------:|
-| compressMsgSize | 是否对 RPC 消息进行压缩;-1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩;缺省值 -1 |
-| fPrecision | 设置 float 类型浮点数压缩精度 ,取值范围:0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断 |
-|dPrecision | 设置 double 类型浮点数压缩精度 , 取值范围:0.1 ~ 0.0000000000000001 , 缺省值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取 |
-|lossyColumn | 对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围: float, double, none;缺省值: none,表示关闭无损压缩。**注意:此参数在 3.3.0.0 及更高版本中不再使用** |
-|ifAdtFse | 在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法, FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法; 0: 关闭,1:打开;默认值为 0 |
-
+|参数名称|支持版本|参数含义|
+|------------|----------|-|
+|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
+|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取|
+|lossyColumn |3.3.0.0 前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭有损压缩|
+|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0|
+|maxRange | |内部参数,用于有损压缩设置|
+|curRange | |内部参数,用于有损压缩设置|
+|compressor | |内部参数,用于有损压缩设置|
**补充说明**
1. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
@@ -220,16 +320,6 @@ lossyColumns float|double
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
```
-### 其他参数
-
-| 参数名称 | 参数说明 |
-| :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 |
-| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值为 0 |
-| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变;默认值为 0 |
-| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值为20 |
-| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值为 3 |
-
## taosd 监控指标
@@ -282,7 +372,7 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| :------------- | :-------- | :------ | :--------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| tables\_num | DOUBLE | | vgroup 中 table 数量 |
-| status | DOUBLE | | vgroup 状态, 取值范围:unsynced = 0, ready = 1 |
+| status | DOUBLE | | vgroup 状态, 取值范围 unsynced = 0, ready = 1 |
| vgroup\_id | VARCHAR | TAG | vgroup id |
| database\_name | VARCHAR | TAG | vgroup 所属的 database 名字 |
| cluster\_id | VARCHAR | TAG | cluster id |
@@ -311,10 +401,10 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| io\_write\_disk | DOUBLE | | 磁盘 io 吞吐率,从 `/proc//io` 中读取的 write_bytes。单位 byte/s |
| vnodes\_num | DOUBLE | | dnode 上 vnodes 数量 |
| masters | DOUBLE | | dnode 上 master node 数量 |
-| has\_mnode | DOUBLE | | dnode 是否包含 mnode,取值范围:包含=1,不包含=0 |
-| has\_qnode | DOUBLE | | dnode 是否包含 qnode,取值范围:包含=1,不包含=0 |
-| has\_snode | DOUBLE | | dnode 是否包含 snode,取值范围:包含=1,不包含=0 |
-| has\_bnode | DOUBLE | | dnode 是否包含 bnode,取值范围:包含=1,不包含=0 |
+| has\_mnode | DOUBLE | | dnode 是否包含 mnode,取值范围 包含=1,不包含=0 |
+| has\_qnode | DOUBLE | | dnode 是否包含 qnode,取值范围 包含=1,不包含=0 |
+| has\_snode | DOUBLE | | dnode 是否包含 snode,取值范围 包含=1,不包含=0 |
+| has\_bnode | DOUBLE | | dnode 是否包含 bnode,取值范围 包含=1,不包含=0 |
| error\_log\_count | DOUBLE | | error 总数 |
| info\_log\_count | DOUBLE | | info 总数 |
| debug\_log\_count | DOUBLE | | debug 总数 |
@@ -330,7 +420,7 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :---------- | :-------- | :------ | :--------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
-| status | DOUBLE | | dnode 状态,取值范围:ready=1,offline =0 |
+| status | DOUBLE | | dnode 状态,取值范围 ready=1,offline =0 |
| dnode\_id | VARCHAR | TAG | dnode id |
| dnode\_ep | VARCHAR | TAG | dnode endpoint |
| cluster\_id | VARCHAR | TAG | cluster id |
@@ -373,7 +463,7 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :---------- | :-------- | :------ | :------------------------------------------------------------------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
-| role | DOUBLE | | mnode 角色, 取值范围:offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
+| role | DOUBLE | | mnode 角色, 取值范围 offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| mnode\_id | VARCHAR | TAG | master node id |
| mnode\_ep | VARCHAR | TAG | master node endpoint |
| cluster\_id | VARCHAR | TAG | cluster id |
@@ -385,7 +475,7 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| field | type | is\_tag | comment |
| :------------- | :-------- | :------ | :------------------------------------------------------------------------------------------------------ |
| \_ts | TIMESTAMP | | timestamp |
-| vnode\_role | DOUBLE | | vnode 角色,取值范围:offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
+| vnode\_role | DOUBLE | | vnode 角色,取值范围 offline = 0,follower = 100,candidate = 101,leader = 102,error = 103,learner = 104 |
| vgroup\_id | VARCHAR | TAG | dnode id |
| dnode\_id | VARCHAR | TAG | dnode id |
| database\_name | VARCHAR | TAG | vgroup 所属的 database 名字 |
@@ -399,9 +489,9 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :--------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
-| result | VARCHAR | TAG | sql的执行结果,取值范围:Success, Failed |
+| result | VARCHAR | TAG | sql的执行结果,取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
-| sql\_type | VARCHAR | TAG | sql类型,取值范围:inserted_rows |
+| sql\_type | VARCHAR | TAG | sql类型,取值范围 inserted_rows |
| dnode\_id | VARCHAR | TAG | dnode id |
| dnode\_ep | VARCHAR | TAG | dnode endpoint |
| vgroup\_id | VARCHAR | TAG | dnode id |
@@ -415,9 +505,9 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :---------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
-| result | VARCHAR | TAG | sql的执行结果,取值范围:Success, Failed |
+| result | VARCHAR | TAG | sql的执行结果,取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
-| sql\_type | VARCHAR | TAG | sql类型,取值范围:select, insert,delete |
+| sql\_type | VARCHAR | TAG | sql类型,取值范围 select, insert,delete |
| cluster\_id | VARCHAR | TAG | cluster id |
### taos\_slow\_sql 表
@@ -428,9 +518,9 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep
| :---------- | :-------- | :------ | :---------------------------------------------------- |
| \_ts | TIMESTAMP | | timestamp |
| count | DOUBLE | | sql 数量 |
-| result | VARCHAR | TAG | sql的执行结果,取值范围:Success, Failed |
+| result | VARCHAR | TAG | sql的执行结果,取值范围 Success, Failed |
| username | VARCHAR | TAG | 执行sql的user name |
-| duration | VARCHAR | TAG | sql执行耗时,取值范围:3-10s,10-100s,100-1000s,1000s- |
+| duration | VARCHAR | TAG | sql执行耗时,取值范围 3-10s,10-100s,100-1000s,1000s- |
| cluster\_id | VARCHAR | TAG | cluster id |
## 日志相关
diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md
index bd1e700041..631f457391 100755
--- a/docs/zh/14-reference/01-components/02-taosc.md
+++ b/docs/zh/14-reference/01-components/02-taosc.md
@@ -8,38 +8,101 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
## 配置参数
-| 参数名称 | 参数含义 |
-|:-----------:|:----------------------------------------------------------:|
-|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost |
-|secondEp | 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值 |
-|numOfRpcSessions | 一个客户端能创建的最大连接数,取值范围:10-50000000(单位为毫秒);缺省值:500000 |
-|telemetryReporting | 是否上传 telemetry,0: 不上传,1: 上传;缺省值:1 |
-|crashReporting | 是否上传 telemetry,0: 不上传,1: 上传;缺省值:1 |
-|queryPolicy | 查询语句的执行策略,1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 ;缺省值:1 |
-|querySmaOptimize | sma index 的优化策略,0: 表示不使用 sma index,永远从原始数据进行查询; 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0 |
-|keepColumnName | Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数; 1: 表示自动设置别名为列名(不包含函数名), 0: 表示不自动设置别名; 缺省值: 0 |
-|countAlwaysReturnValue | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值; 0:返回空行,1:返回; 缺省值 1; 该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. |
-|multiResultFunctionStarReturnTags | 查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响; 0:不返回标签列,1:返回标签列 ; 缺省值: 0; 该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列 |
-|maxTsmaCalcDelay| 查询时客户端可允许的tsma计算延迟, 若tsma的计算延迟大于配置值, 则该TSMA将不会被使用.; 取值范围: 600s - 86400s, 即10分钟-1小时 ; 缺省值:600 秒|
-|tsmaDataDeleteMark |TSMA计算的历史数据中间结果保存时间, 单位为毫秒; 取值范围:>= 3600000, 即大于等于1h; 缺省值: 86400000, 即1d |
-|timezone | 时区; 缺省从系统中动态获取当前的时区设置 |
-|locale | 系统区位信息及编码格式, 缺省从系统中获取 |
-|charset | 字符集编码,缺省从系统中获取 |
-|metaCacheMaxSize | 指定单个客户端元数据缓存大小的最大值, 单位 MB; 缺省值 -1,表示无限制 |
-|logDir | 日志文件目录,客户端运行日志将写入该目录, 缺省值: /var/log/taos |
-|minimalLogDirGB | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志; 缺省值 1 |
-|numOfLogLines | 单个日志文件允许的最大行数; 缺省值 10,000,000 |
-|asyncLog | 是否异步写入日志,0:同步;1:异步;缺省值:1 |
-|logKeepDays | 日志文件的最长保存时间; 缺省值: 0,表示无限保存; 大于 0 时,日志文件会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳|
-|smlChildTableName | schemaless 自定义的子表名的 key, 无缺省值 |
-|smlAutoChildTableNameDelimiter | schemaless tag之间的连接符,连起来作为子表名,无缺省值 |
-|smlTagName | schemaless tag 为空时默认的 tag 名字, 缺省值 "_tag_null" |
-|smlTsDefaultName | schemaless自动建表的时间列名字通过该配置设置, 缺省值 "_ts" |
-|smlDot2Underline | schemaless 把超级表名中的 dot 转成下划线 |
-|enableCoreFile | crash 时是否生成 core 文件,0: 不生成, 1: 生成;缺省值:1 |
-|enableScience | 是否开启科学计数法显示浮点数; 0: 不开始, 1: 开启;缺省值:1 |
-|compressMsgSize | 是否对 RPC 消息进行压缩; -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩; 缺省值 -1|
-|queryTableNotExistAsEmpty | 查询表不存在时是否返回空结果集; false: 返回错误; true: 返回空结果集; 缺省值 false|
+### 连接相关
+|参数名称|支持版本|参数含义|
+|----------------------|----------|-|
+|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost|
+|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值|
+|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
+|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
+|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000|
+|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半|
+|timeToGetAvailableConn| |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000|
+|useAdapter | |内部参数,是否使用 taosadapter,影响 CSV 文件导入|
+|shareConnLimit |3.3.4.3 后|内部参数,一个连接可以共享的查询数目,取值范围 1-256,默认值 10|
+|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
+
+### 查询相关
+|参数名称|支持版本|参数含义|
+|---------------------------------|---------|-|
+|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
+|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0|
+|multiResultFunctionStarReturnTags|3.3.3.0 后|查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
+|metaCacheMaxSize | |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制|
+|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒|
+|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d |
+|queryPolicy | |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1|
+|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false|
+|querySmaOptimize | |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0|
+|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
+|queryNodeChunkSize | |内部参数,查询计划的块大小|
+|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
+|queryMaxConcurrentTables | |内部参数,查询计划的并发数目|
+|enableQueryHb | |内部参数,是否发送查询心跳消息|
+|minSlidingTime | |内部参数,sliding 的最小允许值|
+|minIntervalTime | |内部参数,interval 的最小允许值|
+
+### 写入相关
+|参数名称|支持版本|参数含义|
+|------------------------------|----------|-|
+|smlChildTableName | |schemaless 自定义的子表名的 key,无缺省值|
+|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
+|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
+|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
+|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线|
+|maxInsertBatchRows | |内部参数,一批写入的最大条数|
+
+### 区域相关
+|参数名称|支持版本|参数含义|
+|-----------------|----------|-|
+|timezone | |时区;缺省从系统中动态获取当前的时区设置|
+|locale | |系统区位信息及编码格式,缺省从系统中获取|
+|charset | |字符集编码,缺省从系统中获取|
+
+### 存储相关
+|参数名称|支持版本|参数含义|
+|-----------------|----------|-|
+|tempDir | |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp|
+|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1|
+
+### 日志相关
+|参数名称|支持版本|参数含义|
+|-----------------|----------|-|
+|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
+|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1|
+|numOfLogLines | |单个日志文件允许的最大行数,缺省值:10,000,000|
+|asyncLog | |日志写入模式,0:同步,1:异步,缺省值:1|
+|logKeepDays | |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
+|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
+|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
+|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
+|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
+|jniDebugFlag | |jni 模块的日志开关,取值范围同上|
+|qDebugFlag | |query 模块的日志开关,取值范围同上|
+|cDebugFlag | |客户端模块的日志开关,取值范围同上|
+|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上|
+|tqClientDebugFlag|3.3.4.3 后|客户端模块的日志开关,取值范围同上|
+
+### 调试相关
+|参数名称|支持版本|参数含义|
+|-----------------|-----------|-|
+|crashReporting | |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1|
+|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1|
+|assert | |断言控制开关,缺省值:0|
+|configDir | |配置文件所在目录|
+|scriptDir | |内部参数,测试用例的目录|
+|randErrorChance |3.3.3.0 后|内部参数,用于随机失败测试|
+|randErrorDivisor |3.3.3.0 后|内部参数,用于随机失败测试|
+|randErrorScope |3.3.3.0 后|内部参数,用于随机失败测试|
+|safetyCheckLevel |3.3.3.0 后|内部参数,用于随机失败测试|
+|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
+|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
+|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;缺省值:0|
+
+### SHELL 相关
+|参数名称|支持版本|参数含义|
+|-----------------|----------|-|
+|enableScience | |是否开启科学计数法显示浮点数;0:不开启,1:开启;缺省值:1|
## API
diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md
index 91b39976a1..1ccc5071f0 100644
--- a/docs/zh/14-reference/03-taos-sql/02-database.md
+++ b/docs/zh/14-reference/03-taos-sql/02-database.md
@@ -8,10 +8,10 @@ description: "创建、删除数据库,查看、修改数据库参数"
```sql
CREATE DATABASE [IF NOT EXISTS] db_name [database_options]
-
+
database_options:
database_option ...
-
+
database_option: {
VGROUPS value
| PRECISION {'ms' | 'us' | 'ns'}
@@ -26,6 +26,7 @@ database_option: {
| MAXROWS value
| MINROWS value
| KEEP value
+ | KEEP_TIME_OFFSET value
| STT_TRIGGER value
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
@@ -43,7 +44,7 @@ database_option: {
- VGROUPS:数据库中初始 vgroup 的数目。
- PRECISION:数据库的时间戳精度。ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认 ms 毫秒。
-- REPLICA:表示数据库副本数,取值为 1、2 或 3,默认为 1; 2 仅在企业版 3.3.0.0 及以后版本中可用。在集群中使用,副本数必须小于或等于 DNODE 的数目。且使用时存在以下限制:
+- REPLICA:表示数据库副本数,取值为 1、2 或 3,默认为 1; 2 仅在企业版 3.3.0.0 及以后版本中可用。在集群中使用,副本数必须小于或等于 DNODE 的数目。且使用时存在以下限制:
- 暂不支持对双副本数据库相关 Vgroup 进行 SPLITE VGROUP 或 REDISTRIBUTE VGROUP 操作
- 单副本数据库可变更为双副本数据库,但不支持从双副本变更为其它副本数,也不支持从三副本变更为双副本
- BUFFER: 一个 VNODE 写入内存池大小,单位为 MB,默认为 256,最小为 3,最大为 16384。
@@ -63,7 +64,8 @@ database_option: {
- DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
- MINROWS:文件块中记录的最小条数,默认为 100 条。
-- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
+- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
+- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
- 0:表示可以创建多张超级表。
@@ -78,6 +80,7 @@ database_option: {
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。
- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
+
### 创建数据库示例
```sql
@@ -88,7 +91,7 @@ create database if not exists db vgroups 10 buffer 10
### 使用数据库
-```
+```sql
USE db_name;
```
@@ -96,7 +99,7 @@ USE db_name;
## 删除数据库
-```
+```sql
DROP DATABASE [IF EXISTS] db_name
```
@@ -126,7 +129,7 @@ alter_database_option: {
}
```
-### 修改 CACHESIZE
+### 修改 CACHESIZE
修改数据库参数的命令使用简单,难的是如何确定是否需要修改以及如何修改。本小节描述如何判断数据库的 cachesize 是否够用。
@@ -155,13 +158,13 @@ alter_database_option: {
### 查看系统中的所有数据库
-```
+```sql
SHOW DATABASES;
```
### 显示一个数据库的创建语句
-```
+```sql
SHOW CREATE DATABASE db_name \G;
```
@@ -189,23 +192,23 @@ TRIM DATABASE db_name;
FLUSH DATABASE db_name;
```
-落盘内存中的数据。在关闭节点之前,执行这条命令可以避免重启后的数据回放,加速启动过程。
+落盘内存中的数据。在关闭节点之前,执行这条命令可以避免重启后的预写数据日志回放,加速启动过程。
-## 调整VGROUP中VNODE的分布
+## 调整 VGROUP 中 VNODE 的分布
```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```
-按照给定的dnode列表,调整vgroup中的vnode分布。因为副本数目最大为3,所以最多输入3个dnode。
+按照给定的 dnode 列表,调整 vgroup 中的 vnode 分布。因为副本数目最大为 3,所以最多输入 3 个 dnode。
-## 自动调整VGROUP中VNODE的分布
+## 自动调整 VGROUP 中 LEADER 的分布
```sql
-BALANCE VGROUP
+BALANCE VGROUP LEADER
```
-自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。
+触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。
## 查看数据库工作状态
diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md
index 81ad60e3d2..40e2802fcd 100644
--- a/docs/zh/14-reference/03-taos-sql/03-table.md
+++ b/docs/zh/14-reference/03-taos-sql/03-table.md
@@ -227,7 +227,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
如下 SQL 语句可以列出当前数据库中的所有表名。
```sql
-SHOW TABLES [LIKE tb_name_wildchar];
+SHOW TABLES [LIKE tb_name_wildcard];
```
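+
+例如,下面的语句列出所有以 meters 开头的表(表名前缀仅为示意):
+
+```sql
+SHOW TABLES LIKE 'meters%';
+```
+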
### 显示表创建语句
diff --git a/docs/zh/14-reference/03-taos-sql/05-insert.md b/docs/zh/14-reference/03-taos-sql/05-insert.md
index 40f8e95006..ccf24e882c 100644
--- a/docs/zh/14-reference/03-taos-sql/05-insert.md
+++ b/docs/zh/14-reference/03-taos-sql/05-insert.md
@@ -5,9 +5,11 @@ description: 写入数据的详细语法
---
## 写入语法
+
写入记录支持两种语法, 正常语法和超级表语法. 正常语法下, 紧跟INSERT INTO后名的表名是子表名或者普通表名. 超级表语法下, 紧跟INSERT INTO后名的表名是超级表名
### 正常语法
+
```sql
INSERT INTO
tb_name
@@ -22,7 +24,9 @@ INSERT INTO
INSERT INTO tb_name [(field1_name, ...)] subquery
```
+
### 超级表语法
+
```sql
INSERT INTO
stb1_name [(field1_name, ...)]
@@ -32,16 +36,18 @@ INSERT INTO
...];
```
-**关于时间戳**
+#### 关于主键时间戳
-1. TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点:
+TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点:
-2. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。
+1. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。
-3. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
- 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 KEEP 值(数据保留的天数, 可以在创建数据库时指定,缺省值是 3650 天)。允许插入的最新记录的时间戳,取决于数据库的 PRECISION 值(时间戳精度, 可以在创建数据库时指定, ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认毫秒):如果是毫秒或微秒, 取值为 1970 年 1 月 1 日 00:00:00.000 UTC 加上 1000 年, 即 2970 年 1 月 1 日 00:00:00.000 UTC; 如果是纳秒, 取值为 1970 年 1 月 1 日 00:00:00.000000000 UTC 加上 292 年, 即 2262 年 1 月 1 日 00:00:00.000000000 UTC。
+2. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
-**语法说明**
+3. 允许插入的最大时间戳为当前时间加上 100 年,比如当前时间为 `2024-11-11 12:00:00`,则允许插入的最大时间戳为 `2124-11-11 12:00:00`。允许插入的最小时间戳取决于数据库的 KEEP 设置。企业版支持三级存储,可以设置多个 KEEP 时间,如下图所示,如果数据库的 KEEP 配置为 `100h,100d,3650d`,则允许的最小时间戳为当前时间减去 3650 天。那么时间戳在 `[Now - 100h, Now + 100y)` 内的会保存在一级存储,时间戳在 `[Now - 100d, Now - 100h)` 内的会保存在二级存储,时间戳在 `[Now - 3650d, Now - 100d)` 内的会保存在三级存储。社区版不支持多级存储功能,只能配置一个 KEEP 值,如果配置多个,则取其最大者。如果时间戳不在有效时间范围内,TDengine 将返回错误“Timestamp out of range”。
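+
+以下示例演示上文第 1 条中两种时间戳写法的区别(仅作示意,假设数据库精度为毫秒,子表 d1001 已按前文以 meters 为模板创建):
+
+```sql
+-- 字符串格式的时间戳不受数据库精度设置影响
+INSERT INTO d1001 VALUES ('2021-07-13 16:16:48', 10.2, 219, 0.31);
+-- 长整型格式的时间戳需与所在数据库精度一致:毫秒精度下写作 1626164208000
+INSERT INTO d1001 VALUES (1626164208000, 10.2, 219, 0.31);
+```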
+
+
+#### 语法说明
1. 可以指定要插入值的列,对于未指定的列数据库将自动填充为 NULL。
@@ -56,22 +62,24 @@ INSERT INTO
```sql
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```
+
6. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
7. 主键列值必须指定且不能为 NULL。
-**正常语法说明**
+#### 正常语法说明
1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。
2. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
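+
+以下为一个示意性示例(子表名 d1003 与标签取值仅作演示):
+
+```sql
+-- 自动建表写入:若子表 d1003 不存在,则以超级表 meters 为模板创建
+INSERT INTO d1003 USING meters TAGS ('California.SanFrancisco', 3) VALUES (NOW, 10.3, 218, 0.25);
+-- 将查询结果写入已存在的表(此语法不支持自动建表)
+INSERT INTO d1003 SELECT * FROM d1001;
+```
+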
-**超级表语法说明**
+#### 超级表语法说明
1. 在 field_name 列表中必须指定 tbname 列,否则报错。tbname 列是子表名,类型是字符串,其中字符不用转义,且不能包含点 '.'。
2. 在 field_name 列表中支持标签列。当子表已经存在时,指定标签值并不会触发标签值的修改;当子表不存在时会使用所指定的标签值建立子表。如果没有指定任何标签列,则把所有标签列的值设置为 NULL。
3. 不支持参数绑定写入
+
## 插入一条记录
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
@@ -154,15 +162,18 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
+
## 向超级表插入数据并自动创建子表
自动建表, 表名通过 tbname 列指定
+
```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
```
+
## 通过 CSV 文件向超级表插入数据并自动创建子表
根据 csv 文件内容,为超级表创建子表,并填充相应的 column 与 tag。
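+
+一个示意性的写法如下(CSV 文件路径沿用前文示例,实际使用时请替换为真实路径):
+
+```sql
+INSERT INTO meters (tbname, location, groupId, ts, current, voltage, phase)
+    FILE '/tmp/csvfile_21002.csv';
+```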
diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md
index bbc6a0b81a..2f4b739447 100644
--- a/docs/zh/14-reference/03-taos-sql/10-function.md
+++ b/docs/zh/14-reference/03-taos-sql/10-function.md
@@ -1065,7 +1065,7 @@ CAST(expr AS type_name)
TO_ISO8601(expr [, timezone])
```
-**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
+**功能说明**:将时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
**返回结果数据类型**:VARCHAR 类型。
@@ -1109,7 +1109,7 @@ return_timestamp: {
}
```
-**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
+**功能说明**:将日期时间格式的字符串转换成为时间戳。
**返回结果数据类型**:BIGINT, TIMESTAMP。
@@ -1257,8 +1257,8 @@ TIMEDIFF(expr1, expr2 [, time_unit])
**返回结果类型**:BIGINT。
**适用数据类型**:
-- `expr1`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
-- `expr2`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr1`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr2`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
- `time_unit`:见使用说明。
**嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1301,7 +1301,7 @@ use_current_timezone: {
**返回结果数据类型**:TIMESTAMP。
-**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**应用字段**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
**适用于**:表和超级表。
@@ -1364,7 +1364,7 @@ WEEK(expr [, mode])
**返回结果类型**:BIGINT。
**适用数据类型**:
-- `expr`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
- `mode`:0 - 7 之间的整数。
**嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1424,7 +1424,7 @@ WEEKOFYEAR(expr)
**返回结果类型**:BIGINT。
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1451,7 +1451,7 @@ WEEKDAY(expr)
**返回结果类型**:BIGINT。
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1478,7 +1478,7 @@ DAYOFWEEK(expr)
**返回结果类型**:BIGINT。
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1988,7 +1988,7 @@ TOP(expr, k)
UNIQUE(expr)
```
-**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。对于存在复合主键的表的查询,若最小时间戳的数据有多条,则只有对应的复合主键最小的数据被返回。
+**功能说明**:返回该列数据去重后的值。该函数功能与 distinct 相似。对于相同的数据,返回时间戳最小的一条;对于存在复合主键的表的查询,若最小时间戳的数据有多条,则只有对应的复合主键最小的数据被返回。
**返回数据类型**:同应用的字段。
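+
+一个示意性用法(假设子表 d1001 已存在):
+
+```sql
+SELECT UNIQUE(voltage) FROM d1001;
+```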
diff --git a/docs/zh/14-reference/03-taos-sql/20-keywords.md b/docs/zh/14-reference/03-taos-sql/20-keywords.md
index 1ef0d07e14..f7cc5d17c0 100644
--- a/docs/zh/14-reference/03-taos-sql/20-keywords.md
+++ b/docs/zh/14-reference/03-taos-sql/20-keywords.md
@@ -11,337 +11,470 @@ description: TDengine 保留关键字的详细列表
关键字列表如下:
### A
-
-- ABORT
-- ACCOUNT
-- ACCOUNTS
-- ADD
-- AFTER
-- AGGREGATE
-- ALIVE
-- ALL
-- ALTER
-- ANALYZE
-- AND
-- APPS
-- AS
-- ASC
-- AT_ONCE
-- ATTACH
+|关键字|说明|
+|----------------------|-|
+| ABORT | |
+| ACCOUNT | |
+| ACCOUNTS | |
+| ADD | |
+| AFTER | |
+| AGGREGATE | |
+| ALIAS | |
+| ALIVE | |
+| ALL | |
+| ALTER | |
+| ANALYZE | 3.3.4.3 及后续版本 |
+| AND | |
+| ANODE | 3.3.4.3 及后续版本 |
+| ANODES | 3.3.4.3 及后续版本 |
+| ANOMALY_WINDOW | 3.3.4.3 及后续版本 |
+| ANTI | |
+| APPS | |
+| ARBGROUPS | |
+| ARROW | |
+| AS | |
+| ASC | |
+| ASOF | |
+| AT_ONCE | |
+| ATTACH | |
### B
-
-- BALANCE
-- BEFORE
-- BEGIN
-- BETWEEN
-- BIGINT
-- BINARY
-- BITAND
-- BITNOT
-- BITOR
-- BLOCKS
-- BNODE
-- BNODES
-- BOOL
-- BUFFER
-- BUFSIZE
-- BY
+|关键字|说明|
+|----------------------|-|
+| BALANCE | |
+| BEFORE | |
+| BEGIN | |
+| BETWEEN | |
+| BIGINT | |
+| BIN | |
+| BINARY | |
+| BITAND | |
+| BITNOT | |
+| BITOR | |
+| BLOB | |
+| BLOCKS | |
+| BNODE | |
+| BNODES | |
+| BOOL | |
+| BOTH | |
+| BUFFER | |
+| BUFSIZE | |
+| BWLIMIT | |
+| BY | |
### C
-
-- CACHE
-- CACHEMODEL
-- CACHESIZE
-- CASCADE
-- CAST
-- CHANGE
-- CLIENT_VERSION
-- CLUSTER
-- COLON
-- COLUMN
-- COMMA
-- COMMENT
-- COMP
-- COMPACT
-- CONCAT
-- CONFLICT
-- CONNECTION
-- CONNECTIONS
-- CONNS
-- CONSUMER
-- CONSUMERS
-- CONTAINS
-- COPY
-- COUNT
-- CREATE
-- CURRENT_USER
+|关键字|说明|
+|----------------------|-|
+| CACHE | |
+| CACHEMODEL | |
+| CACHESIZE | |
+| CASE | |
+| CAST | |
+| CHANGE | |
+| CHILD | |
+| CLIENT_VERSION | |
+| CLUSTER | |
+| COLON | |
+| COLUMN | |
+| COMMA | |
+| COMMENT | |
+| COMP | |
+| COMPACT | |
+| COMPACTS | |
+| CONCAT | |
+| CONFLICT | |
+| CONNECTION | |
+| CONNECTIONS | |
+| CONNS | |
+| CONSUMER | |
+| CONSUMERS | |
+| CONTAINS | |
+| COPY | |
+| COUNT | |
+| COUNT_WINDOW | |
+| CREATE | |
+| CREATEDB | |
+| CURRENT_USER | |
### D
-
-- DATABASE
-- DATABASES
-- DBS
-- DEFERRED
-- DELETE
-- DELIMITERS
-- DESC
-- DESCRIBE
-- DETACH
-- DISTINCT
-- DISTRIBUTED
-- DIVIDE
-- DNODE
-- DNODES
-- DOT
-- DOUBLE
-- DROP
-- DURATION
+|关键字|说明|
+|----------------------|-|
+| DATABASE | |
+| DATABASES | |
+| DBS | |
+| DECIMAL | |
+| DEFERRED | |
+| DELETE | |
+| DELETE_MARK | |
+| DELIMITERS | |
+| DESC | |
+| DESCRIBE | |
+| DETACH | |
+| DISTINCT | |
+| DISTRIBUTED | |
+| DIVIDE | |
+| DNODE | |
+| DNODES | |
+| DOT | |
+| DOUBLE | |
+| DROP | |
+| DURATION | |
### E
-
-- EACH
-- ENABLE
-- END
-- EVERY
-- EXISTS
-- EXPIRED
-- EXPLAIN
+|关键字|说明|
+|----------------------|-|
+| EACH | |
+| ELSE | |
+| ENABLE | |
+| ENCRYPT_ALGORITHM | |
+| ENCRYPT_KEY | |
+| ENCRYPTIONS | |
+| END | |
+| EQ | |
+| EVENT_WINDOW | |
+| EVERY | |
+| EXCEPT | |
+| EXISTS | |
+| EXPIRED | |
+| EXPLAIN | |
### F
-
-- FAIL
-- FILE
-- FILL
-- FIRST
-- FLOAT
-- FLUSH
-- FOR
-- FROM
-- FUNCTION
-- FUNCTIONS
+|关键字|说明|
+|----------------------|-|
+| FAIL | |
+| FHIGH | 3.3.4.3 及后续版本 |
+| FILE | |
+| FILL | |
+| FILL_HISTORY | |
+| FIRST | |
+| FLOAT | |
+| FLOW | 3.3.4.3 及后续版本 |
+| FLUSH | |
+| FOR | |
+| FORCE | |
+| FORCE_WINDOW_CLOSE | 3.3.4.3 及后续版本 |
+| FROM | |
+| FROWTS | 3.3.4.3 及后续版本 |
+| FULL | |
+| FUNCTION | |
+| FUNCTIONS | |
### G
-
-- GLOB
-- GRANT
-- GRANTS
-- GROUP
+|关键字|说明|
+|----------------------|-|
+| GE | |
+| GEOMETRY | |
+| GLOB | |
+| GRANT | |
+| GRANTS | |
+| GROUP | |
+| GT | |
### H
-
-- HAVING
-- HOST
+|关键字|说明|
+|----------------------|-|
+| HAVING | |
+| HEX | |
+| HOST | |
### I
-
-- ID
-- IF
-- IGNORE
-- IMMEDIATE
-- IMPORT
-- IN
-- INDEX
-- INDEXES
-- INITIALLY
-- INNER
-- INSERT
-- INSTEAD
-- INT
-- INTEGER
-- INTERVAL
-- INTO
-- IS
-- IS NULL
+|关键字|说明|
+|----------------------|-|
+| ID | |
+| IF | |
+| IGNORE | |
+| ILLEGAL | |
+| IMMEDIATE | |
+| IMPORT | |
+| IN | |
+| INDEX | |
+| INDEXES | |
+| INITIALLY | |
+| INNER | |
+| INSERT | |
+| INSTEAD | |
+| INT | |
+| INTEGER | |
+| INTERSECT | |
+| INTERVAL | |
+| INTO | |
+| IPTOKEN | |
+| IROWTS | |
+| IS | |
+| IS_IMPORT | |
+| ISFILLED | |
+| ISNULL | |
### J
-
-- JOIN
-- JSON
+|关键字|说明|
+|----------------------|-|
+| JLIMIT | |
+| JOIN | |
+| JSON | |
### K
-
-- KEEP
-- KEY
-- KILL
+|关键字|说明|
+|----------------------|-|
+| KEEP | |
+| KEEP_TIME_OFFSET | |
+| KEY | |
+| KILL | |
### L
-
-- LAST
-- LAST_ROW
-- LICENCES
-- LIKE
-- LIMIT
-- LINEAR
-- LOCAL
+|关键字|说明|
+|----------------------|-|
+| LANGUAGE | |
+| LAST | |
+| LAST_ROW | |
+| LE | |
+| LEADER | |
+| LEADING | |
+| LEFT | |
+| LICENCES | |
+| LIKE | |
+| LIMIT | |
+| LINEAR | |
+| LOCAL | |
+| LOGS | |
+| LP | |
+| LSHIFT | |
+| LT | |
### M
-
-- MATCH
-- MAX_DELAY
-- BWLIMIT
-- MAXROWS
-- MAX_SPEED
-- MERGE
-- META
-- MINROWS
-- MINUS
-- MNODE
-- MNODES
-- MODIFY
-- MODULES
+|关键字|说明|
+|----------------------|-|
+| MACHINES | |
+| MATCH | |
+| MAX_DELAY | |
+| MAXROWS | |
+| MEDIUMBLOB | |
+| MERGE | |
+| META | |
+| MINROWS | |
+| MINUS | |
+| MNODE | |
+| MNODES | |
+| MODIFY | |
+| MODULES | |
### N
-
-- NCHAR
-- NEXT
-- NMATCH
-- NONE
-- NOT
-- NOT NULL
-- NOW
-- NULL
-- NULLS
+|关键字|说明|
+|----------------------|-|
+| NCHAR | |
+| NE | |
+| NEXT | |
+| NMATCH | |
+| NONE | |
+| NORMAL | |
+| NOT | |
+| NOTNULL | |
+| NOW | |
+| NULL | |
+| NULL_F | |
+| NULLS | |
### O
-
-- OF
-- OFFSET
-- ON
-- OR
-- ORDER
-- OUTPUTTYPE
+|关键字|说明|
+|----------------------|-|
+| OF | |
+| OFFSET | |
+| ON | |
+| ONLY | |
+| OR | |
+| ORDER | |
+| OUTER | |
+| OUTPUTTYPE | |
### P
-
-- PAGES
-- PAGESIZE
-- PARTITIONS
-- PASS
-- PLUS
-- PORT
-- PPS
-- PRECISION
-- PREV
-- PRIVILEGE
+|关键字|说明|
+|----------------------|-|
+| PAGES | |
+| PAGESIZE | |
+| PARTITION | |
+| PASS | |
+| PAUSE | |
+| PI | |
+| PLUS | |
+| PORT | |
+| POSITION | |
+| PPS | |
+| PRECISION | |
+| PREV | |
+| PRIMARY | |
+| PRIVILEGE | |
+| PRIVILEGES | |
### Q
-
-- QNODE
-- QNODES
-- QTIME
-- QUERIES
-- QUERY
+|关键字|说明|
+|----------------------|-|
+| QDURATION | |
+| QEND | |
+| QNODE | |
+| QNODES | |
+| QSTART | |
+| QTAGS | |
+| QTIME | |
+| QUERIES | |
+| QUERY | |
+| QUESTION | |
### R
-
-- RAISE
-- RANGE
-- RATIO
-- READ
-- REDISTRIBUTE
-- RENAME
-- REPLACE
-- REPLICA
-- RESET
-- RESTRICT
-- RETENTIONS
-- REVOKE
-- ROLLUP
-- ROW
+|关键字|说明|
+|----------------------|-|
+| RAISE | |
+| RAND | |
+| RANGE | |
+| RATIO | |
+| READ | |
+| RECURSIVE | |
+| REDISTRIBUTE | |
+| REM | |
+| REPLACE | |
+| REPLICA | |
+| RESET | |
+| RESTORE | |
+| RESTRICT | |
+| RESUME | |
+| RETENTIONS | |
+| REVOKE | |
+| RIGHT | |
+| ROLLUP | |
+| ROW | |
+| ROWTS | |
+| RP | |
+| RSHIFT | |
### S
-
-- SCHEMALESS
-- SCORES
-- SELECT
-- SEMI
-- SERVER_STATUS
-- SERVER_VERSION
-- SESSION
-- SET
-- SHOW
-- SINGLE_STABLE
-- SLIDING
-- SLIMIT
-- SMA
-- SMALLINT
-- SNODE
-- SNODES
-- SOFFSET
-- SPLIT
-- STABLE
-- STABLES
-- START
-- STATE
-- STATE_WINDOW
-- STATEMENT
-- STORAGE
-- STREAM
-- STREAMS
-- STRICT
-- STRING
-- SUBSCRIPTIONS
-- SYNCDB
-- SYSINFO
+|关键字|说明|
+|----------------------|-|
+| S3_CHUNKPAGES | |
+| S3_COMPACT | |
+| S3_KEEPLOCAL | |
+| SCHEMALESS | |
+| SCORES | |
+| SELECT | |
+| SEMI | |
+| SERVER_STATUS | |
+| SERVER_VERSION | |
+| SESSION | |
+| SET | |
+| SHOW | |
+| SINGLE_STABLE | |
+| SLASH | |
+| SLIDING | |
+| SLIMIT | |
+| SMA | |
+| SMALLINT | |
+| SMIGRATE | |
+| SNODE | |
+| SNODES | |
+| SOFFSET | |
+| SPLIT | |
+| STABLE | |
+| STABLES | |
+| STAR | |
+| START | |
+| STATE | |
+| STATE_WINDOW | |
+| STATEMENT | |
+| STORAGE | |
+| STREAM | |
+| STREAMS | |
+| STRICT | |
+| STRING | |
+| STT_TRIGGER | |
+| SUBSCRIBE | |
+| SUBSCRIPTIONS | |
+| SUBSTR | |
+| SUBSTRING | |
+| SUBTABLE | |
+| SYSINFO | |
+| SYSTEM | |
### T
-
-- TABLE
-- TABLES
-- TAG
-- TAGS
-- TBNAME
-- TIMES
-- TIMESTAMP
-- TIMEZONE
-- TINYINT
-- TO
-- TODAY
-- TOPIC
-- TOPICS
-- TRANSACTION
-- TRANSACTIONS
-- TRIGGER
-- TRIM
-- TSERIES
-- TTL
+|关键字|说明|
+|----------------------|-|
+| TABLE | |
+| TABLE_PREFIX | |
+| TABLE_SUFFIX | |
+| TABLES | |
+| TAG | |
+| TAGS | |
+| TBNAME | |
+| THEN | |
+| TIMES | |
+| TIMESTAMP | |
+| TIMEZONE | |
+| TINYINT | |
+| TO | |
+| TODAY | |
+| TOPIC | |
+| TOPICS | |
+| TRAILING | |
+| TRANSACTION | |
+| TRANSACTIONS | |
+| TRIGGER | |
+| TRIM | |
+| TSDB_PAGESIZE | |
+| TSERIES | |
+| TSMA | |
+| TSMAS | |
+| TTL | |
### U
-
-- UNION
-- UNSIGNED
-- UPDATE
-- USE
-- USER
-- USERS
-- USING
+|关键字|说明|
+|----------------------|-|
+| UNION | |
+| UNSAFE | |
+| UNSIGNED | |
+| UNTREATED | |
+| UPDATE | |
+| USE | |
+| USER | |
+| USERS | |
+| USING | |
### V
+|关键字|说明|
+|----------------------|-|
+| VALUE | |
+| VALUE_F | |
+| VALUES | |
+| VARBINARY | |
+| VARCHAR | |
+| VARIABLE | |
+| VARIABLES | |
+| VERBOSE | |
+| VGROUP | |
+| VGROUPS | |
+| VIEW | |
+| VIEWS | |
+| VNODE | |
+| VNODES | |
-- VALUE
-- VALUES
-- VARCHAR
-- VARIABLE
-- VARIABLES
-- VERBOSE
-- VGROUP
-- VGROUPS
-- VIEW
-- VNODES
### W
-
-- WAL
-- WAL_FSYNC_PERIOD
-- WAL_LEVEL
-- WAL_RETENTION_PERIOD
-- WAL_RETENTION_SIZE
-- WATERMARK
-- WHERE
-- WINDOW_CLOSE
-- WITH
-- WRITE
+|关键字|说明|
+|----------------------|-|
+| WAL | |
+| WAL_FSYNC_PERIOD | |
+| WAL_LEVEL | |
+| WAL_RETENTION_PERIOD | |
+| WAL_RETENTION_SIZE | |
+| WAL_ROLL_PERIOD | |
+| WAL_SEGMENT_SIZE | |
+| WATERMARK | |
+| WDURATION | |
+| WEND | |
+| WHEN | |
+| WHERE | |
+| WINDOW | |
+| WINDOW_CLOSE | |
+| WINDOW_OFFSET | |
+| WITH | |
+| WRITE | |
+| WSTART | |
### \_
diff --git a/docs/zh/14-reference/03-taos-sql/28-index.md b/docs/zh/14-reference/03-taos-sql/28-tsma.md
similarity index 100%
rename from docs/zh/14-reference/03-taos-sql/28-index.md
rename to docs/zh/14-reference/03-taos-sql/28-tsma.md
diff --git a/docs/zh/14-reference/03-taos-sql/32-compress.md b/docs/zh/14-reference/03-taos-sql/32-compress.md
index 0f2b260832..51650c9123 100644
--- a/docs/zh/14-reference/03-taos-sql/32-compress.md
+++ b/docs/zh/14-reference/03-taos-sql/32-compress.md
@@ -31,11 +31,12 @@ description: 可配置压缩算法
| 数据类型 | 可选编码算法 | 编码算法默认值 | 可选压缩算法|压缩算法默认值| 压缩等级默认值|
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
-| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
-| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
+| int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
+| tinyint/utinyint/smallint/usmallint | simple8b| simple8b | lz4/zlib/zstd/xz| zlib | medium|
+| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
-|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
-|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
+|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| zstd| medium|
+|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| zstd| medium|
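+
+以下为一个示意性的建表语句,演示如何为列单独指定编码、压缩算法与压缩等级(表名与列名仅作示例,完整语法以下文“SQL 语法”一节为准):
+
+```sql
+CREATE TABLE IF NOT EXISTS sensor_data (
+    ts  TIMESTAMP ENCODE 'delta-i' COMPRESS 'lz4' LEVEL 'medium',
+    val DOUBLE    ENCODE 'delta-d' COMPRESS 'tsz' LEVEL 'medium'
+);
+```
+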
## SQL 语法
diff --git a/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg b/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg
new file mode 100644
index 0000000000..248a9041d3
Binary files /dev/null and b/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg differ
diff --git a/docs/zh/14-reference/05-connector/30-python.mdx b/docs/zh/14-reference/05-connector/30-python.mdx
index 8436c30249..3991477635 100644
--- a/docs/zh/14-reference/05-connector/30-python.mdx
+++ b/docs/zh/14-reference/05-connector/30-python.mdx
@@ -41,6 +41,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|Python Connector 版本|主要变化|
|:-------------------:|:----:|
+|2.7.16|新增订阅配置 (session.timeout.ms, max.poll.interval.ms)|
|2.7.15|新增 VARBINARY 和 GEOMETRY 类型支持|
|2.7.14|修复已知问题|
|2.7.13|新增 tmq 同步提交 offset 接口|
@@ -50,6 +51,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|Python WebSocket Connector 版本|主要变化|
|:----------------------------:|:-----:|
+|0.3.5|新增 VARBINARY 和 GEOMETRY 类型支持,修复已知问题|
|0.3.2|优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题|
|0.2.9|已知问题修复|
|0.2.5|1. 数据订阅支持获取消费进度和重置消费进度
2. 支持 schemaless
3. 支持 STMT|
diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx
index d9512eae78..df2abfab3d 100644
--- a/docs/zh/14-reference/05-connector/35-node.mdx
+++ b/docs/zh/14-reference/05-connector/35-node.mdx
@@ -26,6 +26,7 @@ Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter
| Node.js 连接器 版本 | 主要变化 | TDengine 版本 |
| :------------------: | :----------------------: | :----------------: |
+| 3.1.2 | 对数据协议和解析进行了优化,性能得到大幅提升| 3.3.2.0 及更高版本 |
| 3.1.1 | 优化了数据传输性能 | 3.3.2.0 及更高版本 |
| 3.1.0 | 新版本发布,支持 WebSocket 连接 | 3.2.0.0 及更高版本 |
diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md
index 8aa69e45d5..7091ca9661 100644
--- a/docs/zh/26-tdinternal/01-arch.md
+++ b/docs/zh/26-tdinternal/01-arch.md
@@ -293,6 +293,14 @@ TDengine 采纳了一种独特的时间驱动缓存管理策略,亦称为写
此外,考虑到物联网数据的特点,用户通常最关注的是数据的实时性,即最新产生的数据。TDengine 很好地利用了这一特点,优先将最新到达的(即当前状态)数据存储在缓存中。具体而言,TDengine 会将最新到达的数据直接存入缓存,以便快速响应用户对最新一条或多条数据的查询和分析需求,从而在整体上提高数据库查询的响应速度。从这个角度来看,通过合理设置数据库参数,TDengine 完全可以作为数据缓存来使用,这样就无须再部署 Redis 或其他额外的缓存系统。这种做法不仅有效简化了系统架构,还有助于降低运维成本。需要注意的是,一旦 TDengine 重启,缓存中的数据将被清除,所有先前缓存的数据都会被批量写入硬盘,而不会像专业的 Key-Value 缓存系统那样自动将之前缓存的数据重新加载回缓存。
+### last/last_row 缓存
+
+在时序数据的场景中,查询表的最后一条记录(last_row)或最后一条非 NULL 记录(last)是一个常见的需求。为了提高 TDengine 对这种查询的响应速度,TSDB 为每张表的 last 和 last_row 数据提供了 LRU 缓存。LRU 缓存采用延迟加载策略,当首次查询某张表的 last 或 last_row 时,缓存模块会去内存池和磁盘文件加载数据,处理后放入 LRU 缓存,并返回给查询模块继续处理;当有新的数据插入或删除时,如果缓存需要更新,会进行相应的更新操作;如果缓存中没有当前被写入表的数据,则直接跳过,无需其它操作。
+
+此外在缓存配置更新的时候,也会更新缓存数据。比如,缓存功能默认是关闭的,用户使用命令开启缓存功能之后,就会在首次查询时加载数据;当关闭缓存开关时,会释放之前的缓存区。当查询某一个子表的 last 或 last_row 数据时,如果缓存中没有,则从内存池和磁盘文件加载对应的 last 或 last_row 数据到缓存中;当查询某一个超级表的 last 或 last_row 数据时,这个超级表对应的所有子表都需要加载到缓存中。
+
+通过数据库参数 cachemodel 可以配置某一个数据库的缓存模式,默认值为 "none",表示不开启缓存;另外三个可选值为 "last_row"、"last_value"、"both",分别表示开启 last_row 缓存、开启 last 缓存,以及两者同时开启。缓存当前所使用的内存数量,可以通过 show vgroups 命令在 cacheload 列中查看,单位为字节。
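+
+例如,可以按如下方式开启缓存并观察其内存占用(数据库名 power 仅作示意):
+
+```sql
+ALTER DATABASE power CACHEMODEL 'both';  -- 同时开启 last_row 与 last 缓存
+USE power;
+SHOW VGROUPS;                            -- 在 cacheload 列查看缓存占用,单位为字节
+```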
+
### 持久化存储
TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化存储。当 vnode 中的缓存数据积累到一定量时,为了避免阻塞后续数据的写入,TDengine 会启动落盘线程,将这些缓存数据写入持久化存储设备。在此过程中,TDengine 会创建新的数据库日志文件用于数据落盘,并在落盘成功后删除旧的日志文件,以防止日志文件无限制增长。
diff --git a/docs/zh/26-tdinternal/10-cache.md b/docs/zh/26-tdinternal/10-cache.md
new file mode 100644
index 0000000000..698f4ee87a
--- /dev/null
+++ b/docs/zh/26-tdinternal/10-cache.md
@@ -0,0 +1,62 @@
+---
+sidebar_label: 数据缓存
+title: 数据缓存
+toc_max_heading_level: 4
+---
+在现代物联网(IoT)和工业互联网(IIoT)应用中,数据的高效管理对系统性能和用户体验至关重要。为了应对高并发环境下的实时读写需求,TDengine 设计了一套完整的缓存机制,包括写缓存、读缓存、元数据缓存和文件系统缓存。这些缓存机制紧密结合,既能优化数据查询的响应速度,又能提高数据写入的效率,同时保障数据的可靠性和系统的高可用性。通过灵活配置缓存参数,TDengine 为用户提供了性能与成本之间的最佳平衡。
+
+## 写缓存
+
+TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。
+
+在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。
+
+为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。
+
+创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。
+
+例如,下面的 SQL 创建了包含 10 个 vgroup、每个 vnode 占用 256MB 内存的数据库。
+```sql
+CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16;
+```
+
+在一定范围内,写入缓存越大,写入性能越好;但超过一定阈值后,再增加缓存对写入性能的提升并无帮助。
+
+## 读缓存
+
+TDengine 的读缓存机制专为高频实时查询场景设计,尤其适用于物联网和工业互联网等需要实时掌握设备状态的业务场景。在这些场景中,用户往往最关心最新的数据,如设备的当前读数或状态。
+
+通过设置 cachemodel 参数,TDengine 用户可以灵活选择适合的缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活性使 TDengine 能根据具体业务需求提供精准优化,在物联网场景下尤为突出,助力用户快速访问设备的最新状态。
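+
+一个示意性的配置与查询示例(数据库名 power 与超级表 meters 仅作示意):
+
+```sql
+CREATE DATABASE IF NOT EXISTS power CACHEMODEL 'both' CACHESIZE 16;
+USE power;
+SELECT LAST_ROW(ts, current) FROM meters;  -- 命中读缓存,可快速返回设备最新状态
+```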
+
+这种设计不仅降低了查询的响应延迟,还能有效缓解存储系统的 I/O 压力。在高并发场景下,读缓存能够帮助系统维持更高的吞吐量,确保查询性能的稳定性。借助 TDengine 读缓存,用户无需再集成如 Redis 一类的外部缓存系统,避免了系统架构的复杂化,显著降低运维和部署成本。
+
+此外,TDengine 的读缓存机制还能够根据实际业务场景灵活调整。在数据访问热点集中在最新记录的场景中,这种内置缓存能够显著提高用户体验,让关键数据的获取更加快速高效。相比传统缓存方案,这种无缝集成的缓存策略不仅简化了开发流程,还为用户提供了更高的性能保障。
+
+关于 TDengine 读缓存的更多详细内容请看[读缓存](../../advanced/cache/)
+
+## 元数据缓存
+
+为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存:
+
+```sql
+CREATE DATABASE POWER PAGES 128 PAGESIZE 16;
+```
+
+## 文件系统缓存
+
+TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。
+
+TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。
+
+数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。
+- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。
+- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 则表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,则表示 fsync 周期,默认为 3000,范围是 [1, 180000],单位为毫秒。
+
+```sql
+CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
+```
+
+在创建数据库时,用户可以根据需求选择不同的参数设置,以在性能和可靠性之间找到最佳平衡:
+- 性能优先:将数据写入 WAL,但不立即执行 fsync 操作,此时新写入的数据仅保存在文件系统缓存中,尚未同步到磁盘。这种配置能够显著提高写入性能。
+- 可靠性优先:将数据写入 WAL 的同时执行 fsync 操作,将数据立即同步到磁盘,确保数据持久化,可靠性更高。
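+
+例如(数据库名仅作示意):
+
+```sql
+-- 性能优先:仅写入 WAL,不立即执行 fsync
+CREATE DATABASE power_perf WAL_LEVEL 1;
+-- 可靠性优先:写入 WAL 的同时执行 fsync,WAL_FSYNC_PERIOD 为 0 表示每次写入后立即 fsync
+CREATE DATABASE power_safe WAL_LEVEL 2 WAL_FSYNC_PERIOD 0;
+```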
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index 0f9ceada50..cf9d7b6878 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
+## 3.3.4.3
+
+
+
## 3.3.3.0
diff --git a/docs/zh/28-releases/03-notes/3.3.4.3.md b/docs/zh/28-releases/03-notes/3.3.4.3.md
new file mode 100644
index 0000000000..8ffd5802ed
--- /dev/null
+++ b/docs/zh/28-releases/03-notes/3.3.4.3.md
@@ -0,0 +1,69 @@
+---
+title: 3.3.4.3 版本说明
+sidebar_label: 3.3.4.3
+description: 3.3.4.3 版本说明
+---
+
+### 行为变更及兼容性
+1. 多副本流计算中必须使用 snode
+1. 增加了流计算的兼容性保证机制,避免后续函数变更产生新的兼容性问题,但之前版本的流计算必须重建,具体参见 https://docs.taosdata.com/advanced/stream/#流计算升级故障恢复
+1. 调整 case when 语句结果类型的判断方法
+
+### 新特性
+1. 新功能:流计算的 TWA 函数支持时间驱动的结果推送模式
+1. 新功能:流计算的 Interp 函数支持时间驱动的结果推送模式
+1. 新功能:支持微软对象存储
+
+### 优化
+1. 优化:提升并发大查询时节点之间互相拉数据的效率
+1. 优化:支持使用 AVX2 和 AVX512 对 double、timestamp 和 bigint 类型进行解码优化
+1. 优化:调整 case when 语句的结果类型判断方法
+1. 优化:顺序执行 compact 和 split vgroup 操作时的日志错误提示
+1. 优化:提升查询 “select ... from ... where ts in (...)” 的数据扫描速度
+1. 优化:增加了流计算的兼容性保证机制,避免后续函数变更产生新的兼容性问题,之前版本的流计算必须重建
+1. 优化:提升 taosX 在交叉写入场景下的数据同步性能
+1. 优化:支持关闭整数/浮点数类型的编码
+1. 优化:多副本流计算中必须使用 snode
+1. 优化:客户端生成唯一 ID 标识每一个查询任务,避免重复 ID 导致的内存损坏
+1. 优化:加快数据库的创建时间
+1. 优化:修改 s3MigrateEnabled 默认值为 0
+1. 优化:支持在审计数据库中记录删除操作
+1. 优化:支持在指定的 dnode 中创建数据库 [企业版]
+1. 优化:调整删除超级表数据列时的报错信息
+
+### 修复
+1. 修复:last_row 查询性能在 3.3.3.0 中大幅下降的问题
+1. 修复:WAL 条目不完整时 taosd 无法启动的问题
+1. 修复:partition by 常量时查询结果错误的问题
+1. 修复:标量函数包含 _wstart 且填充方式为 prev 时计算结果错误
+1. 修复:Windows 平台下的时区设置问题
+1. 修复:空数据库进行 compact 操作时,事务无法结束【企业版】
+1. 修复:事务冲突的逻辑错误
+1. 修复:管理节点某些错误会导致事务无法停止
+1. 修复:dnode 数据清空后 taosc 重试错误的问题
+1. 修复:Data Compact 被异常终止后,中间文件未被清理
+1. 修复:新增列后,Kafka 连接器的 earliest 模式消费不到新列数据
+1. 修复:interp 函数在 fill(prev) 时行为不正确
+1. 修复:TSMA 在高频元数据操作时异常停止的问题
+1. 修复:show create stable 语句执行结果的标签显示错误
+1. 修复:Percentile 函数在大数据量查询时会崩溃的问题
+1. 修复:partition by 和 having 联合使用时的语法错误问题
+1. 修复:interp 在 partition by tbname,c1 时 tbname 为空的问题
+1. 修复:通过 stmt 写入非法布尔数值时 taosd 可能 crash
+1. 修复:库符号 version 与使用相同符号的库冲突的问题
+1. 修复:在 windows 平台下 JDBC 驱动的句柄数持续升高问题
+1. 修复:3.3.3.1 升级至 3.3.4.0 偶现的启动失败问题
+1. 修复:Windows 平台重复增删表的内存泄漏
+1. 修复:无法限制并发拉起 checkpoint 数量导致流计算消耗资源过多
+1. 修复:并发查询时的 too many session 问题
+1. 修复:Windows 平台下 taos shell 在慢查询场景中崩溃的问题
+1. 修复:当打开 dnode 日志时,加密数据库无法恢复的问题
+1. 修复:由于 mnode 同步超时,进而导致 taosd 无法启动的问题
+1. 修复:由于在快照同步过程中整理文件组数据的速度过慢,从而导致 Vnode(虚拟节点)无法恢复的问题
+1. 修复:通过行协议向字符串类型的字段中写入带转义符的数据时,taosd 会崩溃
+1. 修复:Error Code 逻辑处理错误导致的元数据文件损坏
+1. 修复:查询语句中包含多个 “not” 条件语句嵌套时,未设置标量模式导致查询错误
+1. 修复:vnode 统计信息上报超时导致的 dnode offline 问题
+1. 修复:在不支持 avx 指令集的服务器上,taosd 启动失败问题
+1. 修复:taosX 数据迁移容错处理 0x09xx 错误码
diff --git a/docs/zh/28-releases/03-notes/index.md b/docs/zh/28-releases/03-notes/index.md
index 132a72d9ca..d1a48ab9a8 100644
--- a/docs/zh/28-releases/03-notes/index.md
+++ b/docs/zh/28-releases/03-notes/index.md
@@ -3,5 +3,7 @@ title: 版本说明
sidebar_label: 版本说明
description: 各版本版本说明
---
+
+[3.3.4.3](./3.3.4.3)
[3.3.3.0](./3.3.3.0)
[3.3.2.0](./3.3.2.0)
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 07fc2fd71b..e3c992f53f 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -1,7 +1,7 @@
PROJECT(TDengine)
-IF (TD_LINUX)
- INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
+IF(TD_LINUX)
+ INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
add_executable(tmq "")
@@ -12,58 +12,58 @@ IF (TD_LINUX)
add_executable(asyncdemo "")
target_sources(tmq
- PRIVATE
- "tmq.c"
- )
+ PRIVATE
+ "tmq.c"
+ )
target_sources(stream_demo
- PRIVATE
- "stream_demo.c"
- )
+ PRIVATE
+ "stream_demo.c"
+ )
target_sources(schemaless
- PRIVATE
- "schemaless.c"
- )
+ PRIVATE
+ "schemaless.c"
+ )
target_sources(prepare
- PRIVATE
+ PRIVATE
"prepare.c"
- )
-
+ )
+
target_sources(demo
- PRIVATE
+ PRIVATE
"demo.c"
- )
+ )
target_sources(asyncdemo
- PRIVATE
+ PRIVATE
"asyncdemo.c"
- )
+ )
target_link_libraries(tmq
- taos
- )
+ ${TAOS_LIB}
+ )
target_link_libraries(stream_demo
- taos
- )
+ ${TAOS_LIB}
+ )
target_link_libraries(schemaless
- taos
- )
+ ${TAOS_LIB}
+ )
target_link_libraries(prepare
- taos
- )
-
+ ${TAOS_LIB}
+ )
+
target_link_libraries(demo
- taos
- )
+ ${TAOS_LIB}
+ )
target_link_libraries(asyncdemo
- taos
- )
+ ${TAOS_LIB}
+ )
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
@@ -71,8 +71,9 @@ IF (TD_LINUX)
SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare)
SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo)
SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo)
-ENDIF ()
-IF (TD_DARWIN)
- INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
+ENDIF()
+
+IF(TD_DARWIN)
+ INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
-ENDIF ()
+ENDIF()
diff --git a/include/libs/stream/streamMsg.h b/include/common/streamMsg.h
similarity index 85%
rename from include/libs/stream/streamMsg.h
rename to include/common/streamMsg.h
index 0ceaa93a72..3db92ba58d 100644
--- a/include/libs/stream/streamMsg.h
+++ b/include/common/streamMsg.h
@@ -17,12 +17,23 @@
#define TDENGINE_STREAMMSG_H
#include "tmsg.h"
-#include "trpc.h"
+//#include "trpc.h"
#ifdef __cplusplus
extern "C" {
#endif
+typedef struct SStreamRetrieveReq SStreamRetrieveReq;
+typedef struct SStreamDispatchReq SStreamDispatchReq;
+typedef struct STokenBucket STokenBucket;
+typedef struct SMetaHbInfo SMetaHbInfo;
+
+typedef struct SNodeUpdateInfo {
+ int32_t nodeId;
+ SEpSet prevEp;
+ SEpSet newEp;
+} SNodeUpdateInfo;
+
typedef struct SStreamUpstreamEpInfo {
int32_t nodeId;
int32_t childId;
@@ -170,8 +181,8 @@ typedef struct SStreamHbMsg {
SArray* pUpdateNodes; // SArray, needs update the epsets in stream tasks for those nodes.
} SStreamHbMsg;
-int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
-int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp);
+int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq);
+int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq);
void tCleanupStreamHbMsg(SStreamHbMsg* pMsg);
typedef struct {
@@ -179,6 +190,9 @@ typedef struct {
int32_t msgId;
} SMStreamHbRspMsg;
+int32_t tEncodeStreamHbRsp(SEncoder* pEncoder, const SMStreamHbRspMsg* pRsp);
+int32_t tDecodeStreamHbRsp(SDecoder* pDecoder, SMStreamHbRspMsg* pRsp);
+
typedef struct SRetrieveChkptTriggerReq {
SMsgHead head;
int64_t streamId;
@@ -189,6 +203,9 @@ typedef struct SRetrieveChkptTriggerReq {
int64_t downstreamTaskId;
} SRetrieveChkptTriggerReq;
+int32_t tEncodeRetrieveChkptTriggerReq(SEncoder* pEncoder, const SRetrieveChkptTriggerReq* pReq);
+int32_t tDecodeRetrieveChkptTriggerReq(SDecoder* pDecoder, SRetrieveChkptTriggerReq* pReq);
+
typedef struct SCheckpointTriggerRsp {
int64_t streamId;
int64_t checkpointId;
@@ -198,6 +215,9 @@ typedef struct SCheckpointTriggerRsp {
int32_t rspCode;
} SCheckpointTriggerRsp;
+int32_t tEncodeCheckpointTriggerRsp(SEncoder* pEncoder, const SCheckpointTriggerRsp* pRsp);
+int32_t tDecodeCheckpointTriggerRsp(SDecoder* pDecoder, SCheckpointTriggerRsp* pRsp);
+
typedef struct SCheckpointReport {
int64_t streamId;
int32_t taskId;
@@ -222,7 +242,7 @@ typedef struct SRestoreCheckpointInfo {
int32_t nodeId;
} SRestoreCheckpointInfo;
-int32_t tEncodeRestoreCheckpointInfo (SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq);
+int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq);
int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq);
typedef struct {
@@ -232,10 +252,8 @@ typedef struct {
int32_t reqType;
} SStreamTaskRunReq;
-typedef struct SCheckpointConsensusEntry {
- SRestoreCheckpointInfo req;
- int64_t ts;
-} SCheckpointConsensusEntry;
+int32_t tEncodeStreamTaskRunReq(SEncoder* pEncoder, const SStreamTaskRunReq* pReq);
+int32_t tDecodeStreamTaskRunReq(SDecoder* pDecoder, SStreamTaskRunReq* pReq);
#ifdef __cplusplus
}
diff --git a/include/common/tanalytics.h b/include/common/tanalytics.h
index 85eb963129..d0af84ecfb 100644
--- a/include/common/tanalytics.h
+++ b/include/common/tanalytics.h
@@ -39,14 +39,14 @@ typedef struct {
} SAnalyticsUrl;
typedef enum {
- ANAL_BUF_TYPE_JSON = 0,
- ANAL_BUF_TYPE_JSON_COL = 1,
- ANAL_BUF_TYPE_OTHERS,
+ ANALYTICS_BUF_TYPE_JSON = 0,
+ ANALYTICS_BUF_TYPE_JSON_COL = 1,
+ ANALYTICS_BUF_TYPE_OTHERS,
} EAnalBufType;
typedef enum {
- ANAL_HTTP_TYPE_GET = 0,
- ANAL_HTTP_TYPE_POST,
+ ANALYTICS_HTTP_TYPE_GET = 0,
+ ANALYTICS_HTTP_TYPE_POST,
} EAnalHttpType;
typedef struct {
@@ -61,11 +61,11 @@ typedef struct {
char fileName[TSDB_FILENAME_LEN];
int32_t numOfCols;
SAnalyticsColBuf *pCols;
-} SAnalBuf;
+} SAnalyticBuf;
int32_t taosAnalyticsInit();
void taosAnalyticsCleanup();
-SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf);
+SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf);
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen);
bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen);
@@ -73,18 +73,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV
int64_t taosAnalGetVersion();
void taosAnalUpdate(int64_t newVer, SHashObj *pHash);
-int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols);
-int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal);
-int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal);
-int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal);
-int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName);
-int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf);
-int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex);
-int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue);
-int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex);
-int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf);
-int32_t taosAnalBufClose(SAnalBuf *pBuf);
-void taosAnalBufDestroy(SAnalBuf *pBuf);
+int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols);
+int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal);
+int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal);
+int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal);
+int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName);
+int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf);
+int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex);
+int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue);
+int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex);
+int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf);
+int32_t taosAnalBufClose(SAnalyticBuf *pBuf);
+void taosAnalBufDestroy(SAnalyticBuf *pBuf);
const char *taosAnalAlgoStr(EAnalAlgoType algoType);
EAnalAlgoType taosAnalAlgoInt(const char *algoName);
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index ea764e6760..4189d1ebdb 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -155,6 +155,7 @@ typedef enum EStreamType {
STREAM_MID_RETRIEVE,
STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
+ STREAM_DROP_CHILD_TABLE,
} EStreamType;
#pragma pack(push, 1)
@@ -250,6 +251,7 @@ typedef struct SQueryTableDataCond {
int32_t type; // data block load type:
bool skipRollup;
STimeWindow twindows;
+ STimeWindow extTwindows[2];
int64_t startVersion;
int64_t endVersion;
bool notLoadData; // response the actual data, not only the rows in the attribute of info.row of ssdatablock
@@ -401,6 +403,8 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);
#define TSMA_RES_STB_EXTRA_COLUMN_NUM 4 // 3 columns: _wstart, _wend, _wduration, 1 tag: tbname
static inline bool isTsmaResSTb(const char* stbName) {
+ static bool showTsmaTables = false;
+ if (showTsmaTables) return false;
const char* pos = strstr(stbName, TSMA_RES_STB_POSTFIX);
if (pos && strlen(stbName) == (pos - stbName) + strlen(TSMA_RES_STB_POSTFIX)) {
return true;
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index 19f3e222d1..f899fc5589 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -57,9 +57,9 @@ const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b001111
#define ONE ((uint8_t)1)
#define THREE ((uint8_t)3)
#define DIV_8(i) ((i) >> 3)
-#define MOD_8(i) ((i)&7)
+#define MOD_8(i) ((i) & 7)
#define DIV_4(i) ((i) >> 2)
-#define MOD_4(i) ((i)&3)
+#define MOD_4(i) ((i) & 3)
#define MOD_4_TIME_2(i) (MOD_4(i) << 1)
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
@@ -173,6 +173,8 @@ typedef struct {
} SColDataCompressInfo;
typedef void *(*xMallocFn)(void *, int32_t);
+typedef int32_t (*checkWKBGeometryFn)(const unsigned char *geoWKB, size_t nGeom);
+typedef int32_t (*initGeosFn)();
void tColDataDestroy(void *ph);
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t cflag);
@@ -191,7 +193,8 @@ int32_t tColDataCompress(SColData *colData, SColDataCompressInfo *info, SBuffer
int32_t tColDataDecompress(void *input, SColDataCompressInfo *info, SColData *colData, SBuffer *assist);
// for stmt bind
-int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen);
+int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
+ checkWKBGeometryFn cgeos);
int32_t tColDataSortMerge(SArray **arr);
// for raw block
@@ -378,7 +381,8 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted,
SArray *rowArray);
// stmt2 binding
-int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen);
+int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
+ checkWKBGeometryFn cgeos);
typedef struct {
int32_t columnId;
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index bf3fa716c6..5125c1caef 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -67,6 +67,7 @@ extern int64_t tsTickPerHour[3];
extern int32_t tsCountAlwaysReturnValue;
extern float tsSelectivityRatio;
extern int32_t tsTagFilterResCacheSize;
+extern int32_t tsBypassFlag;
// queue & threads
extern int32_t tsNumOfRpcThreads;
@@ -188,7 +189,6 @@ extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
-extern int32_t tsSlowLogThresholdTest;
extern char tsSlowLogExceptDb[];
extern int32_t tsSlowLogScope;
extern int32_t tsSlowLogMaxLen;
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 7ff70b243a..2294cf6f73 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -676,7 +676,7 @@ typedef struct {
int32_t tsSlowLogThreshold;
int32_t tsSlowLogMaxLen;
int32_t tsSlowLogScope;
- int32_t tsSlowLogThresholdTest;
+ int32_t tsSlowLogThresholdTest; //Obsolete
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN];
} SMonitorParas;
@@ -2187,8 +2187,9 @@ int32_t tSerializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq
typedef struct {
char name[TSDB_CONFIG_OPTION_LEN + 1];
- char value[TSDB_CONFIG_VALUE_LEN + 1];
+ char value[TSDB_CONFIG_PATH_LEN + 1];
char scope[TSDB_CONFIG_SCOPE_LEN + 1];
+ char info[TSDB_CONFIG_INFO_LEN + 1];
} SVariablesInfo;
typedef struct {
@@ -2307,6 +2308,7 @@ typedef struct {
typedef struct {
SExplainRsp rsp;
uint64_t qId;
+ uint64_t cId;
uint64_t tId;
int64_t rId;
int32_t eId;
@@ -2660,6 +2662,7 @@ typedef struct SSubQueryMsg {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@@ -2689,6 +2692,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int32_t execId;
} SQueryContinueReq;
@@ -2723,6 +2727,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
@@ -2738,6 +2743,7 @@ typedef struct {
typedef struct {
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@@ -2784,6 +2790,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@@ -2797,6 +2804,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@@ -2813,6 +2821,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
int64_t refId;
int32_t execId;
@@ -3220,6 +3229,7 @@ int tDecodeSVCreateTbBatchRsp(SDecoder* pCoder, SVCreateTbBatchRsp* pRsp);
typedef struct {
char* name;
uint64_t suid; // for tmq in wal format
+ int64_t uid;
int8_t igNotExists;
} SVDropTbReq;
@@ -3788,7 +3798,14 @@ typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
-} SVPauseStreamTaskReq, SVResetStreamTaskReq;
+} SVPauseStreamTaskReq;
+
+typedef struct {
+ SMsgHead head;
+ int64_t streamId;
+ int32_t taskId;
+ int64_t chkptId;
+} SVResetStreamTaskReq;
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
@@ -4261,6 +4278,7 @@ typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
+ uint64_t clientId;
uint64_t taskId;
uint32_t sqlLen;
uint32_t phyLen;
diff --git a/include/common/ttime.h b/include/common/ttime.h
index 65bb763b1f..1ffcc29eca 100644
--- a/include/common/ttime.h
+++ b/include/common/ttime.h
@@ -62,7 +62,8 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
: (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
: 1000000000;
- time_t t = taosTime(NULL);
+ time_t t;
+ (void) taosTime(&t);
struct tm tm;
(void) taosLocalTime(&t, &tm, NULL, 0);
tm.tm_hour = 0;
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index df3f87973f..7c6f02513e 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -102,6 +102,7 @@ typedef struct SCatalogReq {
bool svrVerRequired;
bool forceUpdate;
bool cloned;
+ bool forceFetchViewMeta;
} SCatalogReq;
typedef struct SMetaRes {
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index d955a7b3b9..82cb899cb6 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -31,7 +31,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
-typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
+typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef struct {
void* handle;
diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h
index db0d6339c8..feb7bcc25e 100644
--- a/include/libs/executor/storageapi.h
+++ b/include/libs/executor/storageapi.h
@@ -336,6 +336,7 @@ typedef struct SStateStore {
int32_t (*streamStatePutParName)(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache,
int32_t* pWinCode);
+ int32_t (*streamStateDeleteParName)(SStreamState* pState, int64_t groupId);
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h
index acdbc09be6..52cb847b6f 100644
--- a/include/libs/function/tudf.h
+++ b/include/libs/function/tudf.h
@@ -61,6 +61,35 @@ extern "C" {
} \
} while (0)
+#define TAOS_UDF_CHECK_PTR_RCODE(...) \
+ do { \
+ const void *ptrs[] = {__VA_ARGS__}; \
+ for (int i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); ++i) { \
+ if (ptrs[i] == NULL) { \
+ fnError("udfd %dth parameter invalid, NULL PTR.line:%d", i, __LINE__); \
+ return TSDB_CODE_INVALID_PARA; \
+ } \
+ } \
+ } while (0)
+
+#define TAOS_UDF_CHECK_PTR_RVOID(...) \
+ do { \
+ const void *ptrs[] = {__VA_ARGS__}; \
+ for (int i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); ++i) { \
+ if (ptrs[i] == NULL) { \
+ fnError("udfd %dth parameter invalid, NULL PTR.line:%d", i, __LINE__); \
+ return; \
+ } \
+ } \
+ } while (0)
+
+#define TAOS_UDF_CHECK_CONDITION(o, code) \
+ do { \
+ if ((o) == false) { \
+ fnError("Condition not met.line:%d", __LINE__); \
+ return code; \
+ } \
+ } while (0)
// low level APIs
/**
diff --git a/include/libs/geometry/geosWrapper.h b/include/libs/geometry/geosWrapper.h
index a5bc0cec17..d27d300b82 100644
--- a/include/libs/geometry/geosWrapper.h
+++ b/include/libs/geometry/geosWrapper.h
@@ -35,6 +35,7 @@ int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t
int32_t initCtxAsText();
int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT);
+int32_t checkWKB(const unsigned char *wkb, size_t size);
int32_t initCtxRelationFunc();
int32_t doIntersects(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
@@ -47,11 +48,12 @@ int32_t doCovers(const GEOSGeometry *geom1, const GEOSPreparedGeometry *prepared
bool swapped, char *res);
int32_t doContains(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
-int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
- bool swapped, char *res);
+int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1,
+ const GEOSGeometry *geom2, bool swapped, char *res);
-int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom);
-void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom);
+int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom,
+ const GEOSPreparedGeometry **outputPreparedGeom);
+void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom);
#ifdef __cplusplus
}
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 514eddbc24..867f8c8efc 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -42,10 +42,11 @@ extern "C" {
#define SHOW_CREATE_VIEW_RESULT_FIELD1_LEN (TSDB_VIEW_FNAME_LEN + 4 + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_VIEW_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN + VARSTR_HEADER_SIZE)
-#define SHOW_LOCAL_VARIABLES_RESULT_COLS 3
+#define SHOW_LOCAL_VARIABLES_RESULT_COLS 4
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
-#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
+#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)
+#define SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
#define COMPACT_DB_RESULT_COLS 3
#define COMPACT_DB_RESULT_FIELD1_LEN 32
@@ -321,7 +322,7 @@ typedef struct SAlterDnodeStmt {
typedef struct {
ENodeType type;
- char url[TSDB_ANAL_ANODE_URL_LEN + 3];
+ char url[TSDB_ANALYTIC_ANODE_URL_LEN + 3];
} SCreateAnodeStmt;
typedef struct {
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 72dd3ef3e0..6384c536ce 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -174,6 +174,7 @@ char* nodesGetNameFromColumnNode(SNode* pNode);
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
void nodesSortList(SNodeList** pList, int32_t (*)(SNode* pNode1, SNode* pNode2));
void destroyFuncParam(void* pFuncStruct);
+int32_t nodesListDeduplicate(SNodeList** pList);
#ifdef __cplusplus
}
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index cfd9c1a422..89bc27a1fa 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -334,7 +334,7 @@ typedef struct SWindowLogicNode {
int64_t windowSliding;
SNodeList* pTsmaSubplans;
SNode* pAnomalyExpr;
- char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+ char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
} SWindowLogicNode;
typedef struct SFillLogicNode {
@@ -624,6 +624,7 @@ typedef struct SAggPhysiNode {
typedef struct SDownstreamSourceNode {
ENodeType type;
SQueryNodeAddr addr;
+ uint64_t clientId;
uint64_t taskId;
uint64_t schedId;
int32_t execId;
@@ -739,7 +740,7 @@ typedef SCountWinodwPhysiNode SStreamCountWinodwPhysiNode;
typedef struct SAnomalyWindowPhysiNode {
SWindowPhysiNode window;
SNode* pAnomalyKey;
- char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+ char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
} SAnomalyWindowPhysiNode;
typedef struct SSortPhysiNode {
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 763882ab3a..7af74a347a 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -351,7 +351,7 @@ typedef struct SAnomalyWindowNode {
ENodeType type; // QUERY_NODE_ANOMALY_WINDOW
SNode* pCol; // timestamp primary key
SNode* pExpr;
- char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+ char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
} SAnomalyWindowNode;
typedef enum EFillMode {
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index 83daf0376c..cb4e359727 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -105,11 +105,11 @@ void qWorkerDestroy(void **qWorkerMgmt);
int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
-int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
- SQWMsg *qwMsg, SArray *explainRes);
+int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
+ int32_t eId, SQWMsg *qwMsg, SArray *explainRes);
-int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
- void **pRsp, SArray *explainRes);
+int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
+ int32_t eId, void **pRsp, SArray *explainRes);
int32_t qWorkerDbgEnableDebug(char *option);
diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h
index af8deff1a0..2988ffc4b1 100644
--- a/include/libs/scheduler/scheduler.h
+++ b/include/libs/scheduler/scheduler.h
@@ -83,6 +83,9 @@ void schedulerStopQueryHb(void* pTrans);
int32_t schedulerUpdatePolicy(int32_t policy);
int32_t schedulerEnableReSchedule(bool enableResche);
+int32_t initClientId(void);
+uint64_t getClientId(void);
+
/**
* Cancel query job
* @param pJob
diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h
index a50451c3eb..2179547352 100644
--- a/include/libs/stream/streamState.h
+++ b/include/libs/stream/streamState.h
@@ -116,6 +116,7 @@ void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode);
+int32_t streamStateDeleteParName(SStreamState* pState, int64_t groupId);
// group id
int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index de10d6844e..6b8e9f12a6 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -319,11 +319,6 @@ typedef struct SSTaskBasicInfo {
SInterval interval;
} SSTaskBasicInfo;
-typedef struct SStreamRetrieveReq SStreamRetrieveReq;
-typedef struct SStreamDispatchReq SStreamDispatchReq;
-typedef struct STokenBucket STokenBucket;
-typedef struct SMetaHbInfo SMetaHbInfo;
-
typedef struct SDispatchMsgInfo {
SStreamDispatchReq* pData; // current dispatch data
@@ -462,7 +457,7 @@ struct SStreamTask {
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
void* pBackend;
- int8_t subtableWithoutMd5;
+ int8_t subtableWithoutMd5; // only for tsma stream tasks
char reserve[256];
char* backendPath;
};
@@ -626,11 +621,11 @@ typedef struct STaskStatusEntry {
STaskCkptInfo checkpointInfo;
} STaskStatusEntry;
-typedef struct SNodeUpdateInfo {
- int32_t nodeId;
- SEpSet prevEp;
- SEpSet newEp;
-} SNodeUpdateInfo;
+//typedef struct SNodeUpdateInfo {
+// int32_t nodeId;
+// SEpSet prevEp;
+// SEpSet newEp;
+//} SNodeUpdateInfo;
typedef struct SStreamTaskState {
ETaskStatus state;
@@ -643,6 +638,11 @@ typedef struct SCheckpointConsensusInfo {
int64_t streamId;
} SCheckpointConsensusInfo;
+typedef struct SCheckpointConsensusEntry {
+ SRestoreCheckpointInfo req;
+ int64_t ts;
+} SCheckpointConsensusEntry;
+
void streamSetupScheduleTrigger(SStreamTask* pTask);
// dispatch related
@@ -718,6 +718,7 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask);
void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId);
int32_t streamTaskSendCheckpointTriggerMsg(SStreamTask* pTask, int32_t dstTaskId, int32_t downstreamNodeId,
SRpcHandleInfo* pInfo, int32_t code);
+void streamTaskSetFailedCheckpointId(SStreamTask* pTask, int64_t failedId);
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
int32_t streamQueueGetNumOfUnAccessedItems(const SStreamQueue* pQueue);
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index f95b3f20ca..999adc2eff 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -138,6 +138,7 @@ typedef struct {
int8_t scanMeta;
int8_t deleteMsg;
int8_t enableRef;
+ int8_t scanDropCtb;
} SWalFilterCond;
// todo hide this struct
diff --git a/include/os/os.h b/include/os/os.h
index e3808065dd..4314148685 100644
--- a/include/os/os.h
+++ b/include/os/os.h
@@ -137,6 +137,14 @@ extern threadlocal bool tsEnableRandErr;
terrno = _code; \
}
+#define OS_PARAM_CHECK(_o) \
+ do { \
+ if ((_o) == NULL) { \
+ terrno = TSDB_CODE_INVALID_PARA; \
+ return terrno; \
+ } \
+ } while (0)
+
#ifdef __cplusplus
}
#endif
diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h
index d893f42740..48836eeb0c 100644
--- a/include/os/osSemaphore.h
+++ b/include/os/osSemaphore.h
@@ -104,6 +104,7 @@ int64_t taosGetPthreadId(TdThread thread);
void taosResetPthread(TdThread *thread);
bool taosComparePthread(TdThread first, TdThread second);
int32_t taosGetPId();
+int32_t taosGetPIdByName(const char* name, int32_t* pPId);
int32_t taosGetAppName(char *name, int32_t *len);
#ifdef __cplusplus
diff --git a/include/os/osSystem.h b/include/os/osSystem.h
index fe181d291a..06f23eec0f 100644
--- a/include/os/osSystem.h
+++ b/include/os/osSystem.h
@@ -48,8 +48,6 @@ void taosCloseCmd(TdCmdPtr *ppCmd);
void *taosLoadDll(const char *filename);
-void *taosLoadSym(void *handle, char *name);
-
void taosCloseDll(void *handle);
int32_t taosSetConsoleEcho(bool on);
diff --git a/include/os/osTime.h b/include/os/osTime.h
index 5d74146e9c..7a65efe28d 100644
--- a/include/os/osTime.h
+++ b/include/os/osTime.h
@@ -93,7 +93,7 @@ static FORCE_INLINE int64_t taosGetMonoTimestampMs() {
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int32_t bufSize);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
-time_t taosTime(time_t *t);
+int32_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
const uint32_t min, const uint32_t sec, int64_t time_zone);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 2c811495fd..6cedaeeef1 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -491,13 +491,14 @@ int32_t taosGetErrSize();
#define TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE TAOS_DEF_ERROR_CODE(0, 0x0438)
// analysis
-#define TSDB_CODE_ANAL_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440)
-#define TSDB_CODE_ANAL_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441)
-#define TSDB_CODE_ANAL_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442)
-#define TSDB_CODE_ANAL_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443)
-#define TSDB_CODE_ANAL_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444)
-#define TSDB_CODE_ANAL_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
-#define TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446)
+#define TSDB_CODE_ANA_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440)
+#define TSDB_CODE_ANA_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441)
+#define TSDB_CODE_ANA_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442)
+#define TSDB_CODE_ANA_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443)
+#define TSDB_CODE_ANA_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444)
+#define TSDB_CODE_ANA_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
+#define TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446)
+#define TSDB_CODE_ANA_WN_DATA TAOS_DEF_ERROR_CODE(0, 0x0447)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@@ -1011,6 +1012,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_STREAM_CONFLICT_EVENT TAOS_DEF_ERROR_CODE(0, 0x4106)
#define TSDB_CODE_STREAM_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x4107)
#define TSDB_CODE_STREAM_INPUTQ_FULL TAOS_DEF_ERROR_CODE(0, 0x4108)
+#define TSDB_CODE_STREAM_INVLD_CHKPT TAOS_DEF_ERROR_CODE(0, 0x4109)
// TDLite
#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100)
diff --git a/include/util/tbuffer.inc b/include/util/tbuffer.inc
index 39090fb7fa..633517ca58 100644
--- a/include/util/tbuffer.inc
+++ b/include/util/tbuffer.inc
@@ -186,11 +186,25 @@ static int32_t tBufferGetI16(SBufferReader *reader, int16_t *value) {
}
static int32_t tBufferGetI32(SBufferReader *reader, int32_t *value) {
- return tBufferGet(reader, sizeof(*value), value);
+ if (reader->offset + sizeof(int32_t) > reader->buffer->size) {
+ return TSDB_CODE_OUT_OF_RANGE;
+ }
+ if (value) {
+ *value = *(int32_t*)BR_PTR(reader);
+ }
+ reader->offset += sizeof(int32_t);
+ return 0;
}
static int32_t tBufferGetI64(SBufferReader *reader, int64_t *value) {
- return tBufferGet(reader, sizeof(*value), value);
+ if (reader->offset + sizeof(int64_t) > reader->buffer->size) {
+ return TSDB_CODE_OUT_OF_RANGE;
+ }
+ if (value) {
+ *value = *(int64_t*)BR_PTR(reader);
+ }
+ reader->offset += sizeof(int64_t);
+ return 0;
}
static int32_t tBufferGetU8(SBufferReader *reader, uint8_t *value) { return tBufferGet(reader, sizeof(*value), value); }
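
The inlined bodies above follow a standard bounds-checked reader pattern: confirm the requested width still fits in the remaining buffer, copy the value out only if the caller asked for it, then advance the cursor. A minimal standalone sketch of the same pattern (illustrative names, not the TDengine SBufferReader API; it also uses memcpy instead of the aligned cast in the patch):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  const uint8_t *data;
  size_t         size;
  size_t         offset;
} Reader;

/* Returns 0 on success, -1 if fewer than sizeof(int32_t) bytes remain. */
static int reader_get_i32(Reader *r, int32_t *value) {
  if (r->offset + sizeof(int32_t) > r->size) return -1;  /* out of range */
  if (value) memcpy(value, r->data + r->offset, sizeof(int32_t));
  r->offset += sizeof(int32_t);                          /* advance the cursor */
  return 0;
}

int main(void) {
  uint8_t buf[4] = {1, 0, 0, 0};
  Reader  r = {buf, sizeof(buf), 0};
  int32_t v = 0;

  int rc = reader_get_i32(&r, &v);
  printf("first read rc=%d value=%d\n", rc, (int)v);  /* succeeds */

  rc = reader_get_i32(&r, &v);
  printf("second read rc=%d\n", rc);                  /* buffer exhausted */
  return 0;
}
```
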
diff --git a/include/util/tdef.h b/include/util/tdef.h
index ba30e78c59..c69d4f8f19 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -195,9 +195,9 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_MULTI,
OP_TYPE_DIV,
OP_TYPE_REM,
-
+
OP_TYPE_MINUS,
-
+
OP_TYPE_BIT_AND,
OP_TYPE_BIT_OR,
@@ -213,7 +213,7 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_NOT_LIKE,
OP_TYPE_MATCH,
OP_TYPE_NMATCH,
-
+
OP_TYPE_IS_NULL,
OP_TYPE_IS_NOT_NULL,
OP_TYPE_IS_TRUE,
@@ -222,7 +222,7 @@ static const EOperatorType OPERATOR_ARRAY[] = {
OP_TYPE_IS_NOT_TRUE,
OP_TYPE_IS_NOT_FALSE,
OP_TYPE_IS_NOT_UNKNOWN,
- //OP_TYPE_COMPARE_MAX_VALUE,
+ //OP_TYPE_COMPARE_MAX_VALUE,
OP_TYPE_JSON_GET_VALUE,
OP_TYPE_JSON_CONTAINS,
@@ -335,12 +335,13 @@ typedef enum ELogicConditionType {
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_SHOW_SUBQUERY_LEN 1000
#define TSDB_LOG_VAR_LEN 32
-#define TSDB_ANAL_ANODE_URL_LEN 128
-#define TSDB_ANAL_ALGO_NAME_LEN 64
-#define TSDB_ANAL_ALGO_TYPE_LEN 24
-#define TSDB_ANAL_ALGO_KEY_LEN (TSDB_ANAL_ALGO_NAME_LEN + 9)
-#define TSDB_ANAL_ALGO_URL_LEN (TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1)
-#define TSDB_ANAL_ALGO_OPTION_LEN 256
+
+#define TSDB_ANALYTIC_ANODE_URL_LEN 128
+#define TSDB_ANALYTIC_ALGO_NAME_LEN 64
+#define TSDB_ANALYTIC_ALGO_TYPE_LEN 24
+#define TSDB_ANALYTIC_ALGO_KEY_LEN (TSDB_ANALYTIC_ALGO_NAME_LEN + 9)
+#define TSDB_ANALYTIC_ALGO_URL_LEN (TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1)
+#define TSDB_ANALYTIC_ALGO_OPTION_LEN 256
#define TSDB_MAX_EP_NUM 10
@@ -620,6 +621,16 @@ enum {
enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
+/**
+ * RB: return before
+ * RA: return after
+ * NR: no return; skip this step and continue with the following steps
+ */
+#define TSDB_BYPASS_RB_RPC_SEND_SUBMIT 0x01u
+#define TSDB_BYPASS_RA_RPC_RECV_SUBMIT 0x02u
+#define TSDB_BYPASS_RB_TSDB_WRITE_MEM 0x04u
+#define TSDB_BYPASS_RB_TSDB_COMMIT 0x08u
+
#define DEFAULT_HANDLE 0
#define MNODE_HANDLE 1
#define QNODE_HANDLE -1
@@ -631,6 +642,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
#define TSDB_CONFIG_VALUE_LEN 64
#define TSDB_CONFIG_SCOPE_LEN 8
#define TSDB_CONFIG_NUMBER 16
+#define TSDB_CONFIG_PATH_LEN 4096
+#define TSDB_CONFIG_INFO_LEN 64
#define QUERY_ID_SIZE 20
#define QUERY_OBJ_ID_SIZE 18
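
The new TSDB_BYPASS_* values are single-bit flags intended to be OR-ed into one integer (the tsBypassFlag config option added later in this patch) and tested with bitwise AND, so individual stages can be skipped independently. A small hedged sketch of that usage, with shortened hypothetical names:

```c
#include <stdint.h>
#include <stdio.h>

#define BYPASS_RB_RPC_SEND_SUBMIT 0x01u
#define BYPASS_RA_RPC_RECV_SUBMIT 0x02u
#define BYPASS_RB_TSDB_WRITE_MEM  0x04u
#define BYPASS_RB_TSDB_COMMIT     0x08u

int main(void) {
  /* e.g. skip the memtable write and the commit step */
  uint32_t flags = BYPASS_RB_TSDB_WRITE_MEM | BYPASS_RB_TSDB_COMMIT;

  if (flags & BYPASS_RB_TSDB_WRITE_MEM)     printf("bypass: tsdb write-mem\n");
  if (flags & BYPASS_RB_TSDB_COMMIT)        printf("bypass: tsdb commit\n");
  if (!(flags & BYPASS_RB_RPC_SEND_SUBMIT)) printf("rpc send-submit runs normally\n");
  return 0;
}
```
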
diff --git a/include/util/tjson.h b/include/util/tjson.h
index 88c2a1efb7..728b6b5585 100644
--- a/include/util/tjson.h
+++ b/include/util/tjson.h
@@ -25,7 +25,7 @@ extern "C" {
#define tjsonGetNumberValue(pJson, pName, val, code) \
do { \
- uint64_t _tmp = 0; \
+ int64_t _tmp = 0; \
code = tjsonGetBigIntValue(pJson, pName, &_tmp); \
val = _tmp; \
} while (0)
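
The temporary switches from uint64_t to int64_t so that &_tmp matches the signed pointer tjsonGetBigIntValue appears to expect, and negative JSON numbers no longer pass through an unsigned intermediate before the final assignment to val. A standalone sketch of the macro shape, with get_bigint standing in for the real accessor (an assumption for illustration, not the tjson API):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for tjsonGetBigIntValue: it writes through an int64_t pointer. */
static int get_bigint(int64_t *out) { *out = -42; return 0; }

#define GET_NUMBER(val, code)       \
  do {                              \
    int64_t _tmp = 0;               \
    (code) = get_bigint(&_tmp);     \
    (val) = _tmp;                   \
  } while (0)

int main(void) {
  int32_t small = 0;
  int     code = 0;
  GET_NUMBER(small, code);  /* the negative value survives the narrowing */
  printf("code=%d val=%d\n", code, (int)small);
  return 0;
}
```
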
diff --git a/include/util/tutil.h b/include/util/tutil.h
index 87710b091d..aa3b774e84 100644
--- a/include/util/tutil.h
+++ b/include/util/tutil.h
@@ -120,6 +120,18 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
}
}
+/*
+ * LIKELY and UNLIKELY macros for branch prediction hints. Use them judiciously
+ * only in very hot code paths. Misuse or abuse can lead to performance degradation.
+ */
+#if __GNUC__ >= 3
+#define LIKELY(x) __builtin_expect((x) != 0, 1)
+#define UNLIKELY(x) __builtin_expect((x) != 0, 0)
+#else
+#define LIKELY(x) ((x) != 0)
+#define UNLIKELY(x) ((x) != 0)
+#endif
+
#define TAOS_CHECK_ERRNO(CODE) \
do { \
terrno = (CODE); \
@@ -129,25 +141,27 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
} \
} while (0)
-#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
- do { \
- if (TSDB_CODE_SUCCESS != (CODE)) { \
- LINO = __LINE__; \
- goto LABEL; \
- } \
+#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
+ do { \
+ if (UNLIKELY(TSDB_CODE_SUCCESS != (CODE))) { \
+ LINO = __LINE__; \
+ goto LABEL; \
+ } \
} while (0)
#define QUERY_CHECK_CODE TSDB_CHECK_CODE
-#define QUERY_CHECK_CONDITION(condition, CODE, LINO, LABEL, ERRNO) \
- if (!condition) { \
- (CODE) = (ERRNO); \
- (LINO) = __LINE__; \
- goto LABEL; \
+#define TSDB_CHECK_CONDITION(condition, CODE, LINO, LABEL, ERRNO) \
+ if (UNLIKELY(!(condition))) { \
+ (CODE) = (ERRNO); \
+ (LINO) = __LINE__; \
+ goto LABEL; \
}
+#define QUERY_CHECK_CONDITION TSDB_CHECK_CONDITION
+
#define TSDB_CHECK_NULL(ptr, CODE, LINO, LABEL, ERRNO) \
- if ((ptr) == NULL) { \
+ if (UNLIKELY((ptr) == NULL)) { \
(CODE) = (ERRNO); \
(LINO) = __LINE__; \
goto LABEL; \
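
Wrapping the failure branch in UNLIKELY tells the compiler to lay out the success path as the fall-through, which is why only the error checks inside these hot-path macros are annotated. A self-contained sketch of the same check-and-goto idiom:

```c
#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 3
#define UNLIKELY(x) __builtin_expect((x) != 0, 0)
#else
#define UNLIKELY(x) ((x) != 0)
#endif

#define CHECK_CODE(CODE, LINO, LABEL)   \
  do {                                  \
    if (UNLIKELY((CODE) != 0)) {        \
      (LINO) = __LINE__;                \
      goto LABEL;                       \
    }                                   \
  } while (0)

static int do_step(int fail) { return fail ? -1 : 0; }

int main(void) {
  int code = 0, lino = 0;

  code = do_step(0);
  CHECK_CODE(code, lino, _error);  /* hot path: falls through */

  code = do_step(1);
  CHECK_CODE(code, lino, _error);  /* cold path: jumps to cleanup */

  printf("all steps ok\n");
  return 0;

_error:
  printf("failed at line %d, code %d\n", lino, code);
  return 1;
}
```
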
diff --git a/packaging/smokeTest/test_server.py b/packaging/smokeTest/test_server.py
index 36d86357a3..04231f122f 100644
--- a/packaging/smokeTest/test_server.py
+++ b/packaging/smokeTest/test_server.py
@@ -61,7 +61,7 @@ def setup_module(get_config):
else:
cmd = "mkdir -p ../../debug/build/bin/"
subprocess.getoutput(cmd)
- if config["system"] == "Linux": # add tmq_sim
+ if config["system"] == "Linux" or config["system"] == "Darwin" : # add tmq_sim
cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/."
subprocess.getoutput(cmd)
if config["system"] == "Darwin":
@@ -140,9 +140,11 @@ class TestServer:
if line:
print(line.strip())
if "succeed to write dnode" in line:
- time.sleep(15)
+ time.sleep(5)
                 # Send the termination signal
- os.kill(process.pid, signal.SIGTERM)
+ os.kill(process.pid, signal.SIGKILL)
+ # Waiting for the process to be completely killed
+ time.sleep(5)
break
@pytest.mark.all
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index 6d5f006517..2113aa7921 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -5,24 +5,24 @@ if(TD_ENTERPRISE)
endif()
if(TD_WINDOWS)
- add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
+ add_library(${TAOS_LIB} SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
else()
- add_library(taos SHARED ${CLIENT_SRC})
+ add_library(${TAOS_LIB} SHARED ${CLIENT_SRC})
endif()
if(${TD_DARWIN})
- target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype)
+ target_compile_options(${TAOS_LIB} PRIVATE -Wno-error=deprecated-non-prototype)
endif()
INCLUDE_DIRECTORIES(jni)
target_include_directories(
- taos
+ ${TAOS_LIB}
PUBLIC "${TD_SOURCE_DIR}/include/client"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
- taos
+ ${TAOS_LIB}
INTERFACE api
PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry
)
@@ -36,32 +36,32 @@ else()
endif()
set_target_properties(
- taos
+ ${TAOS_LIB}
PROPERTIES
CLEAN_DIRECT_OUTPUT
1
)
set_target_properties(
- taos
+ ${TAOS_LIB}
PROPERTIES
VERSION ${TD_VER_NUMBER}
SOVERSION 1
)
-add_library(taos_static STATIC ${CLIENT_SRC})
+add_library(${TAOS_LIB_STATIC} STATIC ${CLIENT_SRC})
if(${TD_DARWIN})
- target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype)
+ target_compile_options(${TAOS_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
- taos_static
+ ${TAOS_LIB_STATIC}
PUBLIC "${TD_SOURCE_DIR}/include/client"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
- taos_static
+ ${TAOS_LIB_STATIC}
INTERFACE api
PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry
)
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 90505ed25a..da56dcf75e 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -47,10 +47,11 @@ enum {
RES_TYPE__TMQ_BATCH_META,
};
-#define SHOW_VARIABLES_RESULT_COLS 3
+#define SHOW_VARIABLES_RESULT_COLS 4
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)
+#define SHOW_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)(res) == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)(res) == RES_TYPE__TMQ)
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index c56a627ec7..b0be3a4d3b 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -294,8 +294,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
- if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
- duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
+ if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
@@ -983,6 +982,7 @@ void taos_init_imp(void) {
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
ENV_ERR_RET(catalogInit(&cfg), "failed to init catalog");
ENV_ERR_RET(schedulerInit(), "failed to init scheduler");
+ ENV_ERR_RET(initClientId(), "failed to init clientId");
tscDebug("starting to initialize TAOS driver");
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 8a0b1ddaab..94d06166f2 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -3032,13 +3032,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
if (code != TSDB_CODE_SUCCESS) {
pRequest->code = code;
taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
+ pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code);
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
+ pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, pRequest->code);
return;
}
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index 9a723218ff..e182cd97ee 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -541,6 +541,10 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) {
infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD3_LEN;
TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno);
+ infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
+ infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD4_LEN;
+ TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno);
+
int32_t numOfCfg = taosArrayGetSize(pVars);
code = blockDataEnsureCapacity(pBlock, numOfCfg);
TSDB_CHECK_CODE(code, line, END);
@@ -569,6 +573,13 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) {
TSDB_CHECK_NULL(pColInfo, code, line, END, terrno);
code = colDataSetVal(pColInfo, i, scope, false);
TSDB_CHECK_CODE(code, line, END);
+
+ char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(info, pInfo->info, TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
+ TSDB_CHECK_NULL(pColInfo, code, line, END, terrno);
+ code = colDataSetVal(pColInfo, i, info, false);
+ TSDB_CHECK_CODE(code, line, END);
}
pBlock->info.rows = numOfCfg;
@@ -825,7 +836,7 @@ int32_t processCompactDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
tscError("failed to post semaphore");
}
}
- return code;
+ return code;
}
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
@@ -845,7 +856,7 @@ __async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
case TDMT_MND_SHOW_VARIABLES:
return processShowVariablesRsp;
case TDMT_MND_COMPACT_DB:
- return processCompactDbRsp;
+ return processCompactDbRsp;
default:
return genericRspCallback;
}
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index 4d6a6fbff7..902e581bfd 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -1872,13 +1872,17 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
if (tmq->epTimer == NULL){
tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(tmq->refId), tmqMgmt.timer);
+ if (tmq->epTimer == NULL) {
+ code = TSDB_CODE_TSC_INTERNAL_ERROR;
+ goto END;
+ }
}
- if (tmq->commitTimer == NULL){
+ if (tmq->autoCommit && tmq->commitTimer == NULL){
tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, (void*)(tmq->refId), tmqMgmt.timer);
- }
- if (tmq->epTimer == NULL || tmq->commitTimer == NULL) {
- code = TSDB_CODE_TSC_INTERNAL_ERROR;
- goto END;
+ if (tmq->commitTimer == NULL) {
+ code = TSDB_CODE_TSC_INTERNAL_ERROR;
+ goto END;
+ }
}
END:
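
The subscribe path now validates each timer immediately after starting it, and only creates the commit timer when auto-commit is enabled, rather than checking both handles once at the end. A simplified sketch of that guarded-creation pattern (Timer, timer_start and Consumer are placeholders, not the tmq internals):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } Timer;

static Timer *timer_start(const char *name) {  /* stand-in for taosTmrStart */
  printf("starting %s timer\n", name);
  return malloc(sizeof(Timer));
}

typedef struct {
  int    autoCommit;
  Timer *epTimer;
  Timer *commitTimer;
} Consumer;

static int subscribe(Consumer *c) {
  int code = 0;
  if (c->epTimer == NULL) {
    c->epTimer = timer_start("ep");
    if (c->epTimer == NULL) { code = -1; goto END; }  /* check right away */
  }
  /* the commit timer is only needed when auto-commit is on */
  if (c->autoCommit && c->commitTimer == NULL) {
    c->commitTimer = timer_start("commit");
    if (c->commitTimer == NULL) { code = -1; goto END; }
  }
END:
  return code;
}

int main(void) {
  Consumer c = {.autoCommit = 0};
  int rc = subscribe(&c);
  printf("rc=%d commitTimer=%p\n", rc, (void *)c.commitTimer);
  free(c.epTimer);
  return 0;
}
```
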
diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt
index 054b5af2b9..9393bfc449 100644
--- a/source/client/test/CMakeLists.txt
+++ b/source/client/test/CMakeLists.txt
@@ -8,27 +8,33 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(clientTest clientTests.cpp)
TARGET_LINK_LIBRARIES(
clientTest
- os util common transport parser catalog scheduler gtest taos_static qcom executor function
+ os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function
)
ADD_EXECUTABLE(tmqTest tmqTest.cpp)
TARGET_LINK_LIBRARIES(
tmqTest
- PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
+ PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom
)
ADD_EXECUTABLE(smlTest smlTest.cpp)
TARGET_LINK_LIBRARIES(
smlTest
- PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry
+ PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom geometry
)
#ADD_EXECUTABLE(clientMonitorTest clientMonitorTests.cpp)
#TARGET_LINK_LIBRARIES(
# clientMonitorTest
-# PUBLIC os util common transport monitor parser catalog scheduler function gtest taos_static qcom executor
+# PUBLIC os util common transport monitor parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom executor
#)
+ADD_EXECUTABLE(userOperTest ../../../tests/script/api/passwdTest.c)
+TARGET_LINK_LIBRARIES(
+ userOperTest
+ PUBLIC ${TAOS_LIB}
+)
+
TARGET_INCLUDE_DIRECTORIES(
clientTest
PUBLIC "${TD_SOURCE_DIR}/include/client/"
@@ -69,3 +75,8 @@ add_test(
# NAME clientMonitorTest
# COMMAND clientMonitorTest
# )
+
+add_test(
+ NAME userOperTest
+ COMMAND userOperTest
+)
diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index f10eb6a611..39380a0644 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -1,4 +1,7 @@
aux_source_directory(src COMMON_SRC)
+aux_source_directory(src/msg COMMON_MSG_SRC)
+
+LIST(APPEND COMMON_SRC ${COMMON_MSG_SRC})
if(TD_ENTERPRISE)
LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c)
diff --git a/source/libs/stream/src/streamMsg.c b/source/common/src/msg/streamMsg.c
similarity index 75%
rename from source/libs/stream/src/streamMsg.c
rename to source/common/src/msg/streamMsg.c
index 193daa0cc4..c92ab52ac1 100644
--- a/source/libs/stream/src/streamMsg.c
+++ b/source/common/src/msg/streamMsg.c
@@ -15,8 +15,48 @@
#include "streamMsg.h"
#include "os.h"
-#include "tstream.h"
-#include "streamInt.h"
+#include "tcommon.h"
+
+typedef struct STaskId {
+ int64_t streamId;
+ int64_t taskId;
+} STaskId;
+
+typedef struct STaskCkptInfo {
+ int64_t latestId; // saved checkpoint id
+ int64_t latestVer; // saved checkpoint ver
+ int64_t latestTime; // latest checkpoint time
+ int64_t latestSize; // latest checkpoint size
+ int8_t remoteBackup; // latest checkpoint backup done
+ int64_t activeId; // current active checkpoint id
+ int32_t activeTransId; // checkpoint trans id
+  int8_t   failed;                 // denotes whether the checkpoint failed
+  int8_t   consensusChkptId;       // whether the consensus checkpointId is required
+ int64_t consensusTs; //
+} STaskCkptInfo;
+
+typedef struct STaskStatusEntry {
+ STaskId id;
+ int32_t status;
+ int32_t statusLastDuration; // to record the last duration of current status
+ int64_t stage;
+ int32_t nodeId;
+ SVersionRange verRange; // start/end version in WAL, only valid for source task
+ int64_t processedVer; // only valid for source task
+ double inputQUsed; // in MiB
+ double inputRate;
+ double procsThroughput; // duration between one element put into input queue and being processed.
+ double procsTotal; // duration between one element put into input queue and being processed.
+ double outputThroughput; // the size of dispatched result blocks in bytes
+ double outputTotal; // the size of dispatched result blocks in bytes
+ double sinkQuota; // existed quota size for sink task
+ double sinkDataSize; // sink to dst data size
+ int64_t startTime;
+ int64_t startCheckpointId;
+ int64_t startCheckpointVer;
+ int64_t hTaskId;
+ STaskCkptInfo checkpointInfo;
+} STaskStatusEntry;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamUpstreamEpInfo* pInfo) {
TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pInfo->taskId));
@@ -289,7 +329,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p
TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->totalLen));
if (taosArrayGetSize(pReq->data) != pReq->blockNum || taosArrayGetSize(pReq->dataLen) != pReq->blockNum) {
- stError("invalid dispatch req msg");
+ uError("invalid dispatch req msg");
TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG);
}
@@ -605,173 +645,92 @@ void tCleanupStreamHbMsg(SStreamHbMsg* pMsg) {
pMsg->numOfTasks = -1;
}
-int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
+int32_t tEncodeStreamHbRsp(SEncoder* pEncoder, const SMStreamHbRspMsg* pRsp) {
int32_t code = 0;
int32_t lino;
TAOS_CHECK_EXIT(tStartEncode(pEncoder));
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->ver));
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->id.streamId));
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->id.taskId));
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.trigger));
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.taskLevel));
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.type));
- TAOS_CHECK_EXIT(tEncodeI16(pEncoder, pTask->msgInfo.msgType));
-
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.taskStatus));
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.schedStatus));
-
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.selfChildId));
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.nodeId));
- TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.epSet));
- TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.mnodeEpset));
-
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointId));
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointVer));
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.fillHistory));
-
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->hTaskInfo.id.streamId));
- int32_t taskId = pTask->hTaskInfo.id.taskId;
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId));
-
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->streamTaskId.streamId));
- taskId = pTask->streamTaskId.taskId;
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId));
-
- TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.minVer));
- TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.maxVer));
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.skey));
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.ekey));
-
- int32_t epSz = taosArrayGetSize(pTask->upstreamInfo.pList);
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, epSz));
- for (int32_t i = 0; i < epSz; i++) {
- SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i);
- TAOS_CHECK_EXIT(tEncodeStreamEpInfo(pEncoder, pInfo));
- }
-
- if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
- TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->exec.qmsg));
- }
-
- if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.tbSink.stbUid));
- TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.tbSink.stbFullName));
- TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, pTask->outputInfo.tbSink.pSchemaWrapper));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) {
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.smaSink.smaId));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.fetchSink.reserved));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.taskId));
- TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.nodeId));
- TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->outputInfo.fixedDispatcher.epSet));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
- TAOS_CHECK_EXIT(tSerializeSUseDbRspImp(pEncoder, &pTask->outputInfo.shuffleDispatcher.dbInfo));
- TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.shuffleDispatcher.stbFullName));
- }
- TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->info.delaySchedParam));
- TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5));
- TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1));
-
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->msgId));
tEndEncode(pEncoder);
+
_exit:
return code;
}
-int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
- int32_t taskId = 0;
+int32_t tDecodeStreamHbRsp(SDecoder* pDecoder, SMStreamHbRspMsg* pRsp) {
int32_t code = 0;
int32_t lino;
TAOS_CHECK_EXIT(tStartDecode(pDecoder));
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->ver));
- if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER || pTask->ver > SSTREAM_TASK_VER) {
- TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG);
- }
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->msgId));
+ tEndDecode(pDecoder);
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->id.streamId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->id.taskId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.trigger));
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.taskLevel));
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.type));
- TAOS_CHECK_EXIT(tDecodeI16(pDecoder, &pTask->msgInfo.msgType));
+_exit:
+ return code;
+}
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.taskStatus));
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.schedStatus));
+int32_t tEncodeRetrieveChkptTriggerReq(SEncoder* pEncoder, const SRetrieveChkptTriggerReq* pReq) {
+ int32_t code = 0;
+ int32_t lino;
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.selfChildId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.nodeId));
- TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.epSet));
- TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.mnodeEpset));
+ TAOS_CHECK_EXIT(tStartEncode(pEncoder));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->streamId));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->checkpointId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->upstreamNodeId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->upstreamTaskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->downstreamNodeId));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->downstreamTaskId));
+ tEndEncode(pEncoder);
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointId));
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointVer));
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.fillHistory));
+_exit:
+ return code;
+}
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->hTaskInfo.id.streamId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId));
- pTask->hTaskInfo.id.taskId = taskId;
+int32_t tDecodeRetrieveChkptTriggerReq(SDecoder* pDecoder, SRetrieveChkptTriggerReq* pReq) {
+ int32_t code = 0;
+ int32_t lino;
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->streamTaskId.streamId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId));
- pTask->streamTaskId.taskId = taskId;
+ TAOS_CHECK_EXIT(tStartDecode(pDecoder));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->streamId));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->checkpointId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->upstreamNodeId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->upstreamTaskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->downstreamNodeId));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->downstreamTaskId));
+ tEndDecode(pDecoder);
- TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.minVer));
- TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.maxVer));
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.skey));
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.ekey));
+_exit:
+ return code;
+}
- int32_t epSz = -1;
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &epSz) < 0);
+int32_t tEncodeCheckpointTriggerRsp(SEncoder* pEncoder, const SCheckpointTriggerRsp* pRsp) {
+ int32_t code = 0;
+ int32_t lino;
- if ((pTask->upstreamInfo.pList = taosArrayInit(epSz, POINTER_BYTES)) == NULL) {
- TAOS_CHECK_EXIT(terrno);
- }
- for (int32_t i = 0; i < epSz; i++) {
- SStreamUpstreamEpInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamUpstreamEpInfo));
- if (pInfo == NULL) {
- TAOS_CHECK_EXIT(terrno);
- }
- if ((code = tDecodeStreamEpInfo(pDecoder, pInfo)) < 0) {
- taosMemoryFreeClear(pInfo);
- goto _exit;
- }
- if (taosArrayPush(pTask->upstreamInfo.pList, &pInfo) == NULL) {
- TAOS_CHECK_EXIT(terrno);
- }
- }
+ TAOS_CHECK_EXIT(tStartEncode(pEncoder));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pRsp->streamId));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pRsp->checkpointId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->upstreamTaskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->taskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->transId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->rspCode));
+ tEndEncode(pEncoder);
- if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
- TAOS_CHECK_EXIT(tDecodeCStrAlloc(pDecoder, &pTask->exec.qmsg));
- }
+_exit:
+ return code;
+}
- if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.tbSink.stbUid));
- TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.tbSink.stbFullName));
- pTask->outputInfo.tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
- if (pTask->outputInfo.tbSink.pSchemaWrapper == NULL) {
- TAOS_CHECK_EXIT(terrno);
- }
- TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, pTask->outputInfo.tbSink.pSchemaWrapper));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) {
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.smaSink.smaId));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.fetchSink.reserved));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.taskId));
- TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.nodeId));
- TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->outputInfo.fixedDispatcher.epSet));
- } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
- TAOS_CHECK_EXIT(tDeserializeSUseDbRspImp(pDecoder, &pTask->outputInfo.shuffleDispatcher.dbInfo));
- TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.shuffleDispatcher.stbFullName));
- }
- TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->info.delaySchedParam));
- if (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
- TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->subtableWithoutMd5));
- }
- TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve));
+int32_t tDecodeCheckpointTriggerRsp(SDecoder* pDecoder, SCheckpointTriggerRsp* pRsp) {
+ int32_t code = 0;
+ int32_t lino;
+ TAOS_CHECK_EXIT(tStartDecode(pDecoder));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pRsp->streamId));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pRsp->checkpointId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->upstreamTaskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->taskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->transId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->rspCode));
tEndDecode(pDecoder);
_exit:
@@ -830,11 +789,7 @@ int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpoin
tEndEncode(pEncoder);
_exit:
- if (code) {
- return code;
- } else {
- return pEncoder->pos;
- }
+ return code;
}
int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq) {
@@ -853,3 +808,31 @@ int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo*
_exit:
return code;
}
+
+int32_t tEncodeStreamTaskRunReq(SEncoder* pEncoder, const SStreamTaskRunReq* pReq) {
+ int32_t code = 0;
+ int32_t lino;
+
+ TAOS_CHECK_EXIT(tStartEncode(pEncoder));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->streamId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->taskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->reqType));
+ tEndEncode(pEncoder);
+
+_exit:
+ return code;
+}
+
+int32_t tDecodeStreamTaskRunReq(SDecoder* pDecoder, SStreamTaskRunReq* pReq) {
+ int32_t code = 0;
+ int32_t lino;
+
+ TAOS_CHECK_EXIT(tStartDecode(pDecoder));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->streamId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->taskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->reqType));
+ tEndDecode(pDecoder);
+
+_exit:
+ return code;
+}
\ No newline at end of file
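
The new RunReq codec keeps the usual discipline: encoder and decoder touch exactly the same fields in the same order, bracketed by the start/end markers and checked step by step. A stripped-down illustration of that symmetry with a plain byte cursor (the real SEncoder/SDecoder also handle endianness and length framing, which is omitted here):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Encode and decode must write/read the same fields in the same order. */
typedef struct { int64_t streamId; int32_t taskId; int32_t reqType; } RunReq;

static size_t encode_run_req(uint8_t *buf, const RunReq *req) {
  size_t pos = 0;
  memcpy(buf + pos, &req->streamId, sizeof(req->streamId)); pos += sizeof(req->streamId);
  memcpy(buf + pos, &req->taskId,   sizeof(req->taskId));   pos += sizeof(req->taskId);
  memcpy(buf + pos, &req->reqType,  sizeof(req->reqType));  pos += sizeof(req->reqType);
  return pos;
}

static size_t decode_run_req(const uint8_t *buf, RunReq *req) {
  size_t pos = 0;
  memcpy(&req->streamId, buf + pos, sizeof(req->streamId)); pos += sizeof(req->streamId);
  memcpy(&req->taskId,   buf + pos, sizeof(req->taskId));   pos += sizeof(req->taskId);
  memcpy(&req->reqType,  buf + pos, sizeof(req->reqType));  pos += sizeof(req->reqType);
  return pos;
}

int main(void) {
  uint8_t buf[64];
  RunReq  in = {.streamId = 7, .taskId = 3, .reqType = 1}, out = {0};
  size_t  n = encode_run_req(buf, &in);
  decode_run_req(buf, &out);
  printf("bytes=%zu streamId=%lld taskId=%d reqType=%d\n",
         n, (long long)out.streamId, out.taskId, out.reqType);
  return 0;
}
```
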
diff --git a/source/common/src/tmsg.c b/source/common/src/msg/tmsg.c
similarity index 99%
rename from source/common/src/tmsg.c
rename to source/common/src/msg/tmsg.c
index 6d1699b911..2e997218ac 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/msg/tmsg.c
@@ -76,7 +76,7 @@ static int32_t tSerializeSMonitorParas(SEncoder *encoder, const SMonitorParas *p
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogScope));
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogMaxLen));
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThreshold));
- TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest));
+ TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest)); //Obsolete
TAOS_CHECK_RETURN(tEncodeCStr(encoder, pMonitorParas->tsSlowLogExceptDb));
return 0;
}
@@ -87,7 +87,7 @@ static int32_t tDeserializeSMonitorParas(SDecoder *decoder, SMonitorParas *pMoni
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogScope));
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogMaxLen));
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThreshold));
- TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest));
+ TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest)); //Obsolete
TAOS_CHECK_RETURN(tDecodeCStrTo(decoder, pMonitorParas->tsSlowLogExceptDb));
return 0;
}
@@ -2169,7 +2169,7 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl
SAnalyticsUrl *pUrl = pIter;
size_t nameLen = 0;
const char *name = taosHashGetKey(pIter, &nameLen);
- if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_KEY_LEN && pUrl->urlLen > 0) {
+ if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_KEY_LEN && pUrl->urlLen > 0) {
numOfAlgos++;
}
pIter = taosHashIterate(pRsp->hash, pIter);
@@ -2224,7 +2224,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal
int32_t numOfAlgos = 0;
int32_t nameLen;
int32_t type;
- char name[TSDB_ANAL_ALGO_KEY_LEN];
+ char name[TSDB_ANALYTIC_ALGO_KEY_LEN];
SAnalyticsUrl url = {0};
TAOS_CHECK_EXIT(tStartDecode(&decoder));
@@ -2233,7 +2233,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal
for (int32_t f = 0; f < numOfAlgos; ++f) {
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &nameLen));
- if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_NAME_LEN) {
+ if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_NAME_LEN) {
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, name));
}
@@ -5642,6 +5642,12 @@ int32_t tSerializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesRsp
SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
TAOS_CHECK_EXIT(tEncodeSVariablesInfo(&encoder, pInfo));
}
+
+ for (int32_t i = 0; i < varNum; ++i) {
+ SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
+ TAOS_CHECK_RETURN(tEncodeCStr(&encoder, pInfo->info));
+ }
+
tEndEncode(&encoder);
_exit:
@@ -5675,6 +5681,13 @@ int32_t tDeserializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesR
TAOS_CHECK_EXIT(terrno);
}
}
+
+ if (!tDecodeIsEnd(&decoder)) {
+ for (int32_t i = 0; i < varNum; ++i) {
+ SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i);
+ TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pInfo->info));
+ }
+ }
}
tEndDecode(&decoder);
@@ -8717,6 +8730,7 @@ int32_t tSerializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq) {
TAOS_CHECK_EXIT(tEncodeCStrWithLen(&encoder, pReq->sql, pReq->sqlLen));
TAOS_CHECK_EXIT(tEncodeU32(&encoder, pReq->msgLen));
TAOS_CHECK_EXIT(tEncodeBinary(&encoder, (uint8_t *)pReq->msg, pReq->msgLen));
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@@ -8765,6 +8779,11 @@ int32_t tDeserializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq)
TAOS_CHECK_EXIT(tDecodeCStrAlloc(&decoder, &pReq->sql));
TAOS_CHECK_EXIT(tDecodeU32(&decoder, &pReq->msgLen));
TAOS_CHECK_EXIT(tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, NULL));
+ if (!tDecodeIsEnd(&decoder)) {
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
+ } else {
+ pReq->clientId = 0;
+ }
tEndDecode(&decoder);
@@ -8894,6 +8913,7 @@ int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
} else {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, 0));
}
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@@ -8943,6 +8963,11 @@ int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq)
}
TAOS_CHECK_EXIT(tDeserializeSOperatorParam(&decoder, pReq->pOpParam));
}
+ if (!tDecodeIsEnd(&decoder)) {
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
+ } else {
+ pReq->clientId = 0;
+ }
tEndDecode(&decoder);
@@ -9055,6 +9080,7 @@ int32_t tSerializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq) {
TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->taskId));
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->refId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->execId));
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@@ -9095,6 +9121,11 @@ int32_t tDeserializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq)
TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->taskId));
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->refId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->execId));
+ if (!tDecodeIsEnd(&decoder)) {
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
+ } else {
+ pReq->clientId = 0;
+ }
tEndDecode(&decoder);
@@ -9123,6 +9154,7 @@ int32_t tSerializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pReq
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->refId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->execId));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->type));
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
@@ -9164,6 +9196,11 @@ int32_t tDeserializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pR
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->refId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->execId));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, (int32_t *)&pReq->type));
+ if (!tDecodeIsEnd(&decoder)) {
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
+ } else {
+ pReq->clientId = 0;
+ }
tEndDecode(&decoder);
@@ -9353,6 +9390,10 @@ int32_t tSerializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *pR
TAOS_CHECK_EXIT(tEncodeI32(&encoder, status->execId));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, status->status));
}
+ for (int32_t i = 0; i < num; ++i) {
+ STaskStatus *status = taosArrayGet(pRsp->taskStatus, i);
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, status->clientId));
+ }
} else {
TAOS_CHECK_EXIT(tEncodeI32(&encoder, 0));
}
@@ -9396,6 +9437,12 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *
TAOS_CHECK_EXIT(terrno);
}
}
+ if (!tDecodeIsEnd(&decoder)) {
+ for (int32_t i = 0; i < num; ++i) {
+ STaskStatus *status = taosArrayGet(pRsp->taskStatus, i);
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &status->clientId));
+ }
+ }
} else {
pRsp->taskStatus = NULL;
}
@@ -9560,6 +9607,7 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->sql));
TAOS_CHECK_EXIT(tEncodeBinary(&encoder, pReq->msg, pReq->phyLen));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->source));
+ TAOS_CHECK_EXIT(tEncodeU64(&encoder, pReq->clientId));
tEndEncode(&encoder);
_exit:
@@ -9608,6 +9656,11 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->source));
}
+ if (!tDecodeIsEnd(&decoder)) {
+ TAOS_CHECK_EXIT(tDecodeU64(&decoder, &pReq->clientId));
+ } else {
+ pReq->clientId = 0;
+ }
tEndDecode(&decoder);
_exit:
@@ -10277,6 +10330,7 @@ static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
TAOS_CHECK_RETURN(tStartEncode(pCoder));
TAOS_CHECK_RETURN(tEncodeCStr(pCoder, pReq->name));
TAOS_CHECK_RETURN(tEncodeU64(pCoder, pReq->suid));
+ TAOS_CHECK_RETURN(tEncodeI64(pCoder, pReq->uid));
TAOS_CHECK_RETURN(tEncodeI8(pCoder, pReq->igNotExists));
tEndEncode(pCoder);
@@ -10287,6 +10341,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
TAOS_CHECK_RETURN(tStartDecode(pCoder));
TAOS_CHECK_RETURN(tDecodeCStr(pCoder, &pReq->name));
TAOS_CHECK_RETURN(tDecodeU64(pCoder, &pReq->suid));
+ TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pReq->uid));
TAOS_CHECK_RETURN(tDecodeI8(pCoder, &pReq->igNotExists));
tEndDecode(pCoder);
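
Every clientId field added in tmsg.c follows the same compatibility rule: new fields are appended at the end of the payload, and the decoder reads them only if the buffer is not yet exhausted (tDecodeIsEnd), otherwise it falls back to a default. A sketch of that rule with a plain byte cursor (dec_is_end and dec_u64 are illustrative stand-ins, not the tEncoder API):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { const uint8_t *buf; size_t size, pos; } Dec;

static int dec_is_end(const Dec *d) { return d->pos >= d->size; }

static int dec_u64(Dec *d, uint64_t *v) {
  if (d->pos + sizeof(*v) > d->size) return -1;
  memcpy(v, d->buf + d->pos, sizeof(*v));
  d->pos += sizeof(*v);
  return 0;
}

/* v1 payload: taskId only.  v2 payload: taskId with clientId appended. */
static void decode_req(Dec *d) {
  uint64_t taskId = 0, clientId = 0;
  if (dec_u64(d, &taskId) != 0) return;
  if (!dec_is_end(d)) {
    (void)dec_u64(d, &clientId);  /* newer peer: the field is present */
  } else {
    clientId = 0;                 /* older peer: keep the default */
  }
  printf("taskId=%llu clientId=%llu\n",
         (unsigned long long)taskId, (unsigned long long)clientId);
}

int main(void) {
  uint8_t  v1[8] = {0}, v2[16] = {0};
  uint64_t task = 9, client = 77;
  memcpy(v1, &task, 8);
  memcpy(v2, &task, 8);
  memcpy(v2 + 8, &client, 8);
  Dec d1 = {v1, sizeof(v1), 0}, d2 = {v2, sizeof(v2), 0};
  decode_req(&d1);  /* clientId falls back to 0 */
  decode_req(&d2);  /* clientId read from the payload */
  return 0;
}
```
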
diff --git a/source/common/src/rsync.c b/source/common/src/rsync.c
index b7352acf25..8b453f7d82 100644
--- a/source/common/src/rsync.c
+++ b/source/common/src/rsync.c
@@ -119,11 +119,21 @@ static int32_t execCommand(char* command) {
}
void stopRsync() {
- int32_t code =
+ int32_t pid = 0;
+ int32_t code = 0;
+ char buf[128] = {0};
+
#ifdef WINDOWS
- system("taskkill /f /im rsync.exe");
+ code = system("taskkill /f /im rsync.exe");
#else
- system("pkill rsync");
+ code = taosGetPIdByName("rsync", &pid);
+ if (code == 0) {
+ int32_t ret = tsnprintf(buf, tListLen(buf), "kill -9 %d", pid);
+ if (ret > 0) {
+ uInfo("kill rsync program pid:%d", pid);
+ code = system(buf);
+ }
+ }
#endif
if (code != 0) {
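
stopRsync now resolves the rsync PID and kills that one process instead of issuing a blanket pkill, so unrelated rsync instances on the host are left alone. A portable POSIX sketch of the same idea; it shells out to pgrep purely for illustration, whereas the patch uses the taosGetPIdByName helper plus a "kill -9" command line:

```c
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

/* Illustrative only: resolve a PID with pgrep and send SIGKILL to it. */
static int kill_by_name(const char *name) {
  char cmd[128];
  snprintf(cmd, sizeof(cmd), "pgrep -x %s", name);

  FILE *fp = popen(cmd, "r");
  if (fp == NULL) return -1;

  int pid = 0;
  int found = (fscanf(fp, "%d", &pid) == 1);
  pclose(fp);

  if (!found) return -1;              /* no such process */
  printf("killing %s pid:%d\n", name, pid);
  return kill((pid_t)pid, SIGKILL);   /* targeted, unlike pkill <name> */
}

int main(void) {
  if (kill_by_name("rsync") != 0) printf("rsync not running or kill failed\n");
  return 0;
}
```
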
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 4993ece7c1..bfe82aa7ae 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -327,8 +327,9 @@ static const SSysDbTableSchema configSchema[] = {
static const SSysDbTableSchema variablesSchema[] = {
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
- {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "value", .bytes = TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "scope", .bytes = TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "info", .bytes = TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema topicSchema[] = {
@@ -401,7 +402,7 @@ static const SSysDbTableSchema userCompactsDetailSchema[] = {
static const SSysDbTableSchema anodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
- {.name = "url", .bytes = TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "url", .bytes = TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "update_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
@@ -409,8 +410,8 @@ static const SSysDbTableSchema anodesSchema[] = {
static const SSysDbTableSchema anodesFullSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
- {.name = "type", .bytes = TSDB_ANAL_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
- {.name = "algo", .bytes = TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "type", .bytes = TSDB_ANALYTIC_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "algo", .bytes = TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema tsmaSchema[] = {
diff --git a/source/common/src/tcol.c b/source/common/src/tcol.c
index 923aab12ca..55a4b21208 100644
--- a/source/common/src/tcol.c
+++ b/source/common/src/tcol.c
@@ -81,26 +81,42 @@ const char* getDefaultEncodeStr(uint8_t type) { return columnEncodeStr(getDefaul
uint16_t getDefaultCompress(uint8_t type) {
switch (type) {
case TSDB_DATA_TYPE_NULL:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_BOOL:
+ return TSDB_COLVAL_COMPRESS_ZSTD;
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
+ return TSDB_COLVAL_COMPRESS_ZLIB;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_BIGINT:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_VARCHAR: // TSDB_DATA_TYPE_BINARY
+ return TSDB_COLVAL_COMPRESS_ZSTD;
case TSDB_DATA_TYPE_TIMESTAMP:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_NCHAR:
+ return TSDB_COLVAL_COMPRESS_ZSTD;
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_USMALLINT:
+ return TSDB_COLVAL_COMPRESS_ZLIB;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_UBIGINT:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_JSON:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_VARBINARY:
+ return TSDB_COLVAL_COMPRESS_ZSTD;
case TSDB_DATA_TYPE_DECIMAL:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_BLOB:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_MEDIUMBLOB:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_GEOMETRY:
+ return TSDB_COLVAL_COMPRESS_LZ4;
case TSDB_DATA_TYPE_MAX:
return TSDB_COLVAL_COMPRESS_LZ4;
default:
@@ -166,6 +182,7 @@ const char* columnCompressStr(uint16_t type) {
}
uint8_t columnLevelVal(const char* level) {
+ if (level == NULL) return TSDB_COLVAL_LEVEL_NOCHANGE;
uint8_t l = TSDB_COLVAL_LEVEL_MEDIUM;
if (0 == strcmp(level, "h") || 0 == strcmp(level, TSDB_COLUMN_LEVEL_HIGH)) {
l = TSDB_COLVAL_LEVEL_HIGH;
@@ -180,6 +197,7 @@ uint8_t columnLevelVal(const char* level) {
}
uint16_t columnCompressVal(const char* compress) {
+ if (compress == NULL) return TSDB_COLVAL_COMPRESS_NOCHANGE;
uint16_t c = TSDB_COLVAL_COMPRESS_LZ4;
if (0 == strcmp(compress, TSDB_COLUMN_COMPRESS_LZ4)) {
c = TSDB_COLVAL_COMPRESS_LZ4;
@@ -200,6 +218,7 @@ uint16_t columnCompressVal(const char* compress) {
}
uint8_t columnEncodeVal(const char* encode) {
+ if (encode == NULL) return TSDB_COLVAL_ENCODE_NOCHANGE;
uint8_t e = TSDB_COLVAL_ENCODE_SIMPLE8B;
if (0 == strcmp(encode, TSDB_COLUMN_ENCODE_SIMPLE8B)) {
e = TSDB_COLVAL_ENCODE_SIMPLE8B;
@@ -311,6 +330,7 @@ void setColLevel(uint32_t* compress, uint8_t level) {
int32_t setColCompressByOption(uint8_t type, uint8_t encode, uint16_t compressType, uint8_t level, bool check,
uint32_t* compress) {
+ if(compress == NULL) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR;
if (check && !validColEncode(type, encode)) return TSDB_CODE_TSC_ENCODE_PARAM_ERROR;
setColEncode(compress, encode);
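
Besides returning a per-type default codec from the switch, the string-to-enum helpers now treat a NULL option as the NOCHANGE sentinel instead of dereferencing NULL inside strcmp. A compact sketch of that guarded lookup (enum values and names are illustrative, not the TSDB_COLVAL_* constants):

```c
#include <stdio.h>
#include <string.h>

enum { COMPRESS_NOCHANGE = 0, COMPRESS_LZ4 = 1, COMPRESS_ZSTD = 2, COMPRESS_ZLIB = 3 };

/* A NULL option means "leave the codec unchanged" rather than crashing. */
static int compress_val(const char *compress) {
  if (compress == NULL) return COMPRESS_NOCHANGE;
  if (strcmp(compress, "lz4") == 0)  return COMPRESS_LZ4;
  if (strcmp(compress, "zstd") == 0) return COMPRESS_ZSTD;
  if (strcmp(compress, "zlib") == 0) return COMPRESS_ZLIB;
  return COMPRESS_LZ4;  /* fall back to the default codec */
}

int main(void) {
  printf("%d %d %d\n", compress_val(NULL), compress_val("zstd"), compress_val("zlib"));
  return 0;
}
```
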
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index e580ad33bd..a38842735c 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -3036,7 +3036,8 @@ _exit:
return code;
}
-int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen) {
+int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
+ checkWKBGeometryFn cgeos) {
int32_t code = 0;
if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) {
@@ -3046,6 +3047,12 @@ int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32
}
if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type
+ if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) {
+ code = igeos();
+ if (code) {
+ return code;
+ }
+ }
for (int32_t i = 0; i < pBind->num; ++i) {
if (pBind->is_null && pBind->is_null[i]) {
if (pColData->cflag & COL_IS_KEY) {
@@ -3055,9 +3062,12 @@ int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
} else if (pBind->length[i] > buffMaxLen) {
- uError("var data length too big, len:%d, max:%d", pBind->length[i], buffMaxLen);
- return TSDB_CODE_INVALID_PARA;
+ return TSDB_CODE_PAR_VALUE_TOO_LONG;
} else {
+ if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) {
+ code = cgeos((char *)pBind->buffer + pBind->buffer_length * i, (size_t)pBind->length[i]);
+ if (code) goto _exit;
+ }
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](
pColData, (uint8_t *)pBind->buffer + pBind->buffer_length * i, pBind->length[i]);
}
@@ -3108,7 +3118,8 @@ _exit:
return code;
}
-int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen) {
+int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
+ checkWKBGeometryFn cgeos) {
int32_t code = 0;
if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) {
@@ -3118,6 +3129,13 @@ int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int3
}
if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type
+ if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) {
+ code = igeos();
+ if (code) {
+ return code;
+ }
+ }
+
uint8_t *buf = pBind->buffer;
for (int32_t i = 0; i < pBind->num; ++i) {
if (pBind->is_null && pBind->is_null[i]) {
@@ -3133,9 +3151,12 @@ int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int3
if (code) goto _exit;
}
} else if (pBind->length[i] > buffMaxLen) {
- uError("var data length too big, len:%d, max:%d", pBind->length[i], buffMaxLen);
- return TSDB_CODE_INVALID_PARA;
+ return TSDB_CODE_PAR_VALUE_TOO_LONG;
} else {
+ if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) {
+ code = cgeos(buf, pBind->length[i]);
+ if (code) goto _exit;
+ }
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](pColData, buf, pBind->length[i]);
buf += pBind->length[i];
}
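
tColDataAddValueByBind/ByBind2 now receive two callbacks (initGeosFn, checkWKBGeometryFn) so this common data-format layer can validate GEOMETRY bind values without linking the geometry library directly; the caller injects the checks. A hedged sketch of that injection pattern with toy callbacks standing in for the GEOS-backed ones:

```c
#include <stddef.h>
#include <stdio.h>

/* Callback types mirroring initGeosFn / checkWKBGeometryFn. */
typedef int (*init_geom_fn)(void);
typedef int (*check_wkb_fn)(const char *wkb, size_t len);

static int add_geom_value(const char *wkb, size_t len,
                          init_geom_fn init_geom, check_wkb_fn check_wkb) {
  int code = init_geom();      /* lazily bring up the validator */
  if (code != 0) return code;
  code = check_wkb(wkb, len);  /* reject malformed WKB before storing it */
  if (code != 0) return code;
  printf("stored %zu bytes of WKB\n", len);
  return 0;
}

/* Toy implementations standing in for the real geometry callbacks. */
static int fake_init(void) { return 0; }
static int fake_check(const char *wkb, size_t len) { (void)wkb; return len >= 5 ? 0 : -1; }

int main(void) {
  const char point[21] = {0};  /* pretend WKB blob */
  printf("rc=%d\n", add_geom_value(point, sizeof(point), fake_init, fake_check));
  printf("rc=%d\n", add_geom_value(point, 2, fake_init, fake_check));
  return 0;
}
```
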
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 9c72e3b498..93c86a2dcd 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -59,7 +59,6 @@ int32_t tsNumOfRpcSessions = 30000;
int32_t tsShareConnLimit = 10;
int32_t tsReadTimeout = 900;
int32_t tsTimeToGetAvailableConn = 500000;
-int32_t tsKeepAliveIdle = 60;
int32_t tsNumOfCommitThreads = 2;
int32_t tsNumOfTaskQueueThreads = 16;
@@ -185,7 +184,6 @@ int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
int32_t tsMetaCacheMaxSize = -1; // MB
int32_t tsSlowLogThreshold = 10; // seconds
-int32_t tsSlowLogThresholdTest = INT32_MAX; // seconds
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN] = ""; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_QUERY;
char *tsSlowLogScopeString = "query";
@@ -219,6 +217,8 @@ float tsSelectivityRatio = 1.0;
int32_t tsTagFilterResCacheSize = 1024 * 10;
char tsTagFilterCache = 0;
+int32_t tsBypassFlag = 0;
+
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
// 0 no query allowed, queries are disabled
@@ -523,7 +523,7 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input
int32_t taosAddClientLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "configDir", configDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddDir(pCfg, "scriptDir", configDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+ TAOS_CHECK_RETURN(cfgAddDir(pCfg, "scriptDir", configDir, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddDir(pCfg, "logDir", tsLogDir, CFG_SCOPE_BOTH, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "minimalLogDirGB", 1.0f, 0.001f, 10000000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(
@@ -531,13 +531,14 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "asyncLog", tsAsyncLog, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "logKeepDays", 0, -365000, 365000, CFG_SCOPE_BOTH, CFG_DYN_ENT_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "cDebugFlag", cDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_CLIENT, CFG_DYN_SERVER));
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@@ -550,7 +551,6 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@@ -591,17 +591,18 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlChildTableName", tsSmlChildTableName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlAutoChildTableNameDelimiter", tsSmlAutoChildTableNameDelimiter,
CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlTagName", tsSmlTagName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "smlTsDefaultName", tsSmlTsDefaultName, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "smlDot2Underline", tsSmlDot2Underline, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxInsertBatchRows", tsMaxInsertBatchRows, 1, INT32_MAX, CFG_SCOPE_CLIENT,
CFG_DYN_CLIENT) != 0);
TAOS_CHECK_RETURN(
- cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
+ cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_SERVER, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_CLIENT));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX,
@@ -613,6 +614,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "bypassFlag", tsBypassFlag, 0, INT32_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
@@ -631,15 +633,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(
cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
- tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000);
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
-
tsNumOfTaskQueueThreads = tsNumOfCores * 2;
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 16);
TAOS_CHECK_RETURN(
cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, CFG_SCOPE_CLIENT, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddBool(pCfg, "experimental", tsExperimental, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
+ TAOS_CHECK_RETURN(cfgAddBool(pCfg, "experimental", tsExperimental, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "multiResultFunctionStarReturnTags", tsMultiResultFunctionStarReturnTags,
CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
@@ -728,8 +727,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddString(pCfg, "encryptScope", tsEncryptScope, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
@@ -747,7 +745,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+ TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE));
@@ -766,7 +764,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitor", tsEnableMonitor, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 86400, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogThresholdTest", tsSlowLogThresholdTest, 0, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 1, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogMaxLen", tsSlowLogMaxLen, 1, 16384, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "slowLogScope", tsSlowLogScopeString, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@@ -784,12 +781,12 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_BOTH, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddString(pCfg, "telemetryServer", tsTelemServer, CFG_SCOPE_BOTH, CFG_DYN_BOTH));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+ TAOS_CHECK_RETURN(cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE));
+ TAOS_CHECK_RETURN(cfgAddString(pCfg, "telemetryServer", tsTelemServer, CFG_SCOPE_SERVER, CFG_DYN_BOTH));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, CFG_SCOPE_SERVER, CFG_DYN_NONE));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rsyncPort", tsRsyncPort, 1, 65535, CFG_SCOPE_BOTH, CFG_DYN_SERVER));
+ TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "rsyncPort", tsRsyncPort, 1, 65535, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "snodeAddress", tsSnodeAddress, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "checkpointBackupDir", tsCheckpointBackupDir, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@@ -1295,9 +1292,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "timeToGetAvailableConn");
tsTimeToGetAvailableConn = pItem->i32;
- TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "keepAliveIdle");
- tsKeepAliveIdle = pItem->i32;
-
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "experimental");
tsExperimental = pItem->bval;
@@ -1312,6 +1306,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel");
tsSafetyCheckLevel = pItem->i32;
+
+ TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "bypassFlag");
+ tsBypassFlag = pItem->i32;
+
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
@@ -1450,9 +1448,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogExceptDb");
tstrncpy(tsSlowLogExceptDb, pItem->str, TSDB_DB_NAME_LEN);
- TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogThresholdTest");
- tsSlowLogThresholdTest = pItem->i32;
-
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogThreshold");
tsSlowLogThreshold = pItem->i32;
@@ -2024,7 +2019,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"monitor", &tsEnableMonitor},
{"monitorInterval", &tsMonitorInterval},
{"slowLogThreshold", &tsSlowLogThreshold},
- {"slowLogThresholdTest", &tsSlowLogThresholdTest},
{"slowLogMaxLen", &tsSlowLogMaxLen},
{"mndSdbWriteDelta", &tsMndSdbWriteDelta},
@@ -2036,7 +2030,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"cacheLazyLoadThreshold", &tsCacheLazyLoadThreshold},
{"checkpointInterval", &tsStreamCheckpointInterval},
- {"keepAliveIdle", &tsKeepAliveIdle},
{"logKeepDays", &tsLogKeepDays},
{"maxStreamBackendCache", &tsMaxStreamBackendCache},
{"mqRebalanceInterval", &tsMqRebalanceInterval},
@@ -2060,7 +2053,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"supportVnodes", &tsNumOfSupportVnodes},
{"experimental", &tsExperimental},
{"maxTsmaNum", &tsMaxTsmaNum},
- {"safetyCheckLevel", &tsSafetyCheckLevel}};
+ {"safetyCheckLevel", &tsSafetyCheckLevel},
+ {"bypassFlag", &tsBypassFlag}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);
@@ -2294,7 +2288,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"crashReporting", &tsEnableCrashReport},
{"enableQueryHb", &tsEnableQueryHb},
{"keepColumnName", &tsKeepColumnName},
- {"keepAliveIdle", &tsKeepAliveIdle},
{"logKeepDays", &tsLogKeepDays},
{"maxInsertBatchRows", &tsMaxInsertBatchRows},
{"maxRetryWaitTime", &tsMaxRetryWaitTime},
@@ -2317,7 +2310,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
{"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags},
{"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay},
{"tsmaDataDeleteMark", &tsmaDataDeleteMark},
- {"safetyCheckLevel", &tsSafetyCheckLevel}};
+ {"safetyCheckLevel", &tsSafetyCheckLevel},
+ {"bypassFlag", &tsBypassFlag}};
if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
code = taosCfgSetOption(options, tListLen(options), pItem, false);
diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c
index 4df458c2bb..8988fab56a 100644
--- a/source/common/src/tmisce.c
+++ b/source/common/src/tmisce.c
@@ -267,7 +267,14 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
int8_t locked = 0;
- TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf)), NULL, _exit);
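+  // reserve extra capacity: one additional row for each dataDir mount point beyond the first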
+ size_t exSize = 0;
+ size_t index = 0;
+ SConfigItem* pDataDirItem = cfgGetItem(pConf, "dataDir");
+ if (pDataDirItem) {
+ exSize = TMAX(taosArrayGetSize(pDataDirItem->array), 1) - 1;
+ }
+
+ TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf) + exSize), NULL, _exit);
TAOS_CHECK_GOTO(cfgCreateIter(pConf, &pIter), NULL, _exit);
@@ -275,6 +282,7 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
locked = 1;
while ((pItem = cfgNextIter(pIter)) != NULL) {
+_start:
col = startCol;
// GRANT_CFG_SKIP;
@@ -289,9 +297,18 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, name, false), NULL, _exit);
- char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
+ char value[TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t valueLen = 0;
- TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen), NULL, _exit);
+ SDiskCfg* pDiskCfg = NULL;
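+    // dataDir is reported one mount point per row; other items dump their value as usual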
+ if (strcasecmp(pItem->name, "dataDir") == 0 && exSize > 0) {
+ char* buf = &value[VARSTR_HEADER_SIZE];
+ pDiskCfg = taosArrayGet(pItem->array, index);
+ valueLen = tsnprintf(buf, TSDB_CONFIG_PATH_LEN, "%s", pDiskCfg->dir);
+ index++;
+ } else {
+ TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_PATH_LEN, &valueLen), NULL,
+ _exit);
+ }
varDataSetLen(value, valueLen);
pColInfo = taosArrayGet(pBlock->pDataBlock, col++);
@@ -313,8 +330,28 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) {
}
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, scope, false), NULL, _exit);
+ char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0};
+ if (strcasecmp(pItem->name, "dataDir") == 0 && pDiskCfg) {
+ char* buf = &info[VARSTR_HEADER_SIZE];
+ valueLen = tsnprintf(buf, TSDB_CONFIG_INFO_LEN, "level %d primary %d disabled %" PRIi8, pDiskCfg->level,
+ pDiskCfg->primary, pDiskCfg->disable);
+ } else {
+ valueLen = 0;
+ }
+ varDataSetLen(info, valueLen);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, col++);
+ if (pColInfo == NULL) {
+ code = terrno;
+ TAOS_CHECK_GOTO(code, NULL, _exit);
+ }
+ TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, info, false), NULL, _exit);
+
numOfRows++;
- }
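+    // more dataDir mount points remain: loop back and emit another row for this item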
+ if (index > 0 && index <= exSize) {
+ goto _start;
+ }
+  }
pBlock->info.rows = numOfRows;
_exit:
if (locked) cfgUnLock(pConf);
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index 75624593d9..ecdb3de9a2 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -30,7 +30,7 @@ static int64_t m_deltaUtc = 0;
void deltaToUtcInitOnce() {
struct tm tm = {0};
- if (taosStrpTime("1970-01-01 00:00:00", (const char*)("%Y-%m-%d %H:%M:%S"), &tm) != 0) {
+ if (taosStrpTime("1970-01-01 00:00:00", (const char*)("%Y-%m-%d %H:%M:%S"), &tm) == NULL) {
uError("failed to parse time string");
}
m_deltaUtc = (int64_t)taosMktime(&tm);
diff --git a/source/common/test/CMakeLists.txt b/source/common/test/CMakeLists.txt
index 2fe3ef652d..bb12612273 100644
--- a/source/common/test/CMakeLists.txt
+++ b/source/common/test/CMakeLists.txt
@@ -46,7 +46,7 @@ if (${TD_LINUX})
target_sources(tmsgTest
PRIVATE
"tmsgTest.cpp"
- "../src/tmsg.c"
+ "../src/msg/tmsg.c"
)
target_include_directories(tmsgTest PUBLIC "${TD_SOURCE_DIR}/include/common/")
target_link_libraries(tmsgTest PUBLIC os util gtest gtest_main)
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
index 78cc35a62c..c01fdcc85b 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
@@ -195,7 +195,6 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
req.clusterCfg.monitorParas.tsSlowLogScope = tsSlowLogScope;
req.clusterCfg.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
req.clusterCfg.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- req.clusterCfg.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
tstrncpy(req.clusterCfg.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
char timestr[32] = "1970-01-01 00:00:00.00";
if (taosParseTime(timestr, &req.clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0) != 0) {
diff --git a/source/dnode/mgmt/mgmt_snode/src/smWorker.c b/source/dnode/mgmt/mgmt_snode/src/smWorker.c
index 8c33c5bb4b..1e882fc656 100644
--- a/source/dnode/mgmt/mgmt_snode/src/smWorker.c
+++ b/source/dnode/mgmt/mgmt_snode/src/smWorker.c
@@ -36,14 +36,15 @@ static void smProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
dTrace("msg:%p, get from snode-write queue", pMsg);
int32_t code = sndProcessWriteMsg(pMgmt->pSnode, pMsg, NULL);
- if (code < 0) {
- dGError("snd, msg:%p failed to process write since %s", pMsg, tstrerror(code));
- if (pMsg->info.handle != NULL) {
- tmsgSendRsp(pMsg);
- }
- } else {
- smSendRsp(pMsg, 0);
- }
+ smSendRsp(pMsg, code);
dTrace("msg:%p, is freed", pMsg);
rpcFreeCont(pMsg->pCont);
diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
index 989adf84ac..7842077d88 100644
--- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
+++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
@@ -37,7 +37,9 @@ typedef struct SVnodeMgmt {
SSingleWorker mgmtMultiWorker;
SHashObj *hash;
SHashObj *closedHash;
+ SHashObj *creatingHash;
TdThreadRwlock lock;
+ TdThreadMutex mutex;
SVnodesStat state;
STfs *pTfs;
TdThread thread;
@@ -96,6 +98,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict);
void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed);
+void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId);
// vmHandle.c
SArray *vmGetMsgHandles();
@@ -113,6 +116,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt);
int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
+int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
// vmWorker.c
int32_t vmStartWorker(SVnodeMgmt *pMgmt);
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
index 7566b69c02..b4453ad6fc 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
@@ -67,6 +67,54 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod
return 0;
}
+int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
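+  // snapshot vnodes from both the active hash and the creating hash, taking a reference on each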
+ (void)taosThreadRwlockRdlock(&pMgmt->lock);
+
+ int32_t num = 0;
+ int32_t size = taosHashGetSize(pMgmt->hash);
+ int32_t creatingSize = taosHashGetSize(pMgmt->creatingHash);
+ size += creatingSize;
+ SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *));
+ if (pVnodes == NULL) {
+ (void)taosThreadRwlockUnlock(&pMgmt->lock);
+ return terrno;
+ }
+
+ void *pIter = taosHashIterate(pMgmt->hash, NULL);
+ while (pIter) {
+ SVnodeObj **ppVnode = pIter;
+ SVnodeObj *pVnode = *ppVnode;
+ if (pVnode && num < size) {
+ int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+ dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
+ pVnodes[num++] = (*ppVnode);
+ pIter = taosHashIterate(pMgmt->hash, pIter);
+ } else {
+ taosHashCancelIterate(pMgmt->hash, pIter);
+ }
+ }
+
+ pIter = taosHashIterate(pMgmt->creatingHash, NULL);
+ while (pIter) {
+ SVnodeObj **ppVnode = pIter;
+ SVnodeObj *pVnode = *ppVnode;
+ if (pVnode && num < size) {
+ int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+ dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
+ pVnodes[num++] = (*ppVnode);
+ pIter = taosHashIterate(pMgmt->creatingHash, pIter);
+ } else {
+ taosHashCancelIterate(pMgmt->creatingHash, pIter);
+ }
+ }
+ (void)taosThreadRwlockUnlock(&pMgmt->lock);
+
+ *numOfVnodes = num;
+ *ppVnodes = pVnodes;
+
+ return 0;
+}
+
int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
(void)taosThreadRwlockRdlock(&pMgmt->lock);
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 006f44b349..90b3f0025d 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -381,6 +381,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
if (vnodeCreate(path, &vnodeCfg, diskPrimary, pMgmt->pTfs) < 0) {
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
vmReleaseVnode(pMgmt, pVnode);
+ vmRemoveFromCreatingHash(pMgmt, req.vgId);
(void)tFreeSCreateVnodeReq(&req);
code = terrno != 0 ? terrno : -1;
return code;
@@ -422,6 +423,8 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
_OVER:
+ vmRemoveFromCreatingHash(pMgmt, req.vgId);
+
if (code != 0) {
int32_t r = 0;
r = taosThreadRwlockWrlock(&pMgmt->lock);
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index 682c179270..c0f15b8877 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "libs/function/tudf.h"
+#include "osMemory.h"
#include "tfs.h"
#include "vnd.h"
@@ -62,10 +63,20 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
int32_t numOfVnodes = 0;
SVnodeObj **ppVnodes = NULL;
- code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
+ code = taosThreadMutexLock(&pMgmt->mutex);
if (code != 0) {
return code;
}
+
+ code = vmGetAllVnodeListFromHashWithCreating(pMgmt, &numOfVnodes, &ppVnodes);
+ if (code != 0) {
+ int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
+ if (r != 0) {
+ dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
+ }
+ return code;
+ }
+
for (int32_t v = 0; v < numOfVnodes; v++) {
SVnodeObj *pVnode = ppVnodes[v];
disks[pVnode->diskPrimary] += 1;
@@ -81,6 +92,51 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
}
}
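+  // register this vgId in creatingHash so concurrent disk allocation accounts for the in-flight vnode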
+ SVnodeObj *pCreatingVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
+ if (pCreatingVnode == NULL) {
+ code = -1;
+ if (terrno != 0) code = terrno;
+ dError("failed to alloc vnode since %s", tstrerror(code));
+ int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
+ if (r != 0) {
+ dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
+ }
+ goto _OVER;
+ }
+ (void)memset(pCreatingVnode, 0, sizeof(SVnodeObj));
+
+ pCreatingVnode->vgId = vgId;
+ pCreatingVnode->diskPrimary = diskId;
+
+ code = taosThreadRwlockWrlock(&pMgmt->lock);
+ if (code != 0) {
+ int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
+ if (r != 0) {
+ dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
+ }
+ taosMemoryFree(pCreatingVnode);
+ goto _OVER;
+ }
+
+ dTrace("vgId:%d, put vnode into creating hash, pCreatingVnode:%p", vgId, pCreatingVnode);
+ code = taosHashPut(pMgmt->creatingHash, &vgId, sizeof(int32_t), &pCreatingVnode, sizeof(SVnodeObj *));
+ if (code != 0) {
+ dError("vgId:%d, failed to put vnode to creatingHash", vgId);
+ taosMemoryFree(pCreatingVnode);
+ }
+
+ int32_t r = taosThreadRwlockUnlock(&pMgmt->lock);
+ if (r != 0) {
+ dError("vgId:%d, failed to unlock since %s", vgId, tstrerror(r));
+ }
+
+ code = taosThreadMutexUnlock(&pMgmt->mutex);
+ if (code != 0) {
+ goto _OVER;
+ }
+
+_OVER:
+
for (int32_t i = 0; i < numOfVnodes; ++i) {
if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
vmReleaseVnode(pMgmt, ppVnodes[i]);
@@ -89,8 +145,13 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
taosMemoryFree(ppVnodes);
}
- dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
- return diskId;
+ if (code != 0) {
+ dError("vgId:%d, failed to alloc disk since %s", vgId, tstrerror(code));
+ return code;
+ } else {
+ dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
+ return diskId;
+ }
}
SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) {
@@ -172,7 +233,9 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
}
(void)taosThreadRwlockWrlock(&pMgmt->lock);
+
SVnodeObj *pOld = NULL;
+
int32_t r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
if (r != 0) {
dError("vgId:%d, failed to get vnode from hash", pVnode->vgId);
@@ -187,15 +250,15 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
if (r != 0) {
dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId);
}
- if (pOld) {
+ if (pOld != NULL) {
vmFreeVnodeObj(&pOld);
+ dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
+ r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
+ if (r != 0) {
+ dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
+ }
}
- dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
- r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
- if (r != 0) {
- dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
- }
(void)taosThreadRwlockUnlock(&pMgmt->lock);
return code;
@@ -216,12 +279,12 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
}
if (keepClosed) {
SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
- (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
- if (pVnode == NULL) {
- dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr());
+ if (pClosedVnode == NULL) {
+ dError("failed to alloc vnode since %s", terrstr());
(void)taosThreadRwlockUnlock(&pMgmt->lock);
return;
}
+ (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
pClosedVnode->vgId = pVnode->vgId;
pClosedVnode->dropped = pVnode->dropped;
@@ -427,11 +490,18 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
pMgmt->closedHash =
taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
- if (pMgmt->hash == NULL) {
+ if (pMgmt->closedHash == NULL) {
dError("failed to init vnode closed hash since %s", terrstr());
return TSDB_CODE_OUT_OF_MEMORY;
}
+ pMgmt->creatingHash =
+ taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+ if (pMgmt->creatingHash == NULL) {
+ dError("failed to init vnode creatingHash hash since %s", terrstr());
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
SWrapperCfg *pCfgs = NULL;
int32_t numOfVnodes = 0;
if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {
@@ -509,6 +579,32 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
return 0;
}
+void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId) {
+ SVnodeObj *pOld = NULL;
+
+  (void)taosThreadRwlockWrlock(&pMgmt->lock);
+  int32_t r = taosHashGetDup(pMgmt->creatingHash, &vgId, sizeof(int32_t), (void *)&pOld);
+  if (r != 0) {
+    dError("vgId:%d, failed to get vnode from creatingHash", vgId);
+  }
+  dTrace("vgId:%d, remove from creatingHash", vgId);
+  r = taosHashRemove(pMgmt->creatingHash, &vgId, sizeof(int32_t));
+  if (r != 0) {
+    dError("vgId:%d, failed to remove vnode from creatingHash since %s", vgId, tstrerror(r));
+  }
+  (void)taosThreadRwlockUnlock(&pMgmt->lock);
+
+  if (pOld) {
+    dTrace("vgId:%d, free vnode pOld:%p", vgId, pOld);
+    vmFreeVnodeObj(&pOld);
+  }
+}
+
static void *vmCloseVnodeInThread(void *param) {
SVnodeThread *pThread = param;
SVnodeMgmt *pMgmt = pThread->pMgmt;
@@ -614,6 +710,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
pMgmt->closedHash = NULL;
}
+ pIter = taosHashIterate(pMgmt->creatingHash, NULL);
+ while (pIter) {
+ SVnodeObj **ppVnode = pIter;
+ vmFreeVnodeObj(ppVnode);
+ pIter = taosHashIterate(pMgmt->creatingHash, pIter);
+ }
+
+ if (pMgmt->creatingHash != NULL) {
+ taosHashCleanup(pMgmt->creatingHash);
+ pMgmt->creatingHash = NULL;
+ }
+
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
@@ -622,6 +730,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) {
vmStopWorker(pMgmt);
vnodeCleanup();
(void)taosThreadRwlockDestroy(&pMgmt->lock);
+ (void)taosThreadMutexDestroy(&pMgmt->mutex);
(void)taosThreadMutexDestroy(&pMgmt->fileLock);
taosMemoryFree(pMgmt);
}
@@ -714,6 +823,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
goto _OVER;
}
+ code = taosThreadMutexInit(&pMgmt->mutex, NULL);
+ if (code != 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _OVER;
+ }
+
code = taosThreadMutexInit(&pMgmt->fileLock, NULL);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 61543e619e..5f396a520a 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -214,8 +214,6 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
} else if ((pRpc->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRpc->code == TSDB_CODE_RPC_BROKEN_LINK) &&
(!IsReq(pRpc)) && (pRpc->pCont == NULL)) {
dGError("msg:%p, type:%s pCont is NULL, err: %s", pRpc, TMSG_INFO(pRpc->msgType), tstrerror(pRpc->code));
- code = pRpc->code;
- goto _OVER;
}
if (pHandle->defaultNtype == NODE_END) {
diff --git a/source/dnode/mgmt/node_util/CMakeLists.txt b/source/dnode/mgmt/node_util/CMakeLists.txt
index d882d784de..320da45065 100644
--- a/source/dnode/mgmt/node_util/CMakeLists.txt
+++ b/source/dnode/mgmt/node_util/CMakeLists.txt
@@ -6,5 +6,5 @@ target_include_directories(
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
- node_util cjson mnode vnode qnode snode wal sync taos_static tfs monitor monitorfw
+ node_util cjson mnode vnode qnode snode wal sync ${TAOS_LIB_STATIC} tfs monitor monitorfw
)
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h
index c9155f536c..b519d8509a 100644
--- a/source/dnode/mnode/impl/inc/mndStream.h
+++ b/source/dnode/mnode/impl/inc/mndStream.h
@@ -56,6 +56,7 @@ typedef struct SStreamTransMgmt {
typedef struct SStreamTaskResetMsg {
int64_t streamId;
int32_t transId;
+ int64_t checkpointId;
} SStreamTaskResetMsg;
typedef struct SChkptReportInfo {
@@ -142,9 +143,9 @@ int32_t mndStreamSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamObj *pSt
int32_t mndStreamSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
int32_t mndStreamSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
int32_t mndStreamSetDropActionFromList(SMnode *pMnode, STrans *pTrans, SArray *pList);
-int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
+int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream, int64_t chkptId);
int32_t mndStreamSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
-int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream);
+int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream, int64_t chkptId);
int32_t mndStreamSetChkptIdAction(SMnode *pMnode, STrans *pTrans, SStreamTask* pTask, int64_t checkpointId, int64_t ts);
int32_t mndStreamSetRestartAction(SMnode* pMnode, STrans *pTrans, SStreamObj* pStream);
int32_t mndStreamSetCheckpointAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, int64_t checkpointId,
diff --git a/source/dnode/mnode/impl/src/mndAnode.c b/source/dnode/mnode/impl/src/mndAnode.c
index 87bfe9f7af..c64208600a 100644
--- a/source/dnode/mnode/impl/src/mndAnode.c
+++ b/source/dnode/mnode/impl/src/mndAnode.c
@@ -309,7 +309,7 @@ static int32_t mndCreateAnode(SMnode *pMnode, SRpcMsg *pReq, SMCreateAnodeReq *p
anodeObj.updateTime = anodeObj.createdTime;
anodeObj.version = 0;
anodeObj.urlLen = pCreate->urlLen;
- if (anodeObj.urlLen > TSDB_ANAL_ANODE_URL_LEN) {
+ if (anodeObj.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) {
code = TSDB_CODE_MND_ANODE_TOO_LONG_URL;
goto _OVER;
}
@@ -491,23 +491,24 @@ static int32_t mndSetDropAnodeRedoLogs(STrans *pTrans, SAnodeObj *pObj) {
int32_t code = 0;
SSdbRaw *pRedoRaw = mndAnodeActionEncode(pObj);
if (pRedoRaw == NULL) {
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
- TAOS_RETURN(code);
+ code = terrno;
+ return code;
}
+
TAOS_CHECK_RETURN(mndTransAppendRedolog(pTrans, pRedoRaw));
TAOS_CHECK_RETURN(sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPING));
- TAOS_RETURN(code);
+
+ return code;
}
static int32_t mndSetDropAnodeCommitLogs(STrans *pTrans, SAnodeObj *pObj) {
int32_t code = 0;
SSdbRaw *pCommitRaw = mndAnodeActionEncode(pObj);
if (pCommitRaw == NULL) {
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
- TAOS_RETURN(code);
+ code = terrno;
+ return code;
}
+
TAOS_CHECK_RETURN(mndTransAppendCommitlog(pTrans, pCommitRaw));
TAOS_CHECK_RETURN(sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED));
TAOS_RETURN(code);
@@ -521,25 +522,25 @@ static int32_t mndSetDropAnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SAnode
}
static int32_t mndDropAnode(SMnode *pMnode, SRpcMsg *pReq, SAnodeObj *pObj) {
- int32_t code = -1;
+ int32_t code = 0;
+ int32_t lino = 0;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-anode");
- if (pTrans == NULL) {
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
- goto _OVER;
- }
+ TSDB_CHECK_NULL(pTrans, code, lino, _OVER, terrno);
+
mndTransSetSerial(pTrans);
+ mInfo("trans:%d, to drop anode:%d", pTrans->id, pObj->id);
- mInfo("trans:%d, used to drop anode:%d", pTrans->id, pObj->id);
- TAOS_CHECK_GOTO(mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false), NULL, _OVER);
- TAOS_CHECK_GOTO(mndTransPrepare(pMnode, pTrans), NULL, _OVER);
+ code = mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false);
+ mndReleaseAnode(pMnode, pObj);
- code = 0;
+ TSDB_CHECK_CODE(code, lino, _OVER);
+
+ code = mndTransPrepare(pMnode, pTrans);
_OVER:
mndTransDrop(pTrans);
- TAOS_RETURN(code);
+ return code;
}
static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) {
@@ -560,20 +561,20 @@ static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) {
pObj = mndAcquireAnode(pMnode, dropReq.anodeId);
if (pObj == NULL) {
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
+ code = terrno;
goto _OVER;
}
code = mndDropAnode(pMnode, pReq, pObj);
- if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
+ if (code == 0) {
+ code = TSDB_CODE_ACTION_IN_PROGRESS;
+ }
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("anode:%d, failed to drop since %s", dropReq.anodeId, tstrerror(code));
}
- mndReleaseAnode(pMnode, pObj);
tFreeSMDropAnodeReq(&dropReq);
TAOS_RETURN(code);
}
@@ -584,7 +585,7 @@ static int32_t mndRetrieveAnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
int32_t numOfRows = 0;
int32_t cols = 0;
SAnodeObj *pObj = NULL;
- char buf[TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE];
+ char buf[TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE];
char status[64];
int32_t code = 0;
@@ -642,7 +643,7 @@ static int32_t mndRetrieveAnodesFull(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
int32_t numOfRows = 0;
int32_t cols = 0;
SAnodeObj *pObj = NULL;
- char buf[TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE];
+ char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE];
int32_t code = 0;
while (numOfRows < rows) {
@@ -693,7 +694,7 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) {
int32_t code = 0;
int32_t protocol = 0;
double tmp = 0;
- char buf[TSDB_ANAL_ALGO_NAME_LEN + 1] = {0};
+ char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + 1] = {0};
code = tjsonGetDoubleValue(pJson, "protocol", &tmp);
if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT;
@@ -753,10 +754,10 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) {
}
static int32_t mndGetAnodeAlgoList(const char *url, SAnodeObj *pObj) {
- char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0};
- snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", url, "list");
+ char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
+ snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", url, "list");
- SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL);
+ SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
if (pJson == NULL) return terrno;
int32_t code = mndDecodeAlgoList(pJson, pObj);
@@ -769,10 +770,10 @@ static int32_t mndGetAnodeStatus(SAnodeObj *pObj, char *status, int32_t statusLe
int32_t code = 0;
int32_t protocol = 0;
double tmp = 0;
- char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0};
- snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", pObj->url, "status");
+ char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
+ snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", pObj->url, "status");
- SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL);
+ SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
if (pJson == NULL) return terrno;
code = tjsonGetDoubleValue(pJson, "protocol", &tmp);
@@ -808,7 +809,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) {
SAnodeObj *pObj = NULL;
SAnalyticsUrl url;
int32_t nameLen;
- char name[TSDB_ANAL_ALGO_KEY_LEN];
+ char name[TSDB_ANALYTIC_ALGO_KEY_LEN];
SRetrieveAnalAlgoReq req = {0};
SRetrieveAnalAlgoRsp rsp = {0};
@@ -847,13 +848,13 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) {
goto _OVER;
}
}
- url.url = taosMemoryMalloc(TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1);
+ url.url = taosMemoryMalloc(TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1);
if (url.url == NULL) {
sdbRelease(pSdb, pAnode);
goto _OVER;
}
- url.urlLen = 1 + tsnprintf(url.url, TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN, "%s/%s", pAnode->url,
+ url.urlLen = 1 + tsnprintf(url.url, TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN, "%s/%s", pAnode->url,
taosAnalAlgoUrlStr(url.type));
if (taosHashPut(rsp.hash, name, nameLen, &url, sizeof(SAnalyticsUrl)) != 0) {
taosMemoryFree(url.url);
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 24ae8382f9..0011c11b0a 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -482,7 +482,6 @@ static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const S
CHECK_MONITOR_PARA(tsEnableMonitor, DND_REASON_STATUS_MONITOR_SWITCH_NOT_MATCH);
CHECK_MONITOR_PARA(tsMonitorInterval, DND_REASON_STATUS_MONITOR_INTERVAL_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogThreshold, DND_REASON_STATUS_MONITOR_SLOW_LOG_THRESHOLD_NOT_MATCH);
- CHECK_MONITOR_PARA(tsSlowLogThresholdTest, DND_REASON_STATUS_MONITOR_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogMaxLen, DND_REASON_STATUS_MONITOR_SLOW_LOG_SQL_MAX_LEN_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogScope, DND_REASON_STATUS_MONITOR_SLOW_LOG_SCOPE_NOT_MATCH);
@@ -1104,6 +1103,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) {
(void)strcpy(info.name, "statusInterval");
(void)snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval);
(void)strcpy(info.scope, "server");
+ // fill info.info
if (taosArrayPush(rsp.variables, &info) == NULL) {
code = terrno;
goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 6c30193ea7..9dd43225b1 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -53,7 +53,7 @@ static inline int32_t mndAcquireRpc(SMnode *pMnode) {
if (pMnode->stopped) {
code = TSDB_CODE_APP_IS_STOPPING;
} else if (!mndIsLeader(pMnode)) {
- code = -1;
+ code = 1;
} else {
#if 1
(void)atomic_add_fetch_32(&pMnode->rpcRef, 1);
@@ -1002,8 +1002,12 @@ int64_t mndGenerateUid(const char *name, int32_t len) {
int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo) {
- int32_t code = 0;
- TAOS_CHECK_RETURN(mndAcquireRpc(pMnode));
+ int32_t code = mndAcquireRpc(pMnode);
+ if (code < 0) {
+ TAOS_RETURN(code);
+ } else if (code == 1) {
+ TAOS_RETURN(TSDB_CODE_SUCCESS);
+ }
SSdb *pSdb = pMnode->pSdb;
int64_t ms = taosGetTimestampMs();
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index 21aba8df10..fd02367f6d 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -304,7 +304,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
connectRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
connectRsp.enableAuditDelete = tsEnableAuditDelete;
tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
connectRsp.whiteListVer = pUser->ipWhiteListVer;
@@ -706,7 +705,6 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
batchRsp.monitorParas.tsEnableMonitor = tsEnableMonitor;
batchRsp.monitorParas.tsMonitorInterval = tsMonitorInterval;
batchRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- batchRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 3725d3a3fc..eb6c326d1e 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -4063,8 +4063,8 @@ static int32_t mndProcessDropStbReqFromMNode(SRpcMsg *pReq) {
}
typedef struct SVDropTbVgReqs {
- SVDropTbBatchReq req;
- SVgroupInfo info;
+ SArray *pBatchReqs;
+ SVgroupInfo info;
} SVDropTbVgReqs;
typedef struct SMDropTbDbInfo {
@@ -4086,45 +4086,21 @@ typedef struct SMDropTbTsmaInfos {
} SMDropTbTsmaInfos;
typedef struct SMndDropTbsWithTsmaCtx {
- SHashObj *pTsmaMap; //
- SHashObj *pDbMap; //
- SHashObj *pVgMap; //
- SArray *pResTbNames; // SArray
+  SHashObj *pVgMap;    // vgId -> SVDropTbVgReqs
} SMndDropTbsWithTsmaCtx;
-static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
+static int32_t mndDropTbForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
int32_t vgId);
+static void destroySVDropTbBatchReqs(void *p);
static void mndDestroyDropTbsWithTsmaCtx(SMndDropTbsWithTsmaCtx *p) {
if (!p) return;
- if (p->pDbMap) {
- void *pIter = taosHashIterate(p->pDbMap, NULL);
- while (pIter) {
- SMDropTbDbInfo *pInfo = pIter;
- taosArrayDestroy(pInfo->dbVgInfos);
- pIter = taosHashIterate(p->pDbMap, pIter);
- }
- taosHashCleanup(p->pDbMap);
- }
- if (p->pResTbNames) {
- taosArrayDestroyP(p->pResTbNames, taosMemoryFree);
- }
- if (p->pTsmaMap) {
- void *pIter = taosHashIterate(p->pTsmaMap, NULL);
- while (pIter) {
- SMDropTbTsmaInfos *pInfos = pIter;
- taosArrayDestroy(pInfos->pTsmaInfos);
- pIter = taosHashIterate(p->pTsmaMap, pIter);
- }
- taosHashCleanup(p->pTsmaMap);
- }
-
if (p->pVgMap) {
void *pIter = taosHashIterate(p->pVgMap, NULL);
while (pIter) {
SVDropTbVgReqs *pReqs = pIter;
- taosArrayDestroy(pReqs->req.pArray);
+ taosArrayDestroyEx(pReqs->pBatchReqs, destroySVDropTbBatchReqs);
pIter = taosHashIterate(p->pVgMap, pIter);
}
taosHashCleanup(p->pVgMap);
@@ -4136,24 +4112,13 @@ static int32_t mndInitDropTbsWithTsmaCtx(SMndDropTbsWithTsmaCtx **ppCtx) {
int32_t code = 0;
SMndDropTbsWithTsmaCtx *pCtx = taosMemoryCalloc(1, sizeof(SMndDropTbsWithTsmaCtx));
if (!pCtx) return terrno;
- pCtx->pTsmaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
- if (!pCtx->pTsmaMap) {
- code = terrno;
- goto _end;
- }
-
- pCtx->pDbMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (!pCtx->pDbMap) {
- code = terrno;
- goto _end;
- }
- pCtx->pResTbNames = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
pCtx->pVgMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (!pCtx->pVgMap) {
code = terrno;
goto _end;
}
+
*ppCtx = pCtx;
_end:
if (code) mndDestroyDropTbsWithTsmaCtx(pCtx);
@@ -4192,16 +4157,43 @@ static void *mndBuildVDropTbsReq(SMnode *pMnode, const SVgroupInfo *pVgInfo, con
}
static int32_t mndSetDropTbsRedoActions(SMnode *pMnode, STrans *pTrans, const SVDropTbVgReqs *pVgReqs, void *pCont,
- int32_t contLen) {
+ int32_t contLen, tmsg_t msgType) {
STransAction action = {0};
action.epSet = pVgReqs->info.epSet;
action.pCont = pCont;
action.contLen = contLen;
- action.msgType = TDMT_VND_DROP_TABLE;
+ action.msgType = msgType;
action.acceptableCode = TSDB_CODE_TDB_TABLE_NOT_EXIST;
return mndTransAppendRedoAction(pTrans, &action);
}
+static int32_t mndBuildDropTbRedoActions(SMnode *pMnode, STrans *pTrans, SHashObj *pVgMap, tmsg_t msgType) {
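+  // append one redo action per batched drop-table request for every vgroup in the map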
+ int32_t code = 0;
+ void *pIter = taosHashIterate(pVgMap, NULL);
+ while (pIter) {
+ const SVDropTbVgReqs *pVgReqs = pIter;
+ int32_t len = 0;
+ for (int32_t i = 0; i < taosArrayGetSize(pVgReqs->pBatchReqs) && code == TSDB_CODE_SUCCESS; ++i) {
+ SVDropTbBatchReq *pBatchReq = taosArrayGet(pVgReqs->pBatchReqs, i);
+ void *p = mndBuildVDropTbsReq(pMnode, &pVgReqs->info, pBatchReq, &len);
+ if (!p) {
+ code = TSDB_CODE_MND_RETURN_VALUE_NULL;
+ if (terrno != 0) code = terrno;
+ break;
+ }
+ if ((code = mndSetDropTbsRedoActions(pMnode, pTrans, pVgReqs, p, len, msgType)) != 0) {
+ break;
+ }
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ taosHashCancelIterate(pVgMap, pIter);
+ break;
+ }
+ pIter = taosHashIterate(pVgMap, pIter);
+ }
+ return code;
+}
+
static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx *pCtx) {
int32_t code = 0;
SMnode *pMnode = pRsp->info.node;
@@ -4216,23 +4208,7 @@ static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx
TAOS_CHECK_GOTO(mndTransCheckConflict(pMnode, pTrans), NULL, _OVER);
- void *pIter = taosHashIterate(pCtx->pVgMap, NULL);
- while (pIter) {
- const SVDropTbVgReqs *pVgReqs = pIter;
- int32_t len = 0;
- void *p = mndBuildVDropTbsReq(pMnode, &pVgReqs->info, &pVgReqs->req, &len);
- if (!p) {
- taosHashCancelIterate(pCtx->pVgMap, pIter);
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
- goto _OVER;
- }
- if ((code = mndSetDropTbsRedoActions(pMnode, pTrans, pVgReqs, p, len)) != 0) {
- taosHashCancelIterate(pCtx->pVgMap, pIter);
- goto _OVER;
- }
- pIter = taosHashIterate(pCtx->pVgMap, pIter);
- }
+ if ((code = mndBuildDropTbRedoActions(pMnode, pTrans, pCtx->pVgMap, TDMT_VND_DROP_TABLE)) != 0) goto _OVER;
if ((code = mndTransPrepare(pMnode, pTrans)) != 0) goto _OVER;
_OVER:
@@ -4257,10 +4233,11 @@ static int32_t mndProcessDropTbWithTsma(SRpcMsg *pReq) {
if (code) goto _OVER;
for (int32_t i = 0; i < dropReq.pVgReqs->size; ++i) {
SMDropTbReqsOnSingleVg *pReq = taosArrayGet(dropReq.pVgReqs, i);
- code = mndDropTbAddTsmaResTbsForSingleVg(pMnode, pCtx, pReq->pTbs, pReq->vgInfo.vgId);
+ code = mndDropTbForSingleVg(pMnode, pCtx, pReq->pTbs, pReq->vgInfo.vgId);
if (code) goto _OVER;
}
- if (mndCreateDropTbsTxnPrepare(pReq, pCtx) == 0) {
+ code = mndCreateDropTbsTxnPrepare(pReq, pCtx);
+ if (code == 0) {
code = TSDB_CODE_ACTION_IN_PROGRESS;
}
_OVER:
@@ -4269,87 +4246,58 @@ _OVER:
TAOS_RETURN(code);
}
+static int32_t createDropTbBatchReq(const SVDropTbReq *pReq, SVDropTbBatchReq *pBatchReq) {
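+  // wrap a single drop-table request into its own one-entry batch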
+ pBatchReq->nReqs = 1;
+ pBatchReq->pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
+ if (!pBatchReq->pArray) return terrno;
+ if (taosArrayPush(pBatchReq->pArray, pReq) == NULL) {
+ taosArrayDestroy(pBatchReq->pArray);
+ pBatchReq->pArray = NULL;
+ return terrno;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void destroySVDropTbBatchReqs(void *p) {
+ SVDropTbBatchReq *pReq = p;
+ taosArrayDestroy(pReq->pArray);
+ pReq->pArray = NULL;
+}
+
static int32_t mndDropTbAdd(SMnode *pMnode, SHashObj *pVgHashMap, const SVgroupInfo *pVgInfo, char *name, tb_uid_t suid,
bool ignoreNotExists) {
- SVDropTbReq req = {.name = name, .suid = suid, .igNotExists = ignoreNotExists};
+ SVDropTbReq req = {.name = name, .suid = suid, .igNotExists = ignoreNotExists, .uid = 0};
- SVDropTbVgReqs *pReqs = taosHashGet(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
- SVDropTbVgReqs reqs = {0};
- if (pReqs == NULL) {
- reqs.info = *pVgInfo;
- reqs.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
- if (reqs.req.pArray == NULL) {
+ SVDropTbVgReqs *pVgReqs = taosHashGet(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
+ SVDropTbVgReqs vgReqs = {0};
+ if (pVgReqs == NULL) {
+ vgReqs.info = *pVgInfo;
+ vgReqs.pBatchReqs = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbBatchReq));
+ if (!vgReqs.pBatchReqs) return terrno;
+ SVDropTbBatchReq batchReq = {0};
+ int32_t code = createDropTbBatchReq(&req, &batchReq);
+ if (TSDB_CODE_SUCCESS != code) return code;
+ if (taosArrayPush(vgReqs.pBatchReqs, &batchReq) == NULL) {
+ taosArrayDestroy(batchReq.pArray);
return terrno;
}
- if (taosArrayPush(reqs.req.pArray, &req) == NULL) {
- return terrno;
- }
- if (taosHashPut(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId), &reqs, sizeof(reqs)) != 0) {
+ if (taosHashPut(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId), &vgReqs, sizeof(vgReqs)) != 0) {
+ taosArrayDestroyEx(vgReqs.pBatchReqs, destroySVDropTbBatchReqs);
return terrno;
}
} else {
- if (taosArrayPush(pReqs->req.pArray, &req) == NULL) {
+ SVDropTbBatchReq batchReq = {0};
+ int32_t code = createDropTbBatchReq(&req, &batchReq);
+ if (TSDB_CODE_SUCCESS != code) return code;
+ if (taosArrayPush(pVgReqs->pBatchReqs, &batchReq) == NULL) {
+ taosArrayDestroy(batchReq.pArray);
return terrno;
}
}
return 0;
}
-int vgInfoCmp(const void *lp, const void *rp) {
- SVgroupInfo *pLeft = (SVgroupInfo *)lp;
- SVgroupInfo *pRight = (SVgroupInfo *)rp;
- if (pLeft->hashBegin < pRight->hashBegin) {
- return -1;
- } else if (pLeft->hashBegin > pRight->hashBegin) {
- return 1;
- }
-
- return 0;
-}
-
-static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropTbTsmaInfo *pInfo) {
- int32_t code = 0;
- SDbObj *pDb = mndAcquireDb(pMnode, dbname);
- if (!pDb) {
- code = TSDB_CODE_MND_DB_NOT_EXIST;
- goto _end;
- }
-
- pInfo->dbInfo.dbVgInfos = taosArrayInit(pDb->cfg.numOfVgroups, sizeof(SVgroupInfo));
- if (!pInfo->dbInfo.dbVgInfos) {
- code = terrno;
- goto _end;
- }
- mndBuildDBVgroupInfo(pDb, pMnode, pInfo->dbInfo.dbVgInfos);
- taosArraySort(pInfo->dbInfo.dbVgInfos, vgInfoCmp);
-
- pInfo->dbInfo.hashPrefix = pDb->cfg.hashPrefix;
- pInfo->dbInfo.hashSuffix = pDb->cfg.hashSuffix;
- pInfo->dbInfo.hashMethod = pDb->cfg.hashMethod;
-
-_end:
- if (pDb) mndReleaseDb(pMnode, pDb);
- if (code && pInfo->dbInfo.dbVgInfos) {
- taosArrayDestroy(pInfo->dbInfo.dbVgInfos);
- pInfo->dbInfo.dbVgInfos = NULL;
- }
- TAOS_RETURN(code);
-}
-
-int32_t vgHashValCmp(const void *lp, const void *rp) {
- uint32_t *key = (uint32_t *)lp;
- SVgroupInfo *pVg = (SVgroupInfo *)rp;
-
- if (*key < pVg->hashBegin) {
- return -1;
- } else if (*key > pVg->hashEnd) {
- return 1;
- }
-
- return 0;
-}
-
-static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
+static int32_t mndDropTbForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
int32_t vgId) {
int32_t code = 0;
@@ -4365,88 +4313,9 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith
vgInfo.epSet = mndGetVgroupEpset(pMnode, pVgObj);
mndReleaseVgroup(pMnode, pVgObj);
- // get all stb uids
- for (int32_t i = 0; i < pTbs->size; ++i) {
- const SVDropTbReq *pTb = taosArrayGet(pTbs, i);
- if (taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid))) {
- } else {
- SMDropTbTsmaInfos infos = {0};
- infos.pTsmaInfos = taosArrayInit(2, sizeof(SMDropTbTsmaInfo));
- if (!infos.pTsmaInfos) {
- code = terrno;
- goto _end;
- }
- if (taosHashPut(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid), &infos, sizeof(infos)) != 0) {
- code = terrno;
- goto _end;
- }
- }
- }
-
- void *pIter = NULL;
- SSmaObj *pSma = NULL;
- char buf[TSDB_TABLE_FNAME_LEN] = {0};
- // get used tsmas and it's dbs
- while (1) {
- pIter = sdbFetch(pMnode->pSdb, SDB_SMA, pIter, (void **)&pSma);
- if (!pIter) break;
- SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pSma->stbUid, sizeof(pSma->stbUid));
- if (pInfos) {
- SMDropTbTsmaInfo info = {0};
- int32_t len = sprintf(buf, "%s", pSma->name);
- sprintf(info.tsmaResTbDbFName, "%s", pSma->db);
- snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_FNAME_LEN, "%s", buf);
- SMDropTbDbInfo *pDbInfo = taosHashGet(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN);
- info.suid = pSma->dstTbUid;
- if (!pDbInfo) {
- code = mndGetDbVgInfoForTsma(pMnode, pSma->db, &info);
- if (code != TSDB_CODE_SUCCESS) {
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- if (taosHashPut(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN, &info.dbInfo, sizeof(SMDropTbDbInfo)) != 0) {
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- } else {
- info.dbInfo = *pDbInfo;
- }
- if (taosArrayPush(pInfos->pTsmaInfos, &info) == NULL) {
- code = terrno;
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- }
- sdbRelease(pMnode->pSdb, pSma);
- }
-
- // generate vg req map
for (int32_t i = 0; i < pTbs->size; ++i) {
SVDropTbReq *pTb = taosArrayGet(pTbs, i);
TAOS_CHECK_GOTO(mndDropTbAdd(pMnode, pCtx->pVgMap, &vgInfo, pTb->name, pTb->suid, pTb->igNotExists), NULL, _end);
-
- SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid));
- SArray *pVgInfos = NULL;
- char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1];
- char resTbFullName[TSDB_TABLE_FNAME_LEN + 1] = {0};
- for (int32_t j = 0; j < pInfos->pTsmaInfos->size; ++j) {
- SMDropTbTsmaInfo *pInfo = taosArrayGet(pInfos->pTsmaInfos, j);
- int32_t len = sprintf(buf, "%s_%s", pInfo->tsmaResTbNamePrefix, pTb->name);
- len = taosCreateMD5Hash(buf, len);
- len = snprintf(resTbFullName, TSDB_TABLE_FNAME_LEN + 1, "%s.%s", pInfo->tsmaResTbDbFName, buf);
- uint32_t hashVal = taosGetTbHashVal(resTbFullName, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix,
- pInfo->dbInfo.hashSuffix);
- const SVgroupInfo *pVgInfo = taosArraySearch(pInfo->dbInfo.dbVgInfos, &hashVal, vgHashValCmp, TD_EQ);
- void *p = taosStrdup(resTbFullName + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN);
- if (taosArrayPush(pCtx->pResTbNames, &p) == NULL) {
- code = terrno;
- goto _end;
- }
- TAOS_CHECK_GOTO(mndDropTbAdd(pMnode, pCtx->pVgMap, pVgInfo, p, pInfo->suid, true), NULL, _end);
- }
}
_end:
return code;
@@ -4474,9 +4343,10 @@ static int32_t mndProcessFetchTtlExpiredTbs(SRpcMsg *pRsp) {
code = mndInitDropTbsWithTsmaCtx(&pCtx);
if (code) goto _end;
- code = mndDropTbAddTsmaResTbsForSingleVg(pMnode, pCtx, rsp.pExpiredTbs, rsp.vgId);
+ code = mndDropTbForSingleVg(pMnode, pCtx, rsp.pExpiredTbs, rsp.vgId);
if (code) goto _end;
- if (mndCreateDropTbsTxnPrepare(pRsp, pCtx) == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
+ code = mndCreateDropTbsTxnPrepare(pRsp, pCtx);
+ if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
_end:
if (pCtx) mndDestroyDropTbsWithTsmaCtx(pCtx);
tDecoderClear(&decoder);
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 81db427afd..6336cd6e49 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -2434,7 +2434,12 @@ static void doAddReportStreamTask(SArray *pList, int64_t reportChkptId, const SC
mDebug("s-task:0x%x expired checkpoint-report msg in checkpoint-report list update from %" PRId64 "->%" PRId64,
pReport->taskId, p->checkpointId, pReport->checkpointId);
- memcpy(p, pReport, sizeof(STaskChkptInfo));
+ // update the checkpoint report info
+ p->checkpointId = pReport->checkpointId;
+ p->ts = pReport->checkpointTs;
+ p->version = pReport->checkpointVer;
+ p->transId = pReport->transId;
+ p->dropHTask = pReport->dropHTask;
} else {
mWarn("taskId:0x%x already in checkpoint-report list", pReport->taskId);
}
diff --git a/source/dnode/mnode/impl/src/mndStreamErrorInjection.c b/source/dnode/mnode/impl/src/mndStreamErrorInjection.c
new file mode 100644
index 0000000000..c68416369d
--- /dev/null
+++ b/source/dnode/mnode/impl/src/mndStreamErrorInjection.c
@@ -0,0 +1,72 @@
+#include "mndTrans.h"
+
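+// Fault injection for stream transaction actions: for checkpoint-related messages it randomly
+// aborts the process, re-sends the message several times, or delays it, to exercise recovery paths.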
+uint32_t seed = 0;
+
+static SRpcMsg createRpcMsg(STransAction* pAction, int64_t traceId, int64_t signature) {
+ SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature};
+ rpcMsg.pCont = rpcMallocCont(pAction->contLen);
+ if (rpcMsg.pCont == NULL) {
+ return rpcMsg;
+ }
+
+ rpcMsg.info.traceId.rootId = traceId;
+ rpcMsg.info.notFreeAhandle = 1;
+
+ memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen);
+ return rpcMsg;
+}
+
+void streamTransRandomErrorGen(STransAction *pAction, STrans *pTrans, int64_t signature) {
+ if ((pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT && pAction->id > 2) ||
+ (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) ||
+ (pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE && pAction->id > 2)) {
+ if (seed == 0) {
+ seed = taosGetTimestampSec();
+ }
+
+ uint32_t v = taosRandR(&seed);
+ int32_t choseItem = v % 5;
+
+ if (choseItem == 0) {
+ // 1. one of update-checkpoint not send, restart and send it again
+ taosMsleep(5000);
+ if (pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT) {
+ mError(
+ "***sleep 5s and core dump, following tasks will not recv update-checkpoint info, so the checkpoint will "
+ "rollback***");
+ exit(-1);
+      } else if (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) {
+        mError(
+            "***sleep 5s and core dump, following tasks will not recv consen-checkpoint info, so the tasks will "
+            "not be started***");
+ } else { // pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE
+ mError(
+ "***sleep 5s and core dump, following tasks will not recv checkpoint-source info, so the tasks will "
+ "started after restart***");
+ exit(-1);
+ }
+ } else if (choseItem == 1) {
+ // 2. repeat send update chkpt msg
+ mError("***repeat send update-checkpoint/consensus/checkpoint trans msg 3times to vnode***");
+
+ mError("***repeat 1***");
+ SRpcMsg rpcMsg1 = createRpcMsg(pAction, pTrans->mTraceId, signature);
+ int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg1);
+
+ mError("***repeat 2***");
+ SRpcMsg rpcMsg2 = createRpcMsg(pAction, pTrans->mTraceId, signature);
+ code = tmsgSendReq(&pAction->epSet, &rpcMsg2);
+
+ mError("***repeat 3***");
+ SRpcMsg rpcMsg3 = createRpcMsg(pAction, pTrans->mTraceId, signature);
+ code = tmsgSendReq(&pAction->epSet, &rpcMsg3);
+ } else if (choseItem == 2) {
+      // 3. sleep 30s and then send msg
+ mError("***idle for 30s, and then send msg***");
+ taosMsleep(30000);
+ } else {
+ // do nothing
+ // mInfo("no error triggered");
+ }
+ }
+}
diff --git a/source/dnode/mnode/impl/src/mndStreamHb.c b/source/dnode/mnode/impl/src/mndStreamHb.c
index 941956ae2b..4b3db28aa1 100644
--- a/source/dnode/mnode/impl/src/mndStreamHb.c
+++ b/source/dnode/mnode/impl/src/mndStreamHb.c
@@ -24,7 +24,7 @@ typedef struct SFailedCheckpointInfo {
static int32_t mndStreamSendUpdateChkptInfoMsg(SMnode *pMnode);
static int32_t mndSendDropOrphanTasksMsg(SMnode *pMnode, SArray *pList);
-static int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId);
+static int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId, int64_t checkpointId);
static void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage);
static void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo);
static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList);
@@ -68,7 +68,7 @@ void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo) {
}
}
-int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
+int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream, int64_t chkptId) {
STrans *pTrans = NULL;
int32_t code = doCreateTrans(pMnode, pStream, NULL, TRN_CONFLICT_NOTHING, MND_STREAM_TASK_RESET_NAME,
" reset from failed checkpoint", &pTrans);
@@ -84,7 +84,7 @@ int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
return code;
}
- code = mndStreamSetResetTaskAction(pMnode, pTrans, pStream);
+ code = mndStreamSetResetTaskAction(pMnode, pTrans, pStream, chkptId);
if (code) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
@@ -115,7 +115,7 @@ int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
return code;
}
-int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId) {
+int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId, int64_t checkpointId) {
int32_t size = sizeof(SStreamTaskResetMsg);
int32_t num = taosArrayGetSize(execInfo.pKilledChkptTrans);
@@ -135,8 +135,9 @@ int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t
taosArrayRemove(execInfo.pKilledChkptTrans, 0); // remove this first, append new reset trans in the tail
}
- SStreamTaskResetMsg p = {.streamId = streamId, .transId = transId};
+ SStreamTaskResetMsg p = {.streamId = streamId, .transId = transId, .checkpointId = checkpointId};
+  // remember that this trans has already been killed
void *px = taosArrayPush(execInfo.pKilledChkptTrans, &p);
if (px == NULL) {
mError("failed to push reset-msg trans:%d into the killed chkpt trans list, size:%d", transId, num - 1);
@@ -150,6 +151,7 @@ int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t
pReq->streamId = streamId;
pReq->transId = transId;
+ pReq->checkpointId = checkpointId;
SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_TASK_RESET, .pCont = pReq, .contLen = size};
int32_t code = tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
@@ -234,7 +236,7 @@ int32_t mndProcessResetStatusReq(SRpcMsg *pReq) {
} else {
mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, transId:%d, create reset trans", pStream->name,
pStream->uid, pMsg->transId);
- code = mndCreateStreamResetStatusTrans(pMnode, pStream);
+ code = mndCreateStreamResetStatusTrans(pMnode, pStream, pMsg->checkpointId);
}
}
@@ -379,9 +381,10 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
}
if ((pEntry->lastHbMsgId == req.msgId) && (pEntry->lastHbMsgTs == req.ts)) {
- mError("vgId:%d HbMsgId:%d already handled, bh msg discard", pEntry->nodeId, req.msgId);
+ mError("vgId:%d HbMsgId:%d already handled, bh msg discard, and send HbRsp", pEntry->nodeId, req.msgId);
- terrno = TSDB_CODE_INVALID_MSG;
+    // reply success so that the vnode will continue to send the next hb msg.
+ terrno = TSDB_CODE_SUCCESS;
doSendHbMsgRsp(terrno, &pReq->info, req.vgId, req.msgId);
streamMutexUnlock(&execInfo.lock);
@@ -495,10 +498,11 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
continue;
}
- mInfo("checkpointId:%" PRId64 " transId:%d failed, issue task-reset trans to reset all tasks status",
- pInfo->checkpointId, pInfo->transId);
+ mInfo("stream:0x%" PRIx64 " checkpointId:%" PRId64
+ " transId:%d failed issue task-reset trans to reset all tasks status",
+ pInfo->streamUid, pInfo->checkpointId, pInfo->transId);
- code = mndSendResetFromCheckpointMsg(pMnode, pInfo->streamUid, pInfo->transId);
+ code = mndSendResetFromCheckpointMsg(pMnode, pInfo->streamUid, pInfo->transId, pInfo->checkpointId);
if (code) {
mError("failed to create reset task trans, code:%s", tstrerror(code));
}
@@ -549,12 +553,37 @@ void cleanupAfterProcessHbMsg(SStreamHbMsg *pReq, SArray *pFailedChkptList, SArr
}
void doSendHbMsgRsp(int32_t code, SRpcHandleInfo *pRpcInfo, int32_t vgId, int32_t msgId) {
- SRpcMsg rsp = {.code = code, .info = *pRpcInfo, .contLen = sizeof(SMStreamHbRspMsg)};
- rsp.pCont = rpcMallocCont(rsp.contLen);
+ int32_t ret = 0;
+ int32_t tlen = 0;
+ void *buf = NULL;
- SMStreamHbRspMsg *pMsg = rsp.pCont;
- pMsg->head.vgId = htonl(vgId);
- pMsg->msgId = msgId;
+ const SMStreamHbRspMsg msg = {.msgId = msgId};
+
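+ // serialize the rsp with the stream hb encoder instead of copying the raw struct onto the wire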
+ tEncodeSize(tEncodeStreamHbRsp, &msg, tlen, ret);
+ if (ret < 0) {
+ mError("encode stream hb msg rsp failed, code:%s", tstrerror(code));
+ }
+
+ buf = rpcMallocCont(tlen + sizeof(SMsgHead));
+ if (buf == NULL) {
+ mError("encode stream hb msg rsp failed, code:%s", tstrerror(terrno));
+ return;
+ }
+
+ ((SMStreamHbRspMsg*)buf)->head.vgId = htonl(vgId);
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+
+ SEncoder encoder;
+ tEncoderInit(&encoder, abuf, tlen);
+ if ((ret = tEncodeStreamHbRsp(&encoder, &msg)) < 0) {
+ rpcFreeCont(buf);
+ tEncoderClear(&encoder);
+ mError("encode stream hb msg rsp failed, code:%s", tstrerror(ret));
+ return;
+ }
+ tEncoderClear(&encoder);
+
+ SRpcMsg rsp = {.code = code, .info = *pRpcInfo, .contLen = tlen + sizeof(SMsgHead), .pCont = buf};
tmsgSendRsp(&rsp);
pRpcInfo->handle = NULL; // disable auto rsp
diff --git a/source/dnode/mnode/impl/src/mndStreamTransAct.c b/source/dnode/mnode/impl/src/mndStreamTransAct.c
index 139ea4f147..5ccb626609 100644
--- a/source/dnode/mnode/impl/src/mndStreamTransAct.c
+++ b/source/dnode/mnode/impl/src/mndStreamTransAct.c
@@ -295,7 +295,7 @@ static int32_t doSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamTas
return code;
}
-static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
+static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, int64_t chkptId) {
SVResetStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResetStreamTaskReq));
if (pReq == NULL) {
mError("failed to malloc in reset stream, size:%" PRIzu ", code:%s", sizeof(SVResetStreamTaskReq),
@@ -306,6 +306,7 @@ static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa
pReq->head.vgId = htonl(pTask->info.nodeId);
pReq->taskId = pTask->id.taskId;
pReq->streamId = pTask->id.streamId;
+ pReq->chkptId = chkptId;
SEpSet epset = {0};
bool hasEpset = false;
@@ -544,7 +545,7 @@ int32_t mndStreamSetDropActionFromList(SMnode *pMnode, STrans *pTrans, SArray* p
return 0;
}
-int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
+int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream, int64_t chkptId) {
SStreamTaskIter *pIter = NULL;
taosWLockLatch(&pStream->lock);
@@ -564,7 +565,7 @@ int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *
return code;
}
- code = doSetResetAction(pMnode, pTrans, pTask);
+ code = doSetResetAction(pMnode, pTrans, pTask, chkptId);
if (code != TSDB_CODE_SUCCESS) {
destroyStreamTaskIter(pIter);
taosWUnLockLatch(&pStream->lock);
@@ -606,7 +607,7 @@ int32_t mndStreamSetChkptIdAction(SMnode *pMnode, STrans *pTrans, SStreamTask* p
tEncoderInit(&encoder, abuf, tlen);
code = tEncodeRestoreCheckpointInfo(&encoder, &req);
tEncoderClear(&encoder);
- if (code == -1) {
+ if (code < 0) {
taosMemoryFree(pBuf);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c
index f9b7644af4..615c383f07 100644
--- a/source/dnode/mnode/impl/src/mndStreamUtil.c
+++ b/source/dnode/mnode/impl/src/mndStreamUtil.c
@@ -1521,74 +1521,4 @@ int32_t mndCheckForSnode(SMnode *pMnode, SDbObj *pSrcDb) {
mError("snode not existed when trying to create stream in db with multiple replica");
return TSDB_CODE_SNODE_NOT_DEPLOYED;
}
-}
-
-uint32_t seed = 0;
-static SRpcMsg createRpcMsg(STransAction* pAction, int64_t traceId, int64_t signature) {
- SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature};
- rpcMsg.pCont = rpcMallocCont(pAction->contLen);
- if (rpcMsg.pCont == NULL) {
- return rpcMsg;
- }
-
- rpcMsg.info.traceId.rootId = traceId;
- rpcMsg.info.notFreeAhandle = 1;
-
- memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen);
- return rpcMsg;
-}
-
-void streamTransRandomErrorGen(STransAction *pAction, STrans *pTrans, int64_t signature) {
- if ((pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT && pAction->id > 2) ||
- (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) ||
- (pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE && pAction->id > 2)) {
- if (seed == 0) {
- seed = taosGetTimestampSec();
- }
-
- uint32_t v = taosRandR(&seed);
- int32_t choseItem = v % 5;
-
- if (choseItem == 0) {
- // 1. one of update-checkpoint not send, restart and send it again
- taosMsleep(5000);
- if (pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT) {
- mError(
- "***sleep 5s and core dump, following tasks will not recv update-checkpoint info, so the checkpoint will "
- "rollback***");
- exit(-1);
- } else if (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) { // pAction->msgType == TDMT_STREAM_CONSEN_CHKPT
- mError(
- "***sleep 5s and core dump, following tasks will not recv consen-checkpoint info, so the tasks will "
- "not started***");
- } else { // pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE
- mError(
- "***sleep 5s and core dump, following tasks will not recv checkpoint-source info, so the tasks will "
- "started after restart***");
- exit(-1);
- }
- } else if (choseItem == 1) {
- // 2. repeat send update chkpt msg
- mError("***repeat send update-checkpoint/consensus/checkpoint trans msg 3times to vnode***");
-
- mError("***repeat 1***");
- SRpcMsg rpcMsg1 = createRpcMsg(pAction, pTrans->mTraceId, signature);
- int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg1);
-
- mError("***repeat 2***");
- SRpcMsg rpcMsg2 = createRpcMsg(pAction, pTrans->mTraceId, signature);
- code = tmsgSendReq(&pAction->epSet, &rpcMsg2);
-
- mError("***repeat 3***");
- SRpcMsg rpcMsg3 = createRpcMsg(pAction, pTrans->mTraceId, signature);
- code = tmsgSendReq(&pAction->epSet, &rpcMsg3);
- } else if (choseItem == 2) {
- // 3. sleep 40s and then send msg
- mError("***idle for 30s, and then send msg***");
- taosMsleep(30000);
- } else {
- // do nothing
- // mInfo("no error triggered");
- }
- }
}
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/test/stream/stream.cpp b/source/dnode/mnode/impl/test/stream/stream.cpp
index d508cf7390..45bc4c2ce2 100644
--- a/source/dnode/mnode/impl/test/stream/stream.cpp
+++ b/source/dnode/mnode/impl/test/stream/stream.cpp
@@ -246,7 +246,7 @@ TEST_F(StreamTest, kill_checkpoint_trans) {
px = taosArrayPush(pStream->tasks, &pLevel);
ASSERT(px != NULL);
- code = mndCreateStreamResetStatusTrans(pMnode, pStream);
+ code = mndCreateStreamResetStatusTrans(pMnode, pStream, 1);
ASSERT(code != 0);
tFreeStreamObj(pStream);
diff --git a/source/dnode/snode/src/snodeInitApi.c b/source/dnode/snode/src/snodeInitApi.c
index 680a2fd83c..4fe4333534 100644
--- a/source/dnode/snode/src/snodeInitApi.c
+++ b/source/dnode/snode/src/snodeInitApi.c
@@ -31,6 +31,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStatePutParName = streamStatePutParName;
pStore->streamStateGetParName = streamStateGetParName;
+ pStore->streamStateDeleteParName = streamStateDeleteParName;
pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
pStore->streamStateReleaseBuf = streamStateReleaseBuf;
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 610ba43673..743bfd36d0 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -172,7 +172,7 @@ void tsdbReleaseDataBlock2(STsdbReader *pReader);
int32_t tsdbRetrieveDataBlock2(STsdbReader *pReader, SSDataBlock **pBlock, SArray *pIdList);
int32_t tsdbReaderReset2(STsdbReader *pReader, SQueryTableDataCond *pCond);
int32_t tsdbGetFileBlocksDistInfo2(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
-int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader *pHandle);
+int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader *pHandle, uint32_t *rows);
void *tsdbGetIdx2(SMeta *pMeta);
void *tsdbGetIvtIdx2(SMeta *pMeta);
uint64_t tsdbGetReaderMaxVersion2(STsdbReader *pReader);
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 653b47ff14..3c40100f9d 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -146,7 +146,7 @@ int32_t tqBuildFName(char** data, const char* path, char* name);
int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name);
// tq util
-int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
+int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type, EStreamType blockType);
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg);
int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId,
int32_t type, int64_t sver, int64_t ever);
@@ -158,6 +158,7 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols, SSDataBlock* pDataBlock,
SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq);
+int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
#define TQ_ERR_GO_TO_END(c) \
do { \
diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c
index 659ba3f777..9a5bea33e3 100644
--- a/source/dnode/vnode/src/meta/metaOpen.c
+++ b/source/dnode/vnode/src/meta/metaOpen.c
@@ -324,7 +324,11 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
SMetaEntry me = {0};
tDecoderInit(&dc, value, valueSize);
if (metaDecodeEntry(&dc, &me) == 0) {
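+ // skip orphan child tables whose super table is missing instead of re-inserting them into the new meta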
- if (metaHandleEntry(pNewMeta, &me) != 0) {
+ if (me.type == TSDB_CHILD_TABLE &&
+ tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) {
+ metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64,
+ TD_VID(pVnode), me.ctbEntry.suid, uid);
+ } else if (metaHandleEntry(pNewMeta, &me) != 0) {
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
}
}
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 80c04a3276..bbc58004d9 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -1551,7 +1551,7 @@ static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SA
_resume_delete:
version = RSMA_EXEC_MSG_VER(msg);
if ((code = tqExtractDelDataBlock(RSMA_EXEC_MSG_BODY(msg), RSMA_EXEC_MSG_LEN(msg), version,
- &packData.pDataBlock, 1))) {
+ &packData.pDataBlock, 1, STREAM_DELETE_DATA))) {
taosFreeQitem(msg);
TAOS_CHECK_EXIT(code);
}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index bd78f62cae..a234777441 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -758,7 +758,8 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
}
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
- SWalFilterCond cond = {.deleteMsg = 1}; // delete msg also extract from wal files
+ bool scanDropCtb = pTask->subtableWithoutMd5 ? true : false;
+ SWalFilterCond cond = {.deleteMsg = 1, .scanDropCtb = scanDropCtb}; // delete msg also extract from wal files
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
if (pTask->exec.pWalReader == NULL) {
tqError("vgId:%d failed init wal reader, code:%s", vgId, tstrerror(terrno));
@@ -1008,21 +1009,34 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
}
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
- SStreamTaskRunReq* pReq = pMsg->pCont;
+ int32_t code = 0;
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+ SDecoder decoder;
+
+ SStreamTaskRunReq req = {0};
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ if ((code = tDecodeStreamTaskRunReq(&decoder, &req)) < 0) {
+ tqError("vgId:%d failed to decode task run req, code:%s", pTq->pStreamMeta->vgId, tstrerror(code));
+ tDecoderClear(&decoder);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ tDecoderClear(&decoder);
// extracted submit data from wal files for all tasks
- if (pReq->reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) {
+ if (req.reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) {
return tqScanWal(pTq);
}
- int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
+ code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
if (code) {
tqError("vgId:%d failed to create task run req, code:%s", TD_VID(pTq->pVnode), tstrerror(code));
return code;
}
// let's continue scan data in the wal files
- if (pReq->reqType >= 0 || pReq->reqType == STREAM_EXEC_T_RESUME_TASK) {
+ if (req.reqType >= 0 || req.reqType == STREAM_EXEC_T_RESUME_TASK) {
code = tqScanWalAsync(pTq, false); // it's ok to failed
if (code) {
tqError("vgId:%d failed to start scan wal file, code:%s", pTq->pStreamMeta->vgId, tstrerror(code));
@@ -1296,7 +1310,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) {
int32_t vgId = TD_VID(pTq->pVnode);
- SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont;
+ SStreamCheckpointReadyMsg* pReq = (SStreamCheckpointReadyMsg*)pMsg->pCont;
if (!vnodeIsRoleLeader(pTq->pVnode)) {
tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId,
(int32_t)pReq->downstreamTaskId);
@@ -1317,10 +1331,23 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskRetrieveTriggerReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t vgId = TD_VID(pTq->pVnode);
- SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont;
if (!vnodeIsRoleLeader(pTq->pVnode)) {
- tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId,
- (int32_t)pReq->downstreamTaskId);
+ SRetrieveChkptTriggerReq req = {0};
+
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+ SDecoder decoder = {0};
+
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ if (tDecodeRetrieveChkptTriggerReq(&decoder, &req) < 0) {
+ tDecoderClear(&decoder);
+ tqError("vgId:%d invalid retrieve checkpoint-trigger req received", vgId);
+ return TSDB_CODE_INVALID_MSG;
+ }
+ tDecoderClear(&decoder);
+
+ tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from s-task:0x%" PRId64, vgId,
+ req.downstreamTaskId);
return TSDB_CODE_STREAM_NOT_LEADER;
}
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 95955e579f..d924e97ae3 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -366,8 +366,8 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
} else if (pCont->msgType == TDMT_VND_DELETE) {
void* pBody = POINTER_SHIFT(pCont->body, sizeof(SMsgHead));
int32_t len = pCont->bodyLen - sizeof(SMsgHead);
-
- code = tqExtractDelDataBlock(pBody, len, ver, (void**)pItem, 0);
+ EStreamType blockType = STREAM_DELETE_DATA;
+ code = tqExtractDelDataBlock(pBody, len, ver, (void**)pItem, 0, blockType);
if (code == TSDB_CODE_SUCCESS) {
if (*pItem == NULL) {
tqDebug("s-task:%s empty delete msg, discard it, len:%d, ver:%" PRId64, id, len, ver);
@@ -382,6 +382,20 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
return code;
}
+ } else if (pCont->msgType == TDMT_VND_DROP_TABLE && pReader->cond.scanDropCtb) {
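+ // drop-table records are only consumed when the WAL reader was opened with scanDropCtb enabled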
+ void* pBody = POINTER_SHIFT(pCont->body, sizeof(SMsgHead));
+ int32_t len = pCont->bodyLen - sizeof(SMsgHead);
+ code = tqExtractDropCtbDataBlock(pBody, len, ver, (void**)pItem, 0);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (!*pItem) {
+ continue;
+ } else {
+ tqDebug("s-task:%s drop ctb msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver);
+ }
+ } else {
+ terrno = code;
+ return code;
+ }
} else {
tqError("s-task:%s invalid msg type:%d, ver:%" PRId64, id, pCont->msgType, ver);
return TSDB_CODE_STREAM_INTERNAL_ERROR;
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index be41f7e99e..3f4ff7f3d9 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -53,6 +53,7 @@ static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode);
static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs);
static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
int64_t earlyTs);
+static int32_t doWaitForDstTableDropped(SVnode* pVnode, SStreamTask* pTask, const char* dstTableName);
int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr, bool newSubTableRule) {
@@ -138,7 +139,7 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
return 0;
}
-static int32_t encodeCreateChildTableForRPC(SVCreateTbBatchReq* pReqs, int32_t vgId, void** pBuf, int32_t* contLen) {
+static int32_t encodeCreateChildTableForRPC(void* pReqs, int32_t vgId, void** pBuf, int32_t* contLen) {
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchReq, pReqs, *contLen, ret);
@@ -170,17 +171,50 @@ end:
return ret;
}
-static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
+static int32_t encodeDropChildTableForRPC(void* pReqs, int32_t vgId, void** ppBuf, int32_t *contLen) {
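+ // serialize an SVDropTbBatchReq into an rpc buffer prefixed with an SMsgHead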
+ int32_t code = 0;
+ SEncoder ec = {0};
+ tEncodeSize(tEncodeSVDropTbBatchReq, pReqs, *contLen, code);
+ if (code < 0) {
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+ *contLen += sizeof(SMsgHead);
+ *ppBuf = rpcMallocCont(*contLen);
+
+ if (!*ppBuf) {
+ code = terrno;
+ goto end;
+ }
+
+ ((SMsgHead*)(*ppBuf))->vgId = vgId;
+ ((SMsgHead*)(*ppBuf))->contLen = htonl(*contLen);
+
+ tEncoderInit(&ec, POINTER_SHIFT(*ppBuf, sizeof(SMsgHead)), (*contLen) - sizeof(SMsgHead));
+ code = tEncodeSVDropTbBatchReq(&ec, pReqs);
+ tEncoderClear(&ec);
+ if (code < 0) {
+ rpcFreeCont(*ppBuf);
+ *ppBuf = NULL;
+ *contLen = 0;
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+end:
+ return code;
+}
+
+static int32_t tqPutReqToQueue(SVnode* pVnode, void* pReqs, int32_t(*encoder)(void* pReqs, int32_t vgId, void** ppBuf, int32_t *contLen), tmsg_t msgType) {
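+ // generic helper: encode the request with the supplied encoder callback and push the resulting msg into the vnode write queue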
void* buf = NULL;
int32_t tlen = 0;
- int32_t code = encodeCreateChildTableForRPC(pReqs, TD_VID(pVnode), &buf, &tlen);
+ int32_t code = encoder(pReqs, TD_VID(pVnode), &buf, &tlen);
if (code) {
tqError("vgId:%d failed to encode create table msg, create table failed, code:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
- SRpcMsg msg = {.msgType = TDMT_VND_CREATE_TABLE, .pCont = buf, .contLen = tlen};
+ SRpcMsg msg = {.msgType = msgType, .pCont = buf, .contLen = tlen};
code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
if (code) {
tqError("failed to put into write-queue since %s", terrstr());
@@ -388,7 +422,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
}
reqs.nReqs = taosArrayGetSize(reqs.pArray);
- code = tqPutReqToQueue(pVnode, &reqs);
+ code = tqPutReqToQueue(pVnode, &reqs, encodeCreateChildTableForRPC, TDMT_VND_CREATE_TABLE);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s failed to send create table msg", id);
}
@@ -399,6 +433,61 @@ _end:
return code;
}
+static int32_t doBuildAndSendDropTableMsg(SVnode* pVnode, char* pStbFullname, SSDataBlock* pDataBlock,
+ SStreamTask* pTask, int64_t suid) {
+ int32_t lino = 0;
+ int32_t code = 0;
+ int32_t rows = pDataBlock->info.rows;
+ const char* id = pTask->id.idStr;
+ SVDropTbBatchReq batchReq = {0};
+ SVDropTbReq req = {0};
+
+ if (rows <= 0 || rows > 1 || pTask->subtableWithoutMd5 == 0) return TSDB_CODE_SUCCESS;
+
+ batchReq.pArray = taosArrayInit(rows, sizeof(SVDropTbReq));
+ if (!batchReq.pArray) return terrno;
+ batchReq.nReqs = rows;
+ req.suid = suid;
+ req.igNotExists = true;
+
+ SColumnInfoData* pTbNameCol = taosArrayGet(pDataBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+ char tbName[TSDB_TABLE_NAME_LEN + 1] = {0};
+ int32_t i = 0;
+ void* pData = colDataGetVarData(pTbNameCol, i);
+ memcpy(tbName, varDataVal(pData), varDataLen(pData));
+ tbName[varDataLen(pData)] = 0;
+ req.name = tbName;
+ if (taosArrayPush(batchReq.pArray, &req) == NULL) {
+ TSDB_CHECK_CODE(terrno, lino, _exit);
+ }
+
+ SMetaReader mr = {0};
+ metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
+
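+ // if the child table still exists, refresh the uid cached in the sink-table info before dropping it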
+ code = metaGetTableEntryByName(&mr, tbName);
+ if (TSDB_CODE_SUCCESS == code && isValidDstChildTable(&mr, TD_VID(pVnode), tbName, pTask->outputInfo.tbSink.stbUid)) {
+ STableSinkInfo* pTableSinkInfo = NULL;
+ bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, &pTableSinkInfo);
+ if (alreadyCached) {
+ pTableSinkInfo->uid = mr.me.uid;
+ }
+ }
+ metaReaderClear(&mr);
+ tqDebug("s-task:%s build drop %d table(s) msg", id, rows);
+ code = tqPutReqToQueue(pVnode, &batchReq, encodeDropChildTableForRPC, TDMT_VND_DROP_TABLE);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ code = doWaitForDstTableDropped(pVnode, pTask, tbName);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+_exit:
+ if (batchReq.pArray) {
+ taosArrayDestroy(batchReq.pArray);
+ }
+ return code;
+}
+
int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks) {
const char* id = pTask->id.idStr;
int32_t vgId = TD_VID(pVnode);
@@ -807,6 +896,40 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
return TSDB_CODE_SUCCESS;
}
+static int32_t doWaitForDstTableDropped(SVnode* pVnode, SStreamTask* pTask, const char* dstTableName) {
+ int32_t vgId = TD_VID(pVnode);
+ int64_t suid = pTask->outputInfo.tbSink.stbUid;
+ const char* id = pTask->id.idStr;
+
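+ // poll the meta store until the destination table is gone, the task is asked to stop, or the lookup fails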
+ while (1) {
+ if (streamTaskShouldStop(pTask)) {
+ tqDebug("s-task:%s task will stop, quit from waiting for table:%s drop", id, dstTableName);
+ return TSDB_CODE_STREAM_EXEC_CANCELLED;
+ }
+ SMetaReader mr = {0};
+ metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
+ int32_t code = metaGetTableEntryByName(&mr, dstTableName);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ metaReaderClear(&mr);
+ break;
+ } else if (TSDB_CODE_SUCCESS == code) {
+ if (isValidDstChildTable(&mr, vgId, dstTableName, suid)) {
+ metaReaderClear(&mr);
+ taosMsleep(100);
+ tqDebug("s-task:%s wait 100ms for table:%s drop", id, dstTableName);
+ } else {
+ metaReaderClear(&mr);
+ break;
+ }
+ } else {
+ tqError("s-task:%s failed to wait for table:%s drop", id, dstTableName);
+ metaReaderClear(&mr);
+ return terrno;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
int32_t nameLen = strlen(pDstTableName);
(*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
@@ -1032,7 +1155,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
}
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
- if (!onlySubmitData) {
+ if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
numOfBlocks);
@@ -1052,6 +1175,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
code = doBuildAndSendCreateTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
+ } else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) {
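+ // forward the drop to the destination child table and wait until meta confirms it is gone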
+ code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
} else {
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
}
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index e066938fc0..a92049e5f3 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -572,7 +572,7 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp*
return 0;
}
-int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type) {
+int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type, EStreamType blockType) {
int32_t code = 0;
int32_t line = 0;
SDecoder* pCoder = &(SDecoder){0};
@@ -593,7 +593,7 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void*
}
SSDataBlock* pDelBlock = NULL;
- code = createSpecialDataBlock(STREAM_DELETE_DATA, &pDelBlock);
+ code = createSpecialDataBlock(blockType, &pDelBlock);
TSDB_CHECK_CODE(code, line, END);
code = blockDataEnsureCapacity(pDelBlock, numOfTables);
@@ -751,3 +751,45 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
return TSDB_CODE_SUCCESS;
}
+
+int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type) {
+ int32_t code = 0;
+ int32_t lino = 0;
+ SDecoder dc = {0};
+ SVDropTbBatchReq batchReq = {0};
+ tDecoderInit(&dc, (uint8_t*)data, len);
+ code = tDecodeSVDropTbBatchReq(&dc, &batchReq);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ if (batchReq.nReqs <= 0) goto _exit;
+
+ SSDataBlock* pBlock = NULL;
+ code = createSpecialDataBlock(STREAM_DROP_CHILD_TABLE, &pBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ code = blockDataEnsureCapacity(pBlock, batchReq.nReqs);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ pBlock->info.rows = batchReq.nReqs;
+ pBlock->info.version = ver;
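+ // write one row per dropped child table, carrying its uid in the UID column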
+ for (int32_t i = 0; i < batchReq.nReqs; ++i) {
+ SVDropTbReq* pReq = batchReq.pReqs + i;
+ SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ TSDB_CHECK_NULL(pCol, code, lino, _exit, terrno);
+ code = colDataSetVal(pCol, i, (const char* )&pReq->uid, false);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+
+ code = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0, pRefBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ ((SStreamRefDataBlock*)(*pRefBlock))->type = STREAM_INPUT__REF_DATA_BLOCK;
+ ((SStreamRefDataBlock*)(*pRefBlock))->pBlock = pBlock;
+
+_exit:
+ tDecoderClear(&dc);
+ if (TSDB_CODE_SUCCESS != code) {
+ tqError("faled to extract drop ctb data block, line:%d code:%s", lino, tstrerror(code));
+ blockDataCleanup(pBlock);
+ taosMemoryFree(pBlock);
+ }
+ return code;
+}
diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c
index f31dd28847..3f67503454 100644
--- a/source/dnode/vnode/src/tqCommon/tqCommon.c
+++ b/source/dnode/vnode/src/tqCommon/tqCommon.c
@@ -828,14 +828,25 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
}
int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) {
- SStreamTaskRunReq* pReq = pMsg->pCont;
+ int32_t code = 0;
+ int32_t vgId = pMeta->vgId;
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+ SDecoder decoder;
- int32_t type = pReq->reqType;
- int32_t vgId = pMeta->vgId;
- int32_t code = 0;
+ SStreamTaskRunReq req = {0};
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ if ((code = tDecodeStreamTaskRunReq(&decoder, &req)) < 0) {
+ tqError("vgId:%d failed to decode task run req, code:%s", pMeta->vgId, tstrerror(code));
+ tDecoderClear(&decoder);
+ return TSDB_CODE_SUCCESS;
+ }
+ tDecoderClear(&decoder);
+
+ int32_t type = req.reqType;
if (type == STREAM_EXEC_T_START_ONE_TASK) {
- code = streamMetaStartOneTask(pMeta, pReq->streamId, pReq->taskId);
+ code = streamMetaStartOneTask(pMeta, req.streamId, req.taskId);
return 0;
} else if (type == STREAM_EXEC_T_START_ALL_TASKS) {
code = streamMetaStartAllTasks(pMeta);
@@ -847,11 +858,11 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead
code = streamMetaStopAllTasks(pMeta);
return 0;
} else if (type == STREAM_EXEC_T_ADD_FAILED_TASK) {
- code = streamMetaAddFailedTask(pMeta, pReq->streamId, pReq->taskId);
+ code = streamMetaAddFailedTask(pMeta, req.streamId, req.taskId);
return code;
} else if (type == STREAM_EXEC_T_RESUME_TASK) { // task resume to run after idle for a while
SStreamTask* pTask = NULL;
- code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask);
+ code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask);
if (pTask != NULL && (code == 0)) {
char* pStatus = NULL;
@@ -873,7 +884,7 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead
}
SStreamTask* pTask = NULL;
- code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask);
+ code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask);
if ((pTask != NULL) && (code == 0)) { // even in halt status, the data in inputQ must be processed
char* p = NULL;
if (streamTaskReadyToRun(pTask, &p)) {
@@ -890,7 +901,7 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead
return 0;
} else { // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec.
// todo add one function to handle this
- tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, pReq->taskId);
+ tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, req.taskId);
return code;
}
}
@@ -939,7 +950,7 @@ int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) {
}
int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) {
- SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg;
+ SVResetStreamTaskReq* pReq = (SVResetStreamTaskReq*)pMsg;
SStreamTask* pTask = NULL;
int32_t code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask);
@@ -954,17 +965,13 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) {
streamMutexLock(&pTask->lock);
streamTaskClearCheckInfo(pTask, true);
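+ // the reset request now carries the failed checkpointId from the mnode; record it on the task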
+ streamTaskSetFailedCheckpointId(pTask, pReq->chkptId);
+
// clear flag set during do checkpoint, and open inputQ for all upstream tasks
SStreamTaskState pState = streamTaskGetStatus(pTask);
if (pState.state == TASK_STATUS__CK) {
- int32_t tranId = 0;
- int64_t activeChkId = 0;
- streamTaskGetActiveCheckpointInfo(pTask, &tranId, &activeChkId);
-
- tqDebug("s-task:%s reset task status from checkpoint, current checkpointingId:%" PRId64 ", transId:%d",
- pTask->id.idStr, activeChkId, tranId);
-
streamTaskSetStatusReady(pTask);
+ tqDebug("s-task:%s reset checkpoint status to ready", pTask->id.idStr);
} else if (pState.state == TASK_STATUS__UNINIT) {
// tqDebug("s-task:%s start task by checking downstream tasks", pTask->id.idStr);
// tqStreamTaskRestoreCheckpoint(pMeta, pTask->id.streamId, pTask->id.taskId);
@@ -980,25 +987,36 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) {
}
int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
- SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont;
+ SRetrieveChkptTriggerReq req = {0};
+ SStreamTask* pTask = NULL;
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+ SDecoder decoder = {0};
- SStreamTask* pTask = NULL;
- int32_t code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->upstreamTaskId, &pTask);
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ if (tDecodeRetrieveChkptTriggerReq(&decoder, &req) < 0) {
+ tDecoderClear(&decoder);
+ tqError("vgId:%d invalid retrieve checkpoint-trigger req received", pMeta->vgId);
+ return TSDB_CODE_INVALID_MSG;
+ }
+ tDecoderClear(&decoder);
+
+ int32_t code = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId, &pTask);
if (pTask == NULL || (code != 0)) {
- tqError("vgId:%d process retrieve checkpoint trigger, checkpointId:%" PRId64
+ tqError("vgId:%d process retrieve checkpoint-trigger, checkpointId:%" PRId64
" from s-task:0x%x, failed to acquire task:0x%x, it may have been dropped already",
- pMeta->vgId, pReq->checkpointId, (int32_t)pReq->downstreamTaskId, pReq->upstreamTaskId);
+ pMeta->vgId, req.checkpointId, (int32_t)req.downstreamTaskId, req.upstreamTaskId);
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
}
tqDebug("s-task:0x%x recv retrieve checkpoint-trigger msg from downstream s-task:0x%x, checkpointId:%" PRId64,
- pReq->upstreamTaskId, (int32_t)pReq->downstreamTaskId, pReq->checkpointId);
+ req.upstreamTaskId, (int32_t)req.downstreamTaskId, req.checkpointId);
if (pTask->status.downstreamReady != 1) {
tqError("s-task:%s not ready for checkpoint-trigger retrieve from 0x%x, since downstream not ready",
- pTask->id.idStr, (int32_t)pReq->downstreamTaskId);
+ pTask->id.idStr, (int32_t)req.downstreamTaskId);
- code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info,
+ code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info,
TSDB_CODE_STREAM_TASK_IVLD_STATUS);
streamMetaReleaseTask(pMeta, pTask);
return code;
@@ -1010,19 +1028,19 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg)
int64_t checkpointId = 0;
streamTaskGetActiveCheckpointInfo(pTask, &transId, &checkpointId);
- if (checkpointId != pReq->checkpointId) {
+ if (checkpointId != req.checkpointId) {
tqError("s-task:%s invalid checkpoint-trigger retrieve msg from 0x%" PRIx64 ", current checkpointId:%" PRId64
" req:%" PRId64,
- pTask->id.idStr, pReq->downstreamTaskId, checkpointId, pReq->checkpointId);
+ pTask->id.idStr, req.downstreamTaskId, checkpointId, req.checkpointId);
streamMetaReleaseTask(pMeta, pTask);
return TSDB_CODE_INVALID_MSG;
}
- if (streamTaskAlreadySendTrigger(pTask, pReq->downstreamNodeId)) {
+ if (streamTaskAlreadySendTrigger(pTask, req.downstreamNodeId)) {
// re-send the lost checkpoint-trigger msg to downstream task
tqDebug("s-task:%s re-send checkpoint-trigger to:0x%x, checkpointId:%" PRId64 ", transId:%d", pTask->id.idStr,
- (int32_t)pReq->downstreamTaskId, checkpointId, transId);
- code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info,
+ (int32_t)req.downstreamTaskId, checkpointId, transId);
+ code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info,
TSDB_CODE_SUCCESS);
} else { // not send checkpoint-trigger yet, wait
int32_t recv = 0, total = 0;
@@ -1036,7 +1054,7 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg)
"sending checkpoint-source/trigger",
pTask->id.idStr, recv, total);
}
- code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info,
+ code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info,
TSDB_CODE_ACTION_IN_PROGRESS);
}
} else { // upstream not recv the checkpoint-source/trigger till now
@@ -1048,7 +1066,7 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg)
"s-task:%s not recv checkpoint-source from mnode or checkpoint-trigger from upstream yet, wait for all "
"upstream sending checkpoint-source/trigger",
pTask->id.idStr);
- code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info,
+ code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info,
TSDB_CODE_ACTION_IN_PROGRESS);
}
@@ -1057,23 +1075,34 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg)
}
int32_t tqStreamTaskProcessRetrieveTriggerRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) {
- SCheckpointTriggerRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ SCheckpointTriggerRsp rsp = {0};
+ SStreamTask* pTask = NULL;
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+ SDecoder decoder = {0};
- SStreamTask* pTask = NULL;
- int32_t code = streamMetaAcquireTask(pMeta, pRsp->streamId, pRsp->taskId, &pTask);
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ if (tDecodeCheckpointTriggerRsp(&decoder, &rsp) < 0) {
+ tDecoderClear(&decoder);
+ tqError("vgId:%d invalid retrieve checkpoint-trigger rsp received", pMeta->vgId);
+ return TSDB_CODE_INVALID_MSG;
+ }
+ tDecoderClear(&decoder);
+
+ int32_t code = streamMetaAcquireTask(pMeta, rsp.streamId, rsp.taskId, &pTask);
if (pTask == NULL || (code != 0)) {
tqError(
"vgId:%d process retrieve checkpoint-trigger, failed to acquire task:0x%x, it may have been dropped already",
- pMeta->vgId, pRsp->taskId);
+ pMeta->vgId, rsp.taskId);
return code;
}
tqDebug(
- "s-task:%s recv re-send checkpoint-trigger msg from through retrieve/rsp channel, upstream:0x%x, "
- "checkpointId:%" PRId64 ", transId:%d",
- pTask->id.idStr, pRsp->upstreamTaskId, pRsp->checkpointId, pRsp->transId);
+ "s-task:%s recv re-send checkpoint-trigger msg through retrieve/rsp channel, upstream:0x%x, checkpointId:%" PRId64
+ ", transId:%d",
+ pTask->id.idStr, rsp.upstreamTaskId, rsp.checkpointId, rsp.transId);
- code = streamTaskProcessCheckpointTriggerRsp(pTask, pRsp);
+ code = streamTaskProcessCheckpointTriggerRsp(pTask, &rsp);
streamMetaReleaseTask(pMeta, pTask);
return code;
}
@@ -1186,10 +1215,12 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
streamMutexUnlock(&pHTask->lock);
code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode);
+ tqDebug("s-task:%s resume complete, code:%s", pHTask->id.idStr, tstrerror(code));
+
streamMetaReleaseTask(pMeta, pHTask);
}
- return code;
+ return TSDB_CODE_SUCCESS;
}
int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta) { return taosArrayGetSize(pMeta->pTaskList); }
@@ -1201,7 +1232,23 @@ int32_t doProcessDummyRspMsg(SStreamMeta* UNUSED_PARAM(pMeta), SRpcMsg* pMsg) {
}
int32_t tqStreamProcessStreamHbRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) {
- return streamProcessHeartbeatRsp(pMeta, pMsg->pCont);
+ SMStreamHbRspMsg rsp = {0};
+ int32_t code = 0;
+ SDecoder decoder;
+ char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
+ int32_t len = pMsg->contLen - sizeof(SMsgHead);
+
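+ // the hb rsp is now tEncode-serialized; decode it before handing it to the stream module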
+ tDecoderInit(&decoder, (uint8_t*)msg, len);
+ code = tDecodeStreamHbRsp(&decoder, &rsp);
+ if (code < 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ tDecoderClear(&decoder);
+ tqError("vgId:%d failed to parse hb rsp msg, code:%s", pMeta->vgId, tstrerror(terrno));
+ return terrno;
+ }
+
+ tDecoderClear(&decoder);
+ return streamProcessHeartbeatRsp(pMeta, &rsp);
}
int32_t tqStreamProcessReqCheckpointRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) { return doProcessDummyRspMsg(pMeta, pMsg); }
@@ -1235,7 +1282,7 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
SRestoreCheckpointInfo req = {0};
tDecoderInit(&decoder, (uint8_t*)msg, len);
- if (tDecodeRestoreCheckpointInfo(&decoder, &req) < 0) {
+ if ((code = tDecodeRestoreCheckpointInfo(&decoder, &req)) < 0) {
tqError("vgId:%d failed to decode set consensus checkpointId req, code:%s", vgId, tstrerror(code));
tDecoderClear(&decoder);
return TSDB_CODE_SUCCESS;
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index c7626dcf36..0f524e22d7 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -25,82 +25,109 @@
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
static int32_t setFirstLastResColToNull(SColumnInfoData* pCol, int32_t row) {
- char* buf = taosMemoryCalloc(1, pCol->info.bytes);
- if (buf == NULL) {
- return terrno;
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ char* buf = NULL;
+ SFirstLastRes* pRes = NULL;
- SFirstLastRes* pRes = (SFirstLastRes*)((char*)buf + VARSTR_HEADER_SIZE);
+ TSDB_CHECK_NULL(pCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ buf = taosMemoryCalloc(1, pCol->info.bytes);
+ TSDB_CHECK_NULL(buf, code, lino, _end, terrno);
+
+ pRes = (SFirstLastRes*)((char*)buf + VARSTR_HEADER_SIZE);
pRes->bytes = 0;
pRes->hasResult = true;
pRes->isNull = true;
varDataSetLen(buf, pCol->info.bytes - VARSTR_HEADER_SIZE);
- int32_t code = colDataSetVal(pCol, row, buf, false);
- taosMemoryFree(buf);
+ code = colDataSetVal(pCol, row, buf, false);
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (buf != NULL) {
+ taosMemoryFreeClear(buf);
+ }
return code;
}
static int32_t saveOneRowForLastRaw(SLastCol* pColVal, SCacheRowsReader* pReader, const int32_t slotId,
- SColumnInfoData* pColInfoData, int32_t numOfRows) {
- SColVal* pVal = &pColVal->colVal;
- int32_t code = 0;
+ SColumnInfoData* pColInfoData, int32_t numOfRows) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SColVal* pVal = NULL;
+
+ TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pVal = &pColVal->colVal;
// allNullRow = false;
if (IS_VAR_DATA_TYPE(pColVal->colVal.value.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
code = colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
code = colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
const int32_t* dstSlotIds, void** pRes, const char* idStr) {
- int32_t numOfRows = pBlock->info.rows;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t numOfRows = 0;
+ SArray* funcTypeBlockArray = NULL;
+
+ TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ if (pReader->numOfCols > 0) {
+ TSDB_CHECK_NULL(slotIds, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(dstSlotIds, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRes, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ }
+
+ numOfRows = pBlock->info.rows;
if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) {
uint64_t ts = TSKEY_MIN;
SFirstLastRes* p = NULL;
col_id_t colId = -1;
- SArray* funcTypeBlockArray = taosArrayInit(pReader->numOfCols, sizeof(int32_t));
- if (funcTypeBlockArray == NULL) {
- return terrno;
- }
+ funcTypeBlockArray = taosArrayInit(pReader->numOfCols, sizeof(int32_t));
+ TSDB_CHECK_NULL(funcTypeBlockArray, code, lino, _end, terrno);
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
- if (pColInfoData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t funcType = FUNCTION_TYPE_CACHE_LAST;
if (pReader->pFuncTypeList != NULL && taosArrayGetSize(pReader->pFuncTypeList) > i) {
void* pVal = taosArrayGet(pReader->pFuncTypeList, i);
- if (pVal == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
- funcType = *(int32_t*) pVal;
+ funcType = *(int32_t*)pVal;
pVal = taosArrayGet(pReader->pFuncTypeList, i);
- if (pVal == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
void* px = taosArrayInsert(funcTypeBlockArray, dstSlotIds[i], pVal);
- if (px == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
if (slotIds[i] == -1) {
@@ -110,24 +137,18 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
}
code = setFirstLastResColToNull(pColInfoData, numOfRows);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
continue;
}
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
- if (pColVal == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
colId = pColVal->colVal.cid;
if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) {
code = saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
continue;
}
@@ -154,22 +175,16 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
p->hasResult = true;
varDataSetLen(pRes[i], pColInfoData->info.bytes - VARSTR_HEADER_SIZE);
code = colDataSetVal(pColInfoData, numOfRows, (const char*)pRes[i], false);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
for (int32_t idx = 0; idx < taosArrayGetSize(pBlock->pDataBlock); ++idx) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, idx);
- if (pCol == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (idx < funcTypeBlockArray->size) {
void* pVal = taosArrayGet(funcTypeBlockArray, idx);
- if (pVal == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t funcType = *(int32_t*)pVal;
if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) {
@@ -182,17 +197,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
colDataSetNULL(pCol, numOfRows);
} else {
code = colDataSetVal(pCol, numOfRows, (const char*)&ts, false);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
continue;
} else if (pReader->numOfCols == 1 && idx != dstSlotIds[0] && (pCol->info.colId == colId || colId == -1)) {
if (p && !p->isNull) {
code = colDataSetVal(pCol, numOfRows, p->buf, false);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
colDataSetNULL(pCol, numOfRows);
}
@@ -201,13 +212,10 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
// pBlock->info.rows += allNullRow ? 0 : 1;
++pBlock->info.rows;
- taosArrayDestroy(funcTypeBlockArray);
} else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
- if (pColInfoData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
int32_t slotId = slotIds[i];
if (slotId == -1) {
@@ -216,47 +224,53 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
- if (pColVal == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
code = saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
// pBlock->info.rows += allNullRow ? 0 : 1;
++pBlock->info.rows;
} else {
tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr);
- return TSDB_CODE_INVALID_PARA;
+ code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (funcTypeBlockArray != NULL) {
+ taosArrayDestroy(funcTypeBlockArray);
+ }
return code;
}
static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* idstr) {
- int32_t numOfTables = p->numOfTables;
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t numOfTables = 0;
+
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ numOfTables = p->numOfTables;
if (suid != 0) {
code = metaGetTbTSchemaNotNull(p->pVnode->pMeta, suid, -1, 1, &p->pSchema);
if (TSDB_CODE_SUCCESS != code) {
tsdbWarn("stable:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", suid, idstr);
- if(code == TSDB_CODE_NOT_FOUND) {
- return TSDB_CODE_PAR_TABLE_NOT_EXIST;
- } else {
- return code;
+ if (code == TSDB_CODE_NOT_FOUND) {
+ code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
for (int32_t i = 0; i < numOfTables; ++i) {
uint64_t uid = p->pTableList[i].uid;
code = metaGetTbTSchemaMaybeNull(p->pVnode->pMeta, uid, -1, 1, &p->pSchema);
- if(code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (p->pSchema != NULL) {
break;
}
@@ -267,33 +281,52 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id
// all queried tables have been dropped already, return immediately.
if (p->pSchema == NULL) {
tsdbWarn("all queried tables has been dropped, try next group, %s", idstr);
- return TSDB_CODE_PAR_TABLE_NOT_EXIST;
+ code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t tsdbReuseCacherowsReader(void* reader, void* pTableIdList, int32_t numOfTables) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SCacheRowsReader* pReader = (SCacheRowsReader*)reader;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pReader->pTableList = pTableIdList;
pReader->numOfTables = numOfTables;
pReader->lastTs = INT64_MIN;
destroySttBlockReader(pReader->pLDataIterArray, NULL);
pReader->pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
+ TSDB_CHECK_NULL(pReader->pLDataIterArray, code, lino, _end, terrno);
- return (pReader->pLDataIterArray != NULL) ? TSDB_CODE_SUCCESS : terrno;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr,
SArray* pFuncTypeList, SColumnInfo* pPkCol, int32_t numOfPks) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SCacheRowsReader* p = NULL;
+
+ TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pReader = NULL;
- SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
- if (p == NULL) {
- return terrno;
- }
+ p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
p->type = type;
p->pVnode = pVnode;
@@ -307,12 +340,13 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
p->rowKey.numOfPKs = numOfPks;
if (numOfPks > 0) {
+ TSDB_CHECK_NULL(pPkCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
p->rowKey.pks[0].type = pPkCol->type;
if (IS_VAR_DATA_TYPE(pPkCol->type)) {
p->rowKey.pks[0].pData = taosMemoryCalloc(1, pPkCol->bytes);
if (p->rowKey.pks[0].pData == NULL) {
- taosMemoryFree(p);
- return terrno;
+ TSDB_CHECK_NULL(p->rowKey.pks[0].pData, code, lino, _end, terrno);
}
}
@@ -321,48 +355,46 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
if (numOfTables == 0) {
*pReader = p;
- return TSDB_CODE_SUCCESS;
+ p = NULL;
+ goto _end;
}
p->pTableList = pTableIdList;
p->numOfTables = numOfTables;
- int32_t code = setTableSchema(p, suid, idstr);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbCacherowsReaderClose(p);
- return code;
- }
+ code = setTableSchema(p, suid, idstr);
+ TSDB_CHECK_CODE(code, lino, _end);
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
- if (p->transferBuf == NULL) {
- tsdbCacherowsReaderClose(p);
- return terrno;
- }
+ TSDB_CHECK_NULL(p->transferBuf, code, lino, _end, terrno);
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
- if (p->transferBuf[i] == NULL) {
- tsdbCacherowsReaderClose(p);
- return terrno;
- }
+ TSDB_CHECK_NULL(p->transferBuf[i], code, lino, _end, terrno);
}
}
- p->idstr = taosStrdup(idstr);
- if (idstr != NULL && p->idstr == NULL) {
- tsdbCacherowsReaderClose(p);
- return terrno;
+ if (idstr != NULL) {
+ p->idstr = taosStrdup(idstr);
+ TSDB_CHECK_NULL(p->idstr, code, lino, _end, terrno);
}
code = taosThreadMutexInit(&p->readerMutex, NULL);
- if (code) {
- tsdbCacherowsReaderClose(p);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
p->lastTs = INT64_MIN;
*pReader = p;
+ p = NULL;
+
+_end:
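+ // on any failure, clear the caller's handle; p still owns its allocations and is closed below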
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ *pReader = NULL;
+ }
+ if (p != NULL) {
+ tsdbCacherowsReaderClose(p);
+ }
return code;
}
@@ -393,6 +425,7 @@ void tsdbCacherowsReaderClose(void* pReader) {
if (p->pLDataIterArray) {
destroySttBlockReader(p->pLDataIterArray, NULL);
+ p->pLDataIterArray = NULL;
}
if (p->pFileReader) {
@@ -401,7 +434,7 @@ void tsdbCacherowsReaderClose(void* pReader) {
}
taosMemoryFree((void*)p->idstr);
- (void) taosThreadMutexDestroy(&p->readerMutex);
+ (void)taosThreadMutexDestroy(&p->readerMutex);
if (p->pTableMap) {
void* pe = NULL;
@@ -443,39 +476,32 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) {
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds,
SArray* pTableUidList, bool* pGotAll) {
- if (pReader == NULL || pResBlock == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
-
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
bool hasRes = false;
SArray* pRow = NULL;
void** pRes = NULL;
- SCacheRowsReader* pr = pReader;
+ SCacheRowsReader* pr = NULL;
int32_t pkBufLen = 0;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pr = pReader;
+
pr->pReadSnap = NULL;
pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
- if (pRow == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(pRow, code, lino, _end, terrno);
pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
- if (pRes == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(pRes, code, lino, _end, terrno);
pkBufLen = (pr->rowKey.numOfPKs > 0) ? pr->pkColumn.bytes : 0;
for (int32_t j = 0; j < pr->numOfCols; ++j) {
int32_t bytes = (slotIds[j] == -1) ? 1 : pr->pSchema->columns[slotIds[j]].bytes;
pRes[j] = taosMemoryCalloc(1, sizeof(SFirstLastRes) + bytes + pkBufLen + VARSTR_HEADER_SIZE);
- if (pRes[j] == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(pRes[j], code, lino, _end, terrno);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
p->ts = INT64_MIN;
@@ -483,9 +509,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
(void)taosThreadMutexLock(&pr->readerMutex);
code = tsdbTakeReadSnap2((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap, pr->idstr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
@@ -494,20 +518,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
SArray* pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
- if (pLastCols == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(pLastCols, code, lino, _end, terrno);
for (int32_t i = 0; i < pr->numOfCols; ++i) {
int32_t slotId = slotIds[i];
if (slotId == -1) {
SLastCol p = {.rowKey.ts = INT64_MIN, .colVal.value.type = TSDB_DATA_TYPE_BOOL, .colVal.flag = CV_FLAG_NULL};
void* px = taosArrayPush(pLastCols, &p);
- if (px == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
continue;
}
struct STColumn* pCol = &pr->pSchema->columns[slotId];
@@ -518,29 +536,19 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t j = 0; j < pr->rowKey.numOfPKs; j++) {
p.rowKey.pks[j].type = pr->pkColumn.type;
if (IS_VAR_DATA_TYPE(pr->pkColumn.type)) {
-
p.rowKey.pks[j].pData = taosMemoryCalloc(1, pr->pkColumn.bytes);
- if (p.rowKey.pks[j].pData == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(p.rowKey.pks[j].pData, code, lino, _end, terrno);
}
}
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char));
- if (p.colVal.value.pData == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(p.colVal.value.pData, code, lino, _end, terrno);
}
void* px = taosArrayPush(pLastCols, &p);
- if (px == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
int64_t st = taosGetTimestampUs();
@@ -549,11 +557,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tb_uid_t uid = pTableList[i].uid;
code = tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
- if (code == -1) {// fix the invalid return code
+ if (code == -1) { // fix the invalid return code
code = 0;
- } else if (code != 0) {
- goto _end;
}
+ TSDB_CHECK_CODE(code, lino, _end);
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem);
@@ -600,10 +607,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
if (k == 0) {
if (TARRAY_SIZE(pTableUidList) == 0) {
void* px = taosArrayPush(pTableUidList, &uid);
- if (px == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
} else {
taosArraySet(pTableUidList, 0, &uid);
}
@@ -654,9 +658,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
if (hasRes) {
code = saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
- if (code) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
taosArrayDestroyEx(pLastCols, tsdbCacheFreeSLastColItem);
@@ -666,11 +668,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tb_uid_t uid = pTableList[i].uid;
if ((code = tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype)) != 0) {
- if (code == -1) {// fix the invalid return code
+ if (code == -1) { // fix the invalid return code
code = 0;
- } else if (code != 0) {
- goto _end;
}
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
@@ -679,17 +680,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
code = saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
- if (code) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem);
void* px = taosArrayPush(pTableUidList, &uid);
- if (px == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
++pr->tableIndex;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@@ -702,6 +698,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
} else {
code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
_end:
@@ -723,5 +720,8 @@ _end:
taosMemoryFree(pRes);
taosArrayDestroy(pRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c
index 95c5daf842..e3c75760c8 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c
@@ -667,7 +667,7 @@ int32_t tsdbCommitBegin(STsdb *tsdb, SCommitInfo *info) {
int64_t nRow = imem->nRow;
int64_t nDel = imem->nDel;
- if (nRow == 0 && nDel == 0) {
+ if ((nRow == 0 && nDel == 0) || (tsBypassFlag & TSDB_BYPASS_RB_TSDB_COMMIT)) {
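+    // Nothing to commit (no rows or deletes), or tsdb commit is bypassed by tsBypassFlag: just detach the immutable memtable.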
(void)taosThreadMutexLock(&tsdb->mutex);
tsdb->imem = NULL;
(void)taosThreadMutexUnlock(&tsdb->mutex);
diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c
index 720ba68414..f51ffe0c83 100644
--- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c
+++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c
@@ -972,7 +972,7 @@ static int32_t tsdbDataFileWriteBrinRecord(SDataFileWriter *writer, const SBrinR
break;
}
- if ((writer->brinBlock->numOfRecords) >= writer->config->maxRow) {
+ if ((writer->brinBlock->numOfRecords) >= 256) {
TAOS_CHECK_GOTO(tsdbDataFileWriteBrinBlock(writer), &lino, _exit);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index eb22335311..5b26d17519 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -122,6 +122,10 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitTbData *pSubmi
tb_uid_t suid = pSubmitTbData->suid;
tb_uid_t uid = pSubmitTbData->uid;
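+  // When the write-memtable bypass flag is set, skip inserting this submit block into the memtable.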
+ if (tsBypassFlag & TSDB_BYPASS_RB_TSDB_WRITE_MEM) {
+ goto _err;
+ }
+
// create/get STbData to op
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
if (code) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c
index c4971e27cf..ac8e8505e4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c
@@ -24,13 +24,14 @@
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
#define getCurrentKeyInSttBlock(_r) (&((_r)->currentKey))
-#define tColRowGetKeyDeepCopy(_pBlock, _irow, _slotId, _pKey) \
- do { \
- (_pKey)->ts = (_pBlock)->aTSKEY[(_irow)]; \
- (_pKey)->numOfPKs = 0; \
- if ((_slotId) != -1) { \
- tColRowGetPriamyKeyDeepCopy(_pBlock, _irow, _slotId, _pKey); \
- } \
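+// NOTE: on failure this macro sets the enclosing function's code/lino variables and jumps to its _end label,
+// so every expansion site must provide them.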
+#define tColRowGetKeyDeepCopy(_pBlock, _irow, _slotId, _pKey) \
+ do { \
+ (_pKey)->ts = (_pBlock)->aTSKEY[(_irow)]; \
+ (_pKey)->numOfPKs = 0; \
+ if ((_slotId) != -1) { \
+ code = tColRowGetPriamyKeyDeepCopy(_pBlock, _irow, _slotId, _pKey); \
+ TSDB_CHECK_CODE(code, lino, _end); \
+ } \
} while (0)
#define outOfTimeWindow(_ts, _window) (((_ts) > (_window)->ekey) || ((_ts) < (_window)->skey))
@@ -45,23 +46,23 @@ typedef struct {
bool moreThanCapcity;
} SDataBlockToLoadInfo;
-static int32_t getCurrentBlockInfo(SDataBlockIter* pBlockIter, SFileDataBlockInfo** pInfo, const char* idStr);
-static int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
- STsdbReader* pReader);
-static void getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader, TSDBROW** pRes);
-static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, SRowKey* pKey,
- STsdbReader* pReader);
-static int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo,
- SRowMerger* pMerger, int32_t pkSrcSlot, SVersionRange* pVerRange, const char* id);
-static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, SRowKey* pCurKey, SArray* pDelList,
- STsdbReader* pReader);
-static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pTSRow,
- STableBlockScanInfo* pScanInfo);
-static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
- int32_t rowIndex);
-static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
-static bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order,
- SVersionRange* pVerRange, bool hasPk);
+static int32_t getCurrentBlockInfo(SDataBlockIter* pBlockIter, SFileDataBlockInfo** pInfo, const char* idStr);
+static int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
+ STsdbReader* pReader);
+static int32_t getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader, TSDBROW** pRes);
+static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, SRowKey* pKey,
+ STsdbReader* pReader);
+static int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo,
+ SRowMerger* pMerger, int32_t pkSrcSlot, SVersionRange* pVerRange, const char* id);
+static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, SRowKey* pCurKey, SArray* pDelList,
+ STsdbReader* pReader);
+static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pTSRow,
+ STableBlockScanInfo* pScanInfo);
+static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
+ int32_t rowIndex);
+static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
+static int32_t hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order,
+ SVersionRange* pVerRange, bool hasPk, bool* dropped);
static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, SRowKey* pKey, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
TSDBROW* pResRow, STsdbReader* pReader, bool* freeTSRow);
@@ -77,10 +78,10 @@ static void getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRet
int8_t* pLevel, STsdb** pTsdb);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
static int32_t doBuildDataBlock(STsdbReader* pReader);
-static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
+static int32_t getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader, TSDBKEY* key);
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
static bool hasDataInSttBlock(STableBlockScanInfo* pInfo);
-static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
+static int32_t initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static int32_t resetTableListIndex(SReaderStatus* pStatus, const char* id);
static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t* pMinKey);
@@ -111,9 +112,17 @@ FORCE_INLINE int32_t pkCompEx(SRowKey* p1, SRowKey* p2) {
}
}
-static void tColRowGetPriamyKeyDeepCopy(SBlockData* pBlock, int32_t irow, int32_t slotId, SRowKey* pKey) {
- SColData* pColData = &pBlock->aColData[slotId];
+static int32_t tColRowGetPriamyKeyDeepCopy(SBlockData* pBlock, int32_t irow, int32_t slotId, SRowKey* pKey) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SColData* pColData = NULL;
SColVal cv;
+
+ TSDB_CHECK_CONDITION((pBlock != NULL) && (pBlock->aColData != NULL), code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pColData = &pBlock->aColData[slotId];
+
tColDataGetValue(pColData, irow, &cv);
pKey->numOfPKs = 1;
@@ -123,8 +132,14 @@ static void tColRowGetPriamyKeyDeepCopy(SBlockData* pBlock, int32_t irow, int32_
pKey->pks[0].val = cv.value.val;
} else {
pKey->pks[0].nData = cv.value.nData;
- (void)memcpy(pKey->pks[0].pData, cv.value.pData, cv.value.nData);
+ TAOS_MEMCPY(pKey->pks[0].pData, cv.value.pData, cv.value.nData);
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// for test purpose, todo remove it
@@ -135,10 +150,20 @@ static int32_t tGetPrimaryKeyIndex(uint8_t* p, SPrimaryKeyIndex* index) {
return n;
}
-static void tRowGetPrimaryKeyDeepCopy(SRow* pRow, SRowKey* pKey) {
+static int32_t tRowGetPrimaryKeyDeepCopy(SRow* pRow, SRowKey* pKey) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SPrimaryKeyIndex indices[TD_MAX_PK_COLS];
+ uint8_t* data = NULL;
+ int32_t len = 0;
- uint8_t* data = pRow->data;
+ TSDB_CHECK_NULL(pRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ if (pRow->numOfPKs > 0) {
+ TSDB_CHECK_NULL(pRow->data, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ }
+
+ data = pRow->data;
for (int32_t i = 0; i < pRow->numOfPKs; i++) {
data += tGetPrimaryKeyIndex(data, &indices[i]);
}
@@ -154,16 +179,29 @@ static void tRowGetPrimaryKeyDeepCopy(SRow* pRow, SRowKey* pKey) {
if (IS_VAR_DATA_TYPE(indices[i].type)) {
tdata += tGetU32v(tdata, &pKey->pks[i].nData);
- (void)memcpy(pKey->pks[i].pData, tdata, pKey->pks[i].nData);
+ TAOS_MEMCPY(pKey->pks[i].pData, tdata, pKey->pks[i].nData);
} else {
- (void)memcpy(&pKey->pks[i].val, data + indices[i].offset, tDataTypes[pKey->pks[i].type].bytes);
+ TAOS_MEMCPY(&pKey->pks[i].val, data + indices[i].offset, tDataTypes[pKey->pks[i].type].bytes);
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pCols, const int32_t* pSlotIdList,
int32_t numOfCols) {
- bool initSucc = true;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pSupInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ if (numOfCols > 0) {
+ TSDB_CHECK_NULL(pSlotIdList, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pCols, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ }
pSupInfo->pk.pk = 0;
pSupInfo->numOfPks = 0;
@@ -173,10 +211,7 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC
pSupInfo->numOfCols = numOfCols;
pSupInfo->colId = taosMemoryMalloc(numOfCols * (sizeof(int16_t) * 2 + POINTER_BYTES));
- if (pSupInfo->colId == NULL) {
- taosMemoryFree(pSupInfo->colId);
- return terrno;
- }
+ TSDB_CHECK_NULL(pSupInfo->colId, code, lino, _end, terrno);
pSupInfo->slotId = (int16_t*)((char*)pSupInfo->colId + (sizeof(int16_t) * numOfCols));
pSupInfo->buildBuf = (char**)((char*)pSupInfo->slotId + (sizeof(int16_t) * numOfCols));
@@ -184,36 +219,47 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC
pSupInfo->colId[i] = pCols[i].colId;
pSupInfo->slotId[i] = pSlotIdList[i];
- if (IS_VAR_DATA_TYPE(pCols[i].type)) {
- pSupInfo->buildBuf[i] = taosMemoryMalloc(pCols[i].bytes);
- if (pSupInfo->buildBuf[i] == NULL) {
- tsdbError("failed to prepare memory for set columnId slot list, size:%d, code:out of memory", pCols[i].bytes);
- initSucc = false;
- }
- } else {
- pSupInfo->buildBuf[i] = NULL;
- }
-
if (pCols[i].pk) {
pSupInfo->pk = pCols[i];
pSupInfo->pkSrcSlot = i - 1;
pSupInfo->pkDstSlot = pSlotIdList[i];
pSupInfo->numOfPks += 1;
}
+
+ if (IS_VAR_DATA_TYPE(pCols[i].type)) {
+ pSupInfo->buildBuf[i] = taosMemoryMalloc(pCols[i].bytes);
+ if (pSupInfo->buildBuf[i] == NULL) {
+ tsdbError("failed to prepare memory for set columnId slot list, size:%d, code: %s", pCols[i].bytes,
+ tstrerror(terrno));
+ }
+ TSDB_CHECK_NULL(pSupInfo->buildBuf[i], code, lino, _end, terrno);
+ } else {
+ pSupInfo->buildBuf[i] = NULL;
+ }
}
- return (initSucc)? TSDB_CODE_SUCCESS:TSDB_CODE_OUT_OF_MEMORY;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInfo) {
- int32_t i = 0, j = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t i = 0, j = 0;
+ STColumn* pTCol = NULL;
+
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pSupInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
while (i < pSchema->numOfCols && j < pSupInfo->numOfCols) {
- STColumn* pTCol = &pSchema->columns[i];
+ pTCol = &pSchema->columns[i];
if (pTCol->colId == pSupInfo->colId[j]) {
if (!IS_BSMA_ON(pTCol) && (PRIMARYKEY_TIMESTAMP_COL_ID != pTCol->colId)) {
pSupInfo->smaValid = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
i += 1;
@@ -221,33 +267,62 @@ static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInf
} else if (pTCol->colId < pSupInfo->colId[j]) { // do nothing
i += 1;
} else {
- return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
+ code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
-static bool isEmptyQueryTimeWindow(STimeWindow* pWindow) { return pWindow->skey > pWindow->ekey; }
+static bool isEmptyQueryTimeWindow(STimeWindow* pWindow) {
+ return (pWindow == NULL) || (pWindow->skey > pWindow->ekey);
+}
 // Update the query time window according to the data time-to-live (TTL) information, in order to avoid returning
 // expired data to the client, even if it has already been queried.
-static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) {
- int64_t earlyTs = tsdbGetEarliestTs(pTsdb);
- STimeWindow win = *pWindow;
- if (win.skey < earlyTs) {
- win.skey = earlyTs;
+static int32_t updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow, STimeWindow* out) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int64_t earlyTs = 0;
+
+ TSDB_CHECK_NULL(pTsdb, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pWindow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(out, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ earlyTs = tsdbGetEarliestTs(pTsdb);
+ *out = *pWindow;
+ if (out->skey < earlyTs) {
+ out->skey = earlyTs;
}
- return win;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// init file iterator
static int32_t initFilesetIterator(SFilesetIter* pIter, TFileSetArray* pFileSetArray, STsdbReader* pReader) {
- SBlockLoadSuppInfo* pInfo = &pReader->suppInfo;
- size_t numOfFileset = TARRAY2_SIZE(pFileSetArray);
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SBlockLoadSuppInfo* pInfo = NULL;
+ SSttBlockReader* pSttReader = NULL;
+ size_t numOfFileset = 0;
+ bool asc = false;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pFileSetArray, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pInfo = &pReader->suppInfo;
+ numOfFileset = TARRAY2_SIZE(pFileSetArray);
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
pIter->index = asc ? -1 : numOfFileset;
pIter->order = pReader->info.order;
@@ -258,11 +333,11 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, TFileSetArray* pFileSetA
pIter->pSttBlockReader = taosMemoryCalloc(1, sizeof(struct SSttBlockReader));
if (pIter->pSttBlockReader == NULL) {
tsdbError("failed to prepare the last block iterator, since:%s %s", tstrerror(terrno), pReader->idStr);
- return terrno;
}
+ TSDB_CHECK_NULL(pIter->pSttBlockReader, code, lino, _end, terrno);
}
- SSttBlockReader* pSttReader = pIter->pSttBlockReader;
+ pSttReader = pIter->pSttBlockReader;
pSttReader->order = pReader->info.order;
pSttReader->window = pReader->info.window;
pSttReader->verRange = pReader->info.verRange;
@@ -276,31 +351,46 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, TFileSetArray* pFileSetA
} else {
tsdbDebug("init fileset iterator, total files:%d %s", pIter->numOfFiles, pReader->idStr);
}
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bool* hasNext) {
- bool asc = ASCENDING_TRAVERSE(pIter->order);
- int32_t step = asc ? 1 : -1;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+ int32_t step = 0;
+ SReadCostSummary* pCost = NULL;
+ STFileObj** pFileObj = NULL;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(hasNext, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pIter->order);
+ step = asc ? 1 : -1;
+ *hasNext = false;
pIter->index += step;
if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) {
*hasNext = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- SReadCostSummary* pCost = &pReader->cost;
+ pCost = &pReader->cost;
+ TSDB_CHECK_NULL(pIter->pSttBlockReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
pIter->pSttBlockReader->uid = 0;
tMergeTreeClose(&pIter->pSttBlockReader->mergeTree);
destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost);
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
- if (pReader->status.pLDataIterArray == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pReader->status.pLDataIterArray, code, lino, _end, terrno);
// check file the time range of coverage
STimeWindow win = {0};
@@ -310,9 +400,10 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
tsdbDataFileReaderClose(&pReader->pFileReader);
}
+ TSDB_CHECK_CONDITION(pIter->index < pIter->pFilesetList->size, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
pReader->status.pCurrentFileset = pIter->pFilesetList->data[pIter->index];
- STFileObj** pFileObj = pReader->status.pCurrentFileset->farr;
+ pFileObj = pReader->status.pCurrentFileset->farr;
if (pFileObj[0] != NULL || pFileObj[3] != NULL) {
SDataFileReaderConfig conf = {.tsdb = pReader->pTsdb, .szPage = pReader->pTsdb->pVnode->config.tsdbPageSize};
@@ -339,9 +430,7 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
}
code = tsdbDataFileReaderOpen(filesName, &conf, &pReader->pFileReader);
- if (code != TSDB_CODE_SUCCESS) {
- goto _err;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pReader->cost.headFileLoad += 1;
}
@@ -354,14 +443,14 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %s", pReader,
pReader->info.window.skey, pReader->info.window.ekey, pReader->idStr);
*hasNext = false;
- return TSDB_CODE_SUCCESS;
+ break;
}
if ((asc && (win.ekey < pReader->info.window.skey)) || ((!asc) && (win.skey > pReader->info.window.ekey))) {
pIter->index += step;
if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) {
*hasNext = false;
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
}
@@ -370,17 +459,26 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
pReader->info.window.ekey, pReader->idStr);
*hasNext = true;
- return TSDB_CODE_SUCCESS;
+ break;
}
-_err:
- *hasNext = false;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-bool shouldFreePkBuf(SBlockLoadSuppInfo* pSupp) { return (pSupp->numOfPks > 0) && IS_VAR_DATA_TYPE(pSupp->pk.type); }
+bool shouldFreePkBuf(SBlockLoadSuppInfo* pSupp) {
+ return (pSupp != NULL) && (pSupp->numOfPks > 0) && IS_VAR_DATA_TYPE(pSupp->pk.type);
+}
int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool needFree, const char* id) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pIter->order = order;
pIter->index = -1;
pIter->numOfBlocks = 0;
@@ -389,100 +487,165 @@ int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool needFr
pIter->blockList = taosArrayInit(4, sizeof(SFileDataBlockInfo));
if (pIter->blockList == NULL) {
tsdbError("%s failed to reset block iter, func:%s at line:%d code:%s", id, __func__, __LINE__, tstrerror(terrno));
- return terrno;
}
+ TSDB_CHECK_NULL(pIter->blockList, code, lino, _end, terrno);
} else {
clearDataBlockIterator(pIter, needFree);
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void initReaderStatus(SReaderStatus* pStatus) {
+ if (pStatus == NULL) {
+ return;
+ }
pStatus->pTableIter = NULL;
pStatus->loadFromFile = true;
}
static int32_t createResBlock(SQueryTableDataCond* pCond, int32_t capacity, SSDataBlock** pResBlock) {
- QRY_PARAM_CHECK(pResBlock);
-
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SSDataBlock* pBlock = NULL;
- int32_t code = createDataBlock(&pBlock);
- if (code != 0) {
- return code;
- }
+
+ TSDB_CHECK_NULL(pCond, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(capacity >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = createDataBlock(&pBlock);
+ TSDB_CHECK_CODE(code, lino, _end);
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = pCond->colList[i];
code = blockDataAppendColInfo(pBlock, &colInfo);
- if (code != TSDB_CODE_SUCCESS) {
- taosMemoryFree(pBlock);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
code = blockDataEnsureCapacity(pBlock, capacity);
- if (code != TSDB_CODE_SUCCESS) {
- taosMemoryFree(pBlock);
- }
+ TSDB_CHECK_CODE(code, lino, _end);
*pResBlock = pBlock;
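+  // Ownership of the block has moved to *pResBlock; clear the local pointer so the error cleanup below skips it.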
+ pBlock = NULL;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (pBlock) {
+ taosArrayDestroy(pBlock->pDataBlock);
+ taosMemoryFreeClear(pBlock);
+ }
return code;
}
static int32_t tsdbInitReaderLock(STsdbReader* pReader) {
- int32_t code = taosThreadMutexInit(&pReader->readerMutex, NULL);
- tsdbTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = taosThreadMutexInit(&pReader->readerMutex, NULL);
+ tsdbTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t tsdbUninitReaderLock(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
- tsdbTrace("tsdb/read: %p, pre-uninit read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
+ int32_t lino = 0;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ tsdbTrace("tsdb/read: %p, pre-uninit read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
code = taosThreadMutexDestroy(&pReader->readerMutex);
tsdbTrace("tsdb/read: %p, post-uninit read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t tsdbAcquireReader(STsdbReader* pReader) {
- int32_t code = -1;
- tsdbTrace("tsdb/read: %s, pre-take read mutex: %p, code: %d", pReader->idStr, &pReader->readerMutex, code);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ TSDB_CHECK_CONDITION((pReader != NULL) && (pReader->idStr != NULL), code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ tsdbTrace("tsdb/read: %s, pre-take read mutex: %p, code: %d", pReader->idStr, &pReader->readerMutex, code);
code = taosThreadMutexLock(&pReader->readerMutex);
- if (code != 0) {
+ if (code != TSDB_CODE_SUCCESS) {
tsdbError("tsdb/read:%p, failed to lock reader mutex, code:%s", pReader->idStr, tstrerror(code));
} else {
tsdbTrace("tsdb/read: %s, post-take read mutex: %p, code: %d", pReader->idStr, &pReader->readerMutex, code);
}
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t tsdbTryAcquireReader(STsdbReader* pReader) {
- int32_t code = taosThreadMutexTryLock(&pReader->readerMutex);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = taosThreadMutexTryLock(&pReader->readerMutex);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
} else {
tsdbTrace("tsdb/read: %p, post-trytask read mutex: %p", pReader, &pReader->readerMutex);
}
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t tsdbReleaseReader(STsdbReader* pReader) {
- int32_t code = taosThreadMutexUnlock(&pReader->readerMutex);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = taosThreadMutexUnlock(&pReader->readerMutex);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("tsdb/read: %p post-untake read mutex:%p failed, code:%d", pReader, &pReader->readerMutex, code);
} else {
tsdbTrace("tsdb/read: %p, post-untake read mutex: %p", pReader, &pReader->readerMutex);
}
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
void tsdbReleaseDataBlock2(STsdbReader* pReader) {
+ if (pReader == NULL) return;
+
SReaderStatus* pStatus = &pReader->status;
if (!pStatus->composedDataBlock) {
(void) tsdbReleaseReader(pReader);
@@ -491,35 +654,34 @@ void tsdbReleaseDataBlock2(STsdbReader* pReader) {
static int32_t initResBlockInfo(SResultBlockInfo* pResBlockInfo, int64_t capacity, SSDataBlock* pResBlock,
SQueryTableDataCond* pCond, SBlockLoadSuppInfo* pSup) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SSDataBlock* p = NULL;
+
+ TSDB_CHECK_NULL(pResBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+  TSDB_CHECK_CONDITION((pResBlock != NULL) || (pSup != NULL), code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pResBlockInfo->capacity = capacity;
pResBlockInfo->pResBlock = pResBlock;
- int32_t code = 0;
if (pResBlockInfo->pResBlock == NULL) {
pResBlockInfo->freeBlock = true;
pResBlockInfo->pResBlock = NULL;
code = createResBlock(pCond, pResBlockInfo->capacity, &pResBlockInfo->pResBlock);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pSup->numOfPks > 0) {
- SSDataBlock* p = pResBlockInfo->pResBlock;
+ p = pResBlockInfo->pResBlock;
p->info.pks[0].type = pSup->pk.type;
p->info.pks[1].type = pSup->pk.type;
if (IS_VAR_DATA_TYPE(pSup->pk.type)) {
p->info.pks[0].pData = taosMemoryCalloc(1, pSup->pk.bytes);
- if (p->info.pks[0].pData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(p->info.pks[0].pData, code, lino, _end, terrno);
p->info.pks[1].pData = taosMemoryCalloc(1, pSup->pk.bytes);
- if (p->info.pks[1].pData == NULL) {
- taosMemoryFreeClear(p->info.pks[0].pData);
- return terrno;
- }
+            TSDB_CHECK_NULL(p->info.pks[1].pData, code, lino, _end, terrno);
p->info.pks[0].nData = pSup->pk.bytes;
p->info.pks[1].nData = pSup->pk.bytes;
@@ -529,18 +691,28 @@ static int32_t initResBlockInfo(SResultBlockInfo* pResBlockInfo, int64_t capacit
pResBlockInfo->freeBlock = false;
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void** ppReader, int32_t capacity,
SSDataBlock* pResBlock, const char* idstr) {
- int32_t code = 0;
- int8_t level = 0;
- STsdbReader* pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader));
- if (pReader == NULL) {
- code = terrno;
- goto _end;
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int8_t level = 0;
+ STsdbReader* pReader = NULL;
+ SBlockLoadSuppInfo* pSup = NULL;
+
+ TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pCond, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(ppReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *ppReader = NULL;
+ pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader));
+ TSDB_CHECK_NULL(pReader, code, lino, _end, terrno);
if (VND_IS_TSMA(pVnode)) {
tsdbDebug("vgId:%d, tsma is selected to query, %s", TD_VID(pVnode), idstr);
@@ -552,13 +724,14 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void
pReader->info.suid = pCond->suid;
pReader->info.order = pCond->order;
pReader->info.verRange = getQueryVerRange(pVnode, pCond, level);
- pReader->info.window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);
+ code = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows, &pReader->info.window);
+ TSDB_CHECK_CODE(code, lino, _end);
- pReader->idStr = (idstr != NULL) ? taosStrdup(idstr) : NULL;
- if (idstr != NULL && pReader->idStr == NULL) {
- code = terrno;
- goto _end;
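+  // Fall back to an empty string so the reader always owns a valid idStr.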
+ if (idstr == NULL) {
+ idstr = "";
}
+ pReader->idStr = taosStrdup(idstr);
+ TSDB_CHECK_NULL(pReader->idStr, code, lino, _end, terrno);
pReader->type = pCond->type;
pReader->bFilesetDelimited = false;
@@ -566,81 +739,80 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void
if (pCond->numOfCols <= 0) {
tsdbError("vgId:%d, invalid column number %d in query cond, %s", TD_VID(pVnode), pCond->numOfCols, idstr);
- code = TSDB_CODE_INVALID_PARA;
- goto _end;
+    code = TSDB_CODE_INVALID_PARA;
+    TSDB_CHECK_CODE(code, lino, _end);
}
// allocate buffer in order to load data blocks from file
- SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
+ pSup = &pReader->suppInfo;
pSup->tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
code = setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initResBlockInfo(&pReader->resBlockInfo, capacity, pResBlock, pCond, pSup);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = tBlockDataCreate(&pReader->status.fileBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->suppInfo.colId[0] != PRIMARYKEY_TIMESTAMP_COL_ID) {
tsdbError("the first column isn't primary timestamp, %d, %s", pReader->suppInfo.colId[0], pReader->idStr);
- code = TSDB_CODE_INVALID_PARA;
- goto _end;
+    code = TSDB_CODE_INVALID_PARA;
+    TSDB_CHECK_CODE(code, lino, _end);
}
pReader->status.pPrimaryTsCol = taosArrayGet(pReader->resBlockInfo.pResBlock->pDataBlock, pSup->slotId[0]);
- if (pReader->status.pPrimaryTsCol == NULL) {
- code = terrno;
- goto _end;
- }
+ TSDB_CHECK_NULL(pReader->status.pPrimaryTsCol, code, lino, _end, terrno);
int32_t type = pReader->status.pPrimaryTsCol->info.type;
if (type != TSDB_DATA_TYPE_TIMESTAMP) {
tsdbError("the first column isn't primary timestamp in result block, actual: %s, %s", tDataTypes[type].name,
pReader->idStr);
- code = TSDB_CODE_INVALID_PARA;
- goto _end;
+    code = TSDB_CODE_INVALID_PARA;
+    TSDB_CHECK_CODE(code, lino, _end);
}
code = tsdbInitReaderLock(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = tsem_init(&pReader->resumeAfterSuspend, 0, 0);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
*ppReader = pReader;
- return code;
+ pReader = NULL;
_end:
- tsdbReaderClose2(pReader);
- *ppReader = NULL;
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (pReader) {
+ tsdbReaderClose2(pReader);
+ }
return code;
}
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileReader, SArray* pIndexList) {
- int64_t st = taosGetTimestampUs();
- int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
- if (pFileReader == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+  int64_t st = 0;
+  int64_t et1 = 0;
+  int64_t et2 = 0;
+ int32_t numOfTables = 0;
const TBrinBlkArray* pBlkArray = NULL;
+ STableUidList* pList = NULL;
+ SBrinBlk* pBrinBlk = NULL;
- int32_t code = tsdbDataFileReadBrinBlk(pFileReader, &pBlkArray);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ if (pFileReader == NULL) {
+ goto _end;
}
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pIndexList, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ st = taosGetTimestampUs();
+ numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
+
+ code = tsdbDataFileReadBrinBlk(pFileReader, &pBlkArray);
+ TSDB_CHECK_CODE(code, lino, _end);
+
#if 0
LRUHandle* handle = NULL;
@@ -659,12 +831,12 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
#endif
// todo binary search to the start position
- int64_t et1 = taosGetTimestampUs();
+ et1 = taosGetTimestampUs();
- SBrinBlk* pBrinBlk = NULL;
- STableUidList* pList = &pReader->status.uidList;
+ pList = &pReader->status.uidList;
int32_t i = 0;
+ int32_t j = 0;
while (i < TARRAY2_SIZE(pBlkArray)) {
pBrinBlk = &pBlkArray->data[i];
if (pBrinBlk->maxTbid.suid < pReader->info.suid) {
@@ -676,11 +848,11 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
break;
}
- if (!(pBrinBlk->minTbid.suid <= pReader->info.suid && pBrinBlk->maxTbid.suid >= pReader->info.suid)) {
- tsdbError("tsdb failed at: %s %d", __func__, __LINE__);
- return TSDB_CODE_INTERNAL_ERROR;
- }
- if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[0]) {
+ TSDB_CHECK_CONDITION(
+ (pBrinBlk->minTbid.suid <= pReader->info.suid) && (pBrinBlk->maxTbid.suid >= pReader->info.suid), code, lino,
+ _end, TSDB_CODE_INTERNAL_ERROR);
+
+ if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[j]) {
i += 1;
continue;
}
@@ -689,47 +861,63 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
break;
}
- void* p1 = taosArrayPush(pIndexList, pBrinBlk);
- if (p1 == NULL) {
- return terrno;
- }
+ const void* p1 = taosArrayPush(pIndexList, pBrinBlk);
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
i += 1;
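+    // Advance j to the first queried uid not smaller than this block's max uid;
+    // once all queried uids have been passed, later blocks cannot match, so stop scanning.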
+ if (pBrinBlk->maxTbid.suid == pReader->info.suid) {
+ while (j < numOfTables && pList->tableUidList[j] < pBrinBlk->maxTbid.uid) {
+ j++;
+ }
+ if (j >= numOfTables) {
+ break;
+ }
+ }
}
- int64_t et2 = taosGetTimestampUs();
+ et2 = taosGetTimestampUs();
tsdbDebug("load block index for %d/%d tables completed, elapsed time:%.2f ms, set BrinBlk:%.2f ms, size:%.2f Kb %s",
numOfTables, (int32_t)pBlkArray->size, (et1 - st) / 1000.0, (et2 - et1) / 1000.0,
pBlkArray->size * sizeof(SBrinBlk) / 1024.0, pReader->idStr);
pReader->cost.headFileLoadTime += (et1 - st) / 1000.0;
-//_end:
- // tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, SBlockNumber* pBlockNum,
SArray* pTableScanInfoList) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int64_t st = 0;
+ bool asc = false;
+ STimeWindow w = {0};
+ SBrinRecordIter iter = {0};
+ int32_t numOfTables = 0;
+ SBrinRecord* pRecord = NULL;
int32_t k = 0;
size_t sizeInDisk = 0;
- int64_t st = taosGetTimestampUs();
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- STimeWindow w = pReader->info.window;
- SBrinRecord* pRecord = NULL;
- int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
- SBrinRecordIter iter = {0};
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockNum, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pTableScanInfoList, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ st = taosGetTimestampUs();
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ w = pReader->info.window;
+ numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
// clear info for the new file
cleanupInfoForNextFileset(pReader->status.pTableMap);
initBrinRecordIter(&iter, pReader->pFileReader, pIndexList);
while (1) {
- int32_t code = getNextBrinRecord(&iter, &pRecord);
- if (code != TSDB_CODE_SUCCESS) {
- clearBrinBlockIter(&iter);
- return code;
- }
+ code = getNextBrinRecord(&iter, &pRecord);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pRecord == NULL) {
break;
@@ -760,18 +948,12 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
continue;
}
- if (!(pRecord->suid == pReader->info.suid && uid == pRecord->uid)) {
- tsdbError("tsdb failed at: %s:%d", __func__, __LINE__);
- clearBrinBlockIter(&iter);
- return TSDB_CODE_INTERNAL_ERROR;
- }
+ TSDB_CHECK_CONDITION((pRecord->suid == pReader->info.suid) && (uid == pRecord->uid), code, lino, _end,
+ TSDB_CODE_INTERNAL_ERROR);
STableBlockScanInfo* pScanInfo = NULL;
code = getTableBlockScanInfo(pReader->status.pTableMap, uid, &pScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- clearBrinBlockIter(&iter);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// here we should find the first timestamp that is greater than the lastProcKey
// the window is an open interval NOW.
@@ -813,31 +995,21 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
if (pScanInfo->pBlockList == NULL) {
pScanInfo->pBlockList = taosArrayInit(4, sizeof(SFileDataBlockInfo));
- if (pScanInfo->pBlockList == NULL) {
- clearBrinBlockIter(&iter);
- return terrno;
- }
+ TSDB_CHECK_NULL(pScanInfo->pBlockList, code, lino, _end, terrno);
}
if (pScanInfo->pBlockIdxList == NULL) {
pScanInfo->pBlockIdxList = taosArrayInit(4, sizeof(STableDataBlockIdx));
- if (pScanInfo->pBlockIdxList == NULL) {
- clearBrinBlockIter(&iter);
- return terrno;
- }
+ TSDB_CHECK_NULL(pScanInfo->pBlockIdxList, code, lino, _end, terrno);
}
SFileDataBlockInfo blockInfo = {.tbBlockIdx = TARRAY_SIZE(pScanInfo->pBlockList)};
code = recordToBlockInfo(&blockInfo, pRecord);
- if (code != TSDB_CODE_SUCCESS) {
- clearBrinBlockIter(&iter);
- return code;
- }
- void* p1 = taosArrayPush(pScanInfo->pBlockList, &blockInfo);
- if (p1 == NULL) {
- clearBrinBlockIter(&iter);
- return terrno;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ sizeInDisk += blockInfo.blockSize;
+
+ const void* p1 = taosArrayPush(pScanInfo->pBlockList, &blockInfo);
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
// todo: refactor to record the fileset skey/ekey
if (pScanInfo->filesetWindow.skey > pRecord->firstKey.key.ts) {
@@ -851,27 +1023,18 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
pBlockNum->numOfBlocks += 1;
if (taosArrayGetSize(pTableScanInfoList) == 0) {
p1 = taosArrayPush(pTableScanInfoList, &pScanInfo);
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
} else {
STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList);
- if (p == NULL) {
- clearBrinBlockIter(&iter);
- tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr);
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
if ((*p)->uid != uid) {
p1 = taosArrayPush(pTableScanInfoList, &pScanInfo);
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
}
}
-
- if (p1 == NULL) {
- clearBrinBlockIter(&iter);
- return terrno;
- }
}
- clearBrinBlockIter(&iter);
-
pBlockNum->numOfSttFiles = pReader->status.pCurrentFileset->lvlArr->size;
int32_t total = pBlockNum->numOfSttFiles + pBlockNum->numOfBlocks;
@@ -885,43 +1048,69 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ clearBrinBlockIter(&iter);
+ return code;
}
static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, int64_t maxKey, int32_t order) {
- pDumpInfo->allDumped = true;
+ if (pDumpInfo != NULL) {
+ pDumpInfo->allDumped = true;
+ }
}
-static void updateLastKeyInfo(SRowKey* pKey, SFileDataBlockInfo* pBlockInfo, SDataBlockInfo* pInfo, int32_t numOfPks,
- bool asc) {
+static int32_t updateLastKeyInfo(SRowKey* pKey, SFileDataBlockInfo* pBlockInfo, SDataBlockInfo* pInfo, int32_t numOfPks,
+ bool asc) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pKey->ts = asc ? pInfo->window.ekey : pInfo->window.skey;
pKey->numOfPKs = numOfPks;
if (pKey->numOfPKs <= 0) {
- return;
+ goto _end;
}
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (IS_NUMERIC_TYPE(pKey->pks[0].type)) {
pKey->pks[0].val = asc ? pBlockInfo->lastPk.val : pBlockInfo->firstPk.val;
} else {
uint8_t* p = asc ? pBlockInfo->lastPk.pData : pBlockInfo->firstPk.pData;
pKey->pks[0].nData = asc ? varDataLen(pBlockInfo->lastPk.pData) : varDataLen(pBlockInfo->firstPk.pData);
- (void)memcpy(pKey->pks[0].pData, p, pKey->pks[0].nData);
+ TAOS_MEMCPY(pKey->pks[0].pData, p, pKey->pks[0].nData);
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_t colIndex, SColVal* pColVal,
SBlockLoadSuppInfo* pSup) {
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pColVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (IS_VAR_DATA_TYPE(pColVal->value.type)) {
if (!COL_VAL_IS_VALUE(pColVal)) {
colDataSetNULL(pColInfoData, rowIndex);
} else {
+ TSDB_CHECK_NULL(pSup, code, lino, _end, TSDB_CODE_INVALID_PARA);
varDataSetLen(pSup->buildBuf[colIndex], pColVal->value.nData);
if ((pColVal->value.nData + VARSTR_HEADER_SIZE) > pColInfoData->info.bytes) {
tsdbWarn("column cid:%d actual data len %d is bigger than schema len %d", pColVal->cid, pColVal->value.nData,
pColInfoData->info.bytes);
- return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
+ code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pColVal->value.nData > 0) { // pData may be null, if nData is 0
@@ -929,31 +1118,46 @@ static int32_t doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int
}
code = colDataSetVal(pColInfoData, rowIndex, pSup->buildBuf[colIndex], false);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
code = colDataSetVal(pColInfoData, rowIndex, (const char*)&pColVal->value.val, !COL_VAL_IS_VALUE(pColVal));
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t getCurrentBlockInfo(SDataBlockIter* pBlockIter, SFileDataBlockInfo** pInfo, const char* id) {
- *pInfo = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
- if (pBlockIter->blockList == NULL) {
- return TSDB_CODE_FAILED;
- }
+ TSDB_CHECK_NULL(pBlockIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockIter->blockList, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *pInfo = NULL;
size_t num = TARRAY_SIZE(pBlockIter->blockList);
if (num == 0) {
- if (num != pBlockIter->numOfBlocks) {
- tsdbError("tsdb read failed at: %s:%d %s", __func__, __LINE__, id);
- }
- return TSDB_CODE_FAILED;
+    // Some callers invoke this function while the block list is still empty. Treat that as a normal case and return
+    // directly, to avoid generating excessive error logs.
+ TSDB_CHECK_CONDITION(num == pBlockIter->numOfBlocks, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ return TSDB_CODE_INVALID_PARA;
}
*pInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index);
- return (*pInfo) != NULL? TSDB_CODE_SUCCESS:TSDB_CODE_FAILED;
+ TSDB_CHECK_NULL(*pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t doBinarySearchKey(const TSKEY* keyList, int num, int pos, TSKEY key, int order) {
@@ -962,7 +1166,7 @@ static int32_t doBinarySearchKey(const TSKEY* keyList, int num, int pos, TSKEY k
s = pos;
// check
- if (!(pos >= 0 && pos < num && num > 0)) {
+ if (!(keyList != NULL && pos >= 0 && pos < num && num > 0)) {
return -1;
}
if (order == TSDB_ORDER_ASC) {
@@ -1025,8 +1229,15 @@ static int32_t findFirstPos(const int64_t* pTsList, int32_t num, int32_t startPo
static int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData, SBrinRecord* pRecord, int32_t pos) {
// NOTE: reverse the order to find the end position in data block
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
int32_t endPos = -1;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ bool asc = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRecord, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
if (asc && pReader->info.window.ekey >= pRecord->lastKey.key.ts) {
endPos = pRecord->numRow - 1;
@@ -1036,7 +1247,7 @@ static int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData
int64_t key = asc ? pReader->info.window.ekey : pReader->info.window.skey;
endPos = doBinarySearchKey(pBlockData->aTSKEY, pRecord->numRow, pos, key, pReader->info.order);
if (endPos == -1) {
- return endPos;
+ goto _end;
}
endPos = findFirstPos(pBlockData->aTSKEY, pRecord->numRow, endPos, asc);
@@ -1063,16 +1274,28 @@ static int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData
endPos = i;
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return endPos;
}
-static void copyPrimaryTsCol(const SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo, SColumnInfoData* pColData,
- int32_t dumpedRows, bool asc) {
+static int32_t copyPrimaryTsCol(const SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo, SColumnInfoData* pColData,
+ int32_t dumpedRows, bool asc) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_CONDITION((pBlockData != NULL) && (pBlockData->aTSKEY != NULL), code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pDumpInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(dumpedRows >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
if (asc) {
- (void)memcpy(pColData->pData, &pBlockData->aTSKEY[pDumpInfo->rowIndex], dumpedRows * sizeof(int64_t));
+ TAOS_MEMCPY(pColData->pData, &pBlockData->aTSKEY[pDumpInfo->rowIndex], dumpedRows * sizeof(int64_t));
} else {
int32_t startIndex = pDumpInfo->rowIndex - dumpedRows + 1;
- (void)memcpy(pColData->pData, &pBlockData->aTSKEY[startIndex], dumpedRows * sizeof(int64_t));
+ TAOS_MEMCPY(pColData->pData, &pBlockData->aTSKEY[startIndex], dumpedRows * sizeof(int64_t));
// todo: opt perf by extract the loop
// reverse the array list
@@ -1084,12 +1307,28 @@ static void copyPrimaryTsCol(const SBlockData* pBlockData, SFileBlockDumpInfo* p
pts[dumpedRows - j - 1] = t;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// a faster version of copy procedure.
-static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo, SColumnInfoData* pColData,
- int32_t dumpedRows, bool asc) {
+static int32_t copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo, SColumnInfoData* pColData,
+ int32_t dumpedRows, bool asc) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
uint8_t* p = NULL;
+ int32_t step = asc ? 1 : -1;
+
+ TSDB_CHECK_NULL(pData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pDumpInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(dumpedRows >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ TSDB_CHECK_CONDITION(pData->type < TSDB_DATA_TYPE_MAX, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (asc) {
p = pData->pData + tDataTypes[pData->type].bytes * pDumpInfo->rowIndex;
} else {
@@ -1097,12 +1336,10 @@ static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo
p = pData->pData + tDataTypes[pData->type].bytes * startIndex;
}
- int32_t step = asc ? 1 : -1;
-
// make sure it is aligned to 8bit, the allocated memory address is aligned to 256bit
// 1. copy data in a batch model
- (void)memcpy(pColData->pData, p, dumpedRows * tDataTypes[pData->type].bytes);
+ TAOS_MEMCPY(pColData->pData, p, dumpedRows * tDataTypes[pData->type].bytes);
// 2. reverse the array list in case of descending order scan data block
if (!asc) {
@@ -1173,6 +1410,12 @@ static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo
}
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void blockInfoToRecord(SBrinRecord* record, SFileDataBlockInfo* pBlockInfo, SBlockLoadSuppInfo* pSupp) {
@@ -1211,38 +1454,54 @@ static void blockInfoToRecord(SBrinRecord* record, SFileDataBlockInfo* pBlockInf
}
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastProcKey) {
- SReaderStatus* pStatus = &pReader->status;
- SDataBlockIter* pBlockIter = &pStatus->blockIter;
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
-
- SBlockData* pBlockData = &pStatus->fileBlockData;
- SFileDataBlockInfo* pBlockInfo = NULL;
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
- int32_t numOfOutputCols = pSupInfo->numOfCols;
int32_t code = TSDB_CODE_SUCCESS;
- int64_t st = taosGetTimestampUs();
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- int32_t step = asc ? 1 : -1;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SDataBlockIter* pBlockIter = NULL;
+ SBlockLoadSuppInfo* pSupInfo = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SBlockData* pBlockData = NULL;
+ SFileDataBlockInfo* pBlockInfo = NULL;
+ SSDataBlock* pResBlock = NULL;
+ int32_t numOfOutputCols = 0;
+ int64_t st = 0;
+ bool asc = false;
+ int32_t step = 0;
+ SColVal cv = {0};
+ SBrinRecord tmp;
+ SBrinRecord* pRecord = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pBlockIter = &pStatus->blockIter;
+ pSupInfo = &pReader->suppInfo;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ pBlockData = &pStatus->fileBlockData;
+ pResBlock = pReader->resBlockInfo.pResBlock;
+ numOfOutputCols = pSupInfo->numOfCols;
+ st = taosGetTimestampUs();
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ step = asc ? 1 : -1;
code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- SColVal cv = {0};
- SBrinRecord tmp;
blockInfoToRecord(&tmp, pBlockInfo, pSupInfo);
- SBrinRecord* pRecord = &tmp;
+ pRecord = &tmp;
// no data exists, return directly.
if (pBlockData->nRow == 0 || pBlockData->aTSKEY == 0) {
tsdbWarn("%p no need to copy since no data in blockData, table uid:%" PRIu64 " has been dropped, %s", pReader,
pBlockInfo->uid, pReader->idStr);
pResBlock->info.rows = 0;
- return 0;
+ goto _end;
}
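+  // The dump cursor must point inside the current block before we search for the start position.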
+ TSDB_CHECK_CONDITION((pDumpInfo->rowIndex >= 0) && (pDumpInfo->rowIndex < pRecord->numRow), code, lino, _end,
+ TSDB_CODE_INVALID_PARA);
+
// row index of dump info remain the initial position, let's find the appropriate start position.
if (((pDumpInfo->rowIndex == 0) && asc) || ((pDumpInfo->rowIndex == (pRecord->numRow - 1)) && (!asc))) {
if (asc && pReader->info.window.skey <= pRecord->firstKey.key.ts &&
@@ -1263,14 +1522,15 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
"-%" PRId64 ", minVer:%" PRId64 ", maxVer:%" PRId64 " %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pRecord->firstKey.key.ts, pRecord->lastKey.key.ts,
pRecord->minVer, pRecord->maxVer, pReader->idStr);
- return TSDB_CODE_INVALID_PARA;
+ code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
pDumpInfo->rowIndex = findFirstPos(pBlockData->aTSKEY, pRecord->numRow, pDumpInfo->rowIndex, (!asc));
- if (!(pReader->info.verRange.minVer <= pRecord->maxVer && pReader->info.verRange.maxVer >= pRecord->minVer)) {
- tsdbError("tsdb failed at: %s:%d", __func__, __LINE__);
- return TSDB_CODE_INVALID_PARA;
- }
+
+ TSDB_CHECK_CONDITION(
+ (pReader->info.verRange.minVer <= pRecord->maxVer && pReader->info.verRange.maxVer >= pRecord->minVer), code,
+ lino, _end, TSDB_CODE_INVALID_PARA);
// find the appropriate start position that satisfies the version requirement.
if ((pReader->info.verRange.maxVer >= pRecord->minVer && pReader->info.verRange.maxVer < pRecord->maxVer) ||
@@ -1299,7 +1559,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
int32_t endIndex = getEndPosInDataBlock(pReader, pBlockData, pRecord, pDumpInfo->rowIndex);
if (endIndex == -1) {
setBlockAllDumped(pDumpInfo, pReader->info.window.ekey, pReader->info.order);
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
endIndex += step;
@@ -1308,19 +1568,18 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
dumpedRows = pReader->resBlockInfo.capacity;
} else if (dumpedRows <= 0) { // no qualified rows in current data block, quit directly.
setBlockAllDumped(pDumpInfo, pReader->info.window.ekey, pReader->info.order);
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
int32_t i = 0;
int32_t rowIndex = 0;
SColumnInfoData* pColData = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (pSupInfo->colId[i] == PRIMARYKEY_TIMESTAMP_COL_ID) {
- copyPrimaryTsCol(pBlockData, pDumpInfo, pColData, dumpedRows, asc);
+ code = copyPrimaryTsCol(pBlockData, pDumpInfo, pColData, dumpedRows, asc);
+ TSDB_CHECK_CODE(code, lino, _end);
i += 1;
}
@@ -1334,22 +1593,19 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
colIndex += 1;
} else if (pData->cid == pSupInfo->colId[i]) {
pColData = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (pData->flag == HAS_NONE || pData->flag == HAS_NULL || pData->flag == (HAS_NULL | HAS_NONE)) {
colDataSetNNULL(pColData, 0, dumpedRows);
} else {
if (IS_MATHABLE_TYPE(pColData->info.type)) {
- copyNumericCols(pData, pDumpInfo, pColData, dumpedRows, asc);
+ code = copyNumericCols(pData, pDumpInfo, pColData, dumpedRows, asc);
+ TSDB_CHECK_CODE(code, lino, _end);
} else { // varchar/nchar type
for (int32_t j = pDumpInfo->rowIndex; rowIndex < dumpedRows; j += step) {
tColDataGetValue(pData, j, &cv);
code = doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
}
@@ -1358,9 +1614,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
i += 1;
} else { // the specified column does not exist in file block, fill with null data
pColData = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
colDataSetNNULL(pColData, 0, dumpedRows);
i += 1;
@@ -1370,9 +1624,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
// fill the mis-matched columns with null value
while (i < numOfOutputCols) {
pColData = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
colDataSetNNULL(pColData, 0, dumpedRows);
i += 1;
@@ -1406,63 +1658,76 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pRecord->firstKey.key.ts, pRecord->lastKey.key.ts,
dumpedRows, unDumpedRows, pRecord->minVer, pRecord->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr);
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
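
// Illustrative sketch, not part of the patch: the shape that copyBlockDataToSDataBlock() above is
// rewritten into, built around a single `_end` label plus the TSDB_CHECK_CODE / TSDB_CHECK_NULL /
// TSDB_CHECK_CONDITION macros. The real macros live in the TDengine headers; the simplified
// stand-ins below only show the assumed control flow: on failure, record the line in `lino`,
// jump to `_end`, log once, and return the code.
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CHECK_CODE(code, lino, label) \
  do {                                       \
    if ((code) != 0) {                       \
      (lino) = __LINE__;                     \
      goto label;                            \
    }                                        \
  } while (0)

#define SKETCH_CHECK_NULL(ptr, code, lino, label, err) \
  do {                                                 \
    if ((ptr) == NULL) {                               \
      (code) = (err);                                  \
      (lino) = __LINE__;                               \
      goto label;                                      \
    }                                                  \
  } while (0)

static int32_t sketchStep(void) { return 0; }  // placeholder for one unit of real work

static int32_t sketchCopyBlock(const void* pInput) {
  int32_t code = 0;  // plays the role of TSDB_CODE_SUCCESS
  int32_t lino = 0;

  SKETCH_CHECK_NULL(pInput, code, lino, _end, -1);  // -1 stands in for TSDB_CODE_INVALID_PARA

  code = sketchStep();
  SKETCH_CHECK_CODE(code, lino, _end);  // every intermediate step is checked the same way

_end:
  if (code != 0) {
    // single exit point: log once with the recorded line, then hand the code back to the caller
    printf("%s failed at line %d, code:%d\n", __func__, (int)lino, (int)code);
  }
  return code;
}
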
static FORCE_INLINE STSchema* getTableSchemaImpl(STsdbReader* pReader, uint64_t uid) {
- if (pReader->info.pSchema != NULL) {
- terrno = TSDB_CODE_INVALID_PARA;
- tsdbError("tsdb invalid input param at: %s:%d", __func__, __LINE__);
- return NULL;
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
- int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, uid, -1, &pReader->info.pSchema);
+ TSDB_CHECK_CONDITION((pReader != NULL) && (pReader->info.pSchema == NULL), code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, uid, -1, &pReader->info.pSchema);
if (code != TSDB_CODE_SUCCESS || pReader->info.pSchema == NULL) {
terrno = code;
tsdbError("failed to get table schema, uid:%" PRIu64 ", it may have been dropped, ver:-1, %s", uid, pReader->idStr);
- return NULL;
}
+ TSDB_CHECK_CODE(code, lino, _end);
+ TSDB_CHECK_NULL(pReader->info.pSchema, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
code = tsdbRowMergerInit(&pReader->status.merger, pReader->info.pSchema);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
terrno = code;
- tsdbError("failed to init merger, code:%s, %s", tstrerror(code), pReader->idStr);
return NULL;
}
-
return pReader->info.pSchema;
}
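
// Illustrative sketch, not part of the patch: the `_end` variant used by pointer-returning helpers
// such as getTableSchemaImpl() above. Because the return value is a pointer rather than a status
// code, the shared exit publishes the failure through a thread-local error slot (terrno in
// TDengine; a plain variable stands in here) and returns NULL. All names below are placeholders.
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static int32_t sketch_errno = 0;  // stand-in for terrno

typedef struct SketchSchema2 { int32_t version; } SketchSchema2;

// stand-in for the meta lookup; pretend the table has been dropped
static int32_t sketchLoadSchema(uint64_t uid, SketchSchema2** out) {
  (void)uid;
  *out = NULL;
  return -1;
}

static SketchSchema2* sketchGetSchemaOrNull(uint64_t uid) {
  int32_t        code = 0;
  int32_t        lino = 0;
  SketchSchema2* pSchema = NULL;

  code = sketchLoadSchema(uid, &pSchema);
  if (code == 0 && pSchema == NULL) {
    code = -2;  // stands in for TSDB_CODE_INTERNAL_ERROR
  }
  if (code != 0) {
    lino = __LINE__;
    goto _end;
  }

_end:
  if (code != 0) {
    printf("%s failed at line %d, code:%d\n", __func__, (int)lino, (int)code);
    sketch_errno = code;  // callers read the reason from here, as they would from terrno
    return NULL;
  }
  return pSchema;
}
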
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData,
uint64_t uid) {
- int32_t code = 0;
- STSchema* pSchema = pReader->info.pSchema;
- int64_t st = taosGetTimestampUs();
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STSchema* pSchema = NULL;
SFileDataBlockInfo* pBlockInfo = NULL;
- SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
+ SBlockLoadSuppInfo* pSup = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ int64_t st = 0;
+ SBrinRecord tmp;
+ SBrinRecord* pRecord = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pSchema = pReader->info.pSchema;
+ st = taosGetTimestampUs();
+ pSup = &pReader->suppInfo;
tBlockDataReset(pBlockData);
if (pReader->info.pSchema == NULL) {
pSchema = getTableSchemaImpl(pReader, uid);
if (pSchema == NULL) {
       code = terrno;
tsdbError("%p table uid:%" PRIu64 " failed to get tableschema, code:%s, %s", pReader, uid, tstrerror(code),
pReader->idStr);
- return code;
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, terrno);
}
}
code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBrinRecord tmp;
blockInfoToRecord(&tmp, pBlockInfo, pSup);
- SBrinRecord* pRecord = &tmp;
+ pRecord = &tmp;
code = tsdbDataFileReadBlockDataByColumn(pReader->pFileReader, pRecord, pBlockData, pSchema, &pSup->colId[1],
pSup->numOfCols - 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -1470,7 +1735,7 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
", rows:%d, code:%s %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->firstKey, pBlockInfo->lastKey,
pBlockInfo->numRow, tstrerror(code), pReader->idStr);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
@@ -1483,6 +1748,10 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
pReader->cost.blockLoadTime += elapsedTime;
pDumpInfo->allDumped = false;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -1496,69 +1765,83 @@ static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* p
(pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer);
}
-static bool getNeighborBlockOfTable(SDataBlockIter* pBlockIter, SFileDataBlockInfo* pBlockInfo,
- STableBlockScanInfo* pScanInfo, int32_t* nextIndex, int32_t order,
- SBrinRecord* pRecord, SBlockLoadSuppInfo* pSupInfo) {
- bool asc = ASCENDING_TRAVERSE(order);
- int32_t step = asc ? 1 : -1;
+static int32_t getNeighborBlockOfTable(SDataBlockIter* pBlockIter, SFileDataBlockInfo* pBlockInfo,
+ STableBlockScanInfo* pScanInfo, int32_t* nextIndex, int32_t order,
+ SBrinRecord* pRecord, SBlockLoadSuppInfo* pSupInfo, bool* res) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+ int32_t step = 0;
+ STableDataBlockIdx* pTableDataBlockIdx = NULL;
+ SFileDataBlockInfo* p = NULL;
+
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(res, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *res = false;
+ asc = ASCENDING_TRAVERSE(order);
+ step = asc ? 1 : -1;
if (asc && pBlockInfo->tbBlockIdx >= taosArrayGetSize(pScanInfo->pBlockIdxList) - 1) {
- return false;
+ *res = false;
+ } else if (!asc && pBlockInfo->tbBlockIdx == 0) {
+ *res = false;
+ } else {
+ TSDB_CHECK_NULL(pBlockIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(nextIndex, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRecord, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pSupInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pTableDataBlockIdx = taosArrayGet(pScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step);
+ TSDB_CHECK_NULL(pTableDataBlockIdx, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex);
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ blockInfoToRecord(pRecord, p, pSupInfo);
+
+ *nextIndex = pBlockInfo->tbBlockIdx + step;
+ *res = true;
}
- if (!asc && pBlockInfo->tbBlockIdx == 0) {
- return false;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
-
- STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step);
- if (pTableDataBlockIdx == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
-
- SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex);
- if (p == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
-
- blockInfoToRecord(pRecord, p, pSupInfo);
-
- *nextIndex = pBlockInfo->tbBlockIdx + step;
- return true;
+ return code;
}
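
// Illustrative sketch, not part of the patch: the signature change applied to
// getNeighborBlockOfTable() above and to the other predicates below (fileBlockShouldLoad,
// isCleanFileDataBlock, isValidFileBlockRow). A helper that used to return bool now returns an
// int32_t status code and reports its boolean answer through an out-parameter, so callers can
// tell "the answer is false" apart from "the lookup itself failed". Names and error values are
// placeholders, not TDengine APIs.
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

// after the change: status code for errors, *res for the answer
static int32_t sketchHasNeighbor(const int32_t* blockIdxList, int32_t nBlocks, int32_t idx, bool* res) {
  if (blockIdxList == NULL || res == NULL) {
    return -1;  // stands in for TSDB_CODE_INVALID_PARA
  }
  *res = (idx + 1 < nBlocks);  // stands in for the real neighbor lookup
  return 0;
}

// caller side: check the code first, only then trust the flag
static int32_t sketchNeighborCaller(const int32_t* blockIdxList, int32_t nBlocks) {
  bool    hasNeighbor = false;
  int32_t code = sketchHasNeighbor(blockIdxList, nBlocks, 3, &hasNeighbor);
  if (code != 0) {
    return code;  // propagate the failure instead of silently treating it as "false"
  }
  return hasNeighbor ? 1 : 0;
}
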
static int32_t setFileBlockActiveInBlockIter(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t index,
int32_t step) {
- int32_t code = TSDB_CODE_SUCCESS;
- if (index < 0 || index >= pBlockIter->numOfBlocks) {
- return TSDB_CODE_FAILED;
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ const void* p = NULL;
+ SFileDataBlockInfo fblock;
+ SFileDataBlockInfo* pBlockInfo = NULL;
+ STableBlockScanInfo* pBlockScanInfo = NULL;
+ STableDataBlockIdx* pTableDataBlockIdx = NULL;
- void* p = taosArrayGet(pBlockIter->blockList, index);
- if (p == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pBlockIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION((index >= 0) && (index < pBlockIter->numOfBlocks), code, lino, _end, TSDB_CODE_INVALID_PARA);
- SFileDataBlockInfo fblock = *(SFileDataBlockInfo*) p;
+ p = taosArrayGet(pBlockIter->blockList, index);
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ fblock = *(SFileDataBlockInfo*)p;
pBlockIter->index += step;
if (index != pBlockIter->index) {
if (index > pBlockIter->index) {
for (int32_t i = index - 1; i >= pBlockIter->index; --i) {
- SFileDataBlockInfo* pBlockInfo = taosArrayGet(pBlockIter->blockList, i);
- if (pBlockInfo == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pBlockInfo = taosArrayGet(pBlockIter->blockList, i);
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
- STableBlockScanInfo* pBlockScanInfo = NULL;
code = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, &pBlockScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx);
- if (pTableDataBlockIdx == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx);
+ TSDB_CHECK_NULL(pTableDataBlockIdx, code, lino, _end, TSDB_CODE_INVALID_PARA);
pTableDataBlockIdx->globalIndex = i + 1;
@@ -1566,21 +1849,14 @@ static int32_t setFileBlockActiveInBlockIter(STsdbReader* pReader, SDataBlockIte
}
} else if (index < pBlockIter->index) {
for (int32_t i = index + 1; i <= pBlockIter->index; ++i) {
- SFileDataBlockInfo* pBlockInfo = taosArrayGet(pBlockIter->blockList, i);
- if (pBlockInfo == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pBlockInfo = taosArrayGet(pBlockIter->blockList, i);
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
- STableBlockScanInfo* pBlockScanInfo = NULL;
code = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, &pBlockScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx);
- if (pTableDataBlockIdx == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx);
+ TSDB_CHECK_NULL(pTableDataBlockIdx, code, lino, _end, TSDB_CODE_INVALID_PARA);
pTableDataBlockIdx->globalIndex = i - 1;
taosArraySet(pBlockIter->blockList, i - 1, pBlockInfo);
@@ -1588,21 +1864,21 @@ static int32_t setFileBlockActiveInBlockIter(STsdbReader* pReader, SDataBlockIte
}
taosArraySet(pBlockIter->blockList, pBlockIter->index, &fblock);
- STableBlockScanInfo* pBlockScanInfo = NULL;
+ pBlockScanInfo = NULL;
code = getTableBlockScanInfo(pReader->status.pTableMap, fblock.uid, &pBlockScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- STableDataBlockIdx* pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, fblock.tbBlockIdx);
- if (pTableDataBlockIdx == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pTableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, fblock.tbBlockIdx);
+ TSDB_CHECK_NULL(pTableDataBlockIdx, code, lino, _end, TSDB_CODE_INVALID_PARA);
pTableDataBlockIdx->globalIndex = pBlockIter->index;
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// todo: this attribute could be acquired during extractin the global ordered block list.
@@ -1672,15 +1948,26 @@ static bool keyOverlapFileBlock(TSDBKEY key, SFileDataBlockInfo* pBlock, SVersio
(pBlock->minVer <= pVerRange->maxVer);
}
-static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pBlockInfo,
- STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, STsdbReader* pReader) {
+static int32_t getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pBlockInfo,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SBrinRecord rec = {0};
int32_t neighborIndex = 0;
- int32_t order = pReader->info.order;
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
+ int32_t order = 0;
+ SBlockLoadSuppInfo* pSupInfo = NULL;
+ SBrinRecord pRecord;
+ bool hasNeighbor = false;
- bool hasNeighbor =
- getNeighborBlockOfTable(&pReader->status.blockIter, pBlockInfo, pScanInfo, &neighborIndex, order, &rec, pSupInfo);
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ order = pReader->info.order;
+ pSupInfo = &pReader->suppInfo;
+
+ code = getNeighborBlockOfTable(&pReader->status.blockIter, pBlockInfo, pScanInfo, &neighborIndex, order, &rec,
+ pSupInfo, &hasNeighbor);
+ TSDB_CHECK_CODE(code, lino, _end);
// overlap with neighbor
if (hasNeighbor) {
@@ -1688,7 +1975,6 @@ static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo*
overlapWithNeighborBlock2(pBlockInfo, &rec, order, pSupInfo->pk.type, pSupInfo->numOfPks);
}
- SBrinRecord pRecord;
blockInfoToRecord(&pRecord, pBlockInfo, pSupInfo);
// has duplicated ts of different version in this block
@@ -1704,6 +1990,12 @@ static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo*
pInfo->moreThanCapcity = pBlockInfo->numRow > pReader->resBlockInfo.capacity;
pInfo->partiallyRequired = dataBlockPartiallyRequired(&pReader->info.window, &pReader->info.verRange, pBlockInfo);
pInfo->overlapWithKeyInBuf = keyOverlapFileBlock(keyInBuf, pBlockInfo, &pReader->info.verRange);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// 1. the version of all rows should be less than the endVersion
@@ -1712,17 +2004,23 @@ static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo*
// 4. output buffer should be large enough to hold all rows in current block
// 5. delete info should not overlap with current block data
// 6. current block should not contain the duplicated ts
-static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo,
- TSDBKEY keyInBuf) {
+static int32_t fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo,
+ TSDBKEY keyInBuf, bool* load) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SDataBlockToLoadInfo info = {0};
- getBlockToLoadInfo(&info, pBlockInfo, pScanInfo, keyInBuf, pReader);
- bool loadDataBlock =
- (info.overlapWithNeighborBlock || info.hasDupTs || info.partiallyRequired || info.overlapWithKeyInBuf ||
- info.moreThanCapcity || info.overlapWithDelInfo || info.overlapWithSttBlock);
+ TSDB_CHECK_NULL(load, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *load = false;
+ code = getBlockToLoadInfo(&info, pBlockInfo, pScanInfo, keyInBuf, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ *load = (info.overlapWithNeighborBlock || info.hasDupTs || info.partiallyRequired || info.overlapWithKeyInBuf ||
+ info.moreThanCapcity || info.overlapWithDelInfo || info.overlapWithSttBlock);
// log the reason why load the datablock for profile
- if (loadDataBlock) {
+ if (*load) {
tsdbDebug("%p uid:%" PRIu64
" need to load the datablock, overlapneighbor:%d, hasDup:%d, partiallyRequired:%d, "
"overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithSttBlock:%d, %s",
@@ -1731,45 +2029,76 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlock
pReader->idStr);
}
- return loadDataBlock;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
-static bool isCleanFileDataBlock(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo,
- TSDBKEY keyInBuf) {
+static int32_t isCleanFileDataBlock(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, bool* res) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SDataBlockToLoadInfo info = {0};
- getBlockToLoadInfo(&info, pBlockInfo, pScanInfo, keyInBuf, pReader);
- bool isCleanFileBlock = !(info.overlapWithNeighborBlock || info.hasDupTs || info.overlapWithKeyInBuf ||
- info.overlapWithDelInfo || info.overlapWithSttBlock);
- return isCleanFileBlock;
+
+ TSDB_CHECK_NULL(res, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *res = false;
+ code = getBlockToLoadInfo(&info, pBlockInfo, pScanInfo, keyInBuf, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ *res = !(info.overlapWithNeighborBlock || info.hasDupTs || info.overlapWithKeyInBuf || info.overlapWithDelInfo ||
+ info.overlapWithSttBlock);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t initRowMergeIfNeeded(STsdbReader* pReader, int64_t uid) {
- SRowMerger* pMerger = &pReader->status.merger;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
+ STSchema* ps = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
if (pMerger->pArray == NULL) {
- STSchema* ps = getTableSchemaImpl(pReader, uid);
- if (ps == NULL) {
- return terrno;
- }
+ ps = getTableSchemaImpl(pReader, uid);
+ TSDB_CHECK_NULL(ps, code, lino, _end, terrno);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, int64_t endKey) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int64_t st = 0;
+ SSDataBlock* pBlock = NULL;
+
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
if (!(pBlockScanInfo->iiter.hasVal || pBlockScanInfo->iter.hasVal)) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- int32_t code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
- if (code != 0) {
- return code;
- }
+ code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
+ TSDB_CHECK_CODE(code, lino, _end);
- int64_t st = taosGetTimestampUs();
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
+ st = taosGetTimestampUs();
+ pBlock = pReader->resBlockInfo.pResBlock;
code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
double el = (taosGetTimestampUs() - st) / 1000.0;
updateComposedBlockInfo(pReader, el, pBlockScanInfo);
@@ -1780,43 +2109,65 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
pReader->idStr);
pReader->cost.buildmemBlock += el;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, SRowKey* pKey,
- SFileBlockDumpInfo* pDumpInfo, bool* copied) {
+static int32_t tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, SRowKey* pKey,
+ SFileBlockDumpInfo* pDumpInfo, bool* copied) {
// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pDumpInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(copied, code, lino, _end, TSDB_CODE_INVALID_PARA);
*copied = false;
- bool asc = (pReader->info.order == TSDB_ORDER_ASC);
+ asc = (pReader->info.order == TSDB_ORDER_ASC);
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && asc) || (pDumpInfo->rowIndex > 0 && (!asc))) {
int32_t step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;
SRowKey nextRowKey;
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
tColRowGetKey(pBlockData, pDumpInfo->rowIndex + step, &nextRowKey);
if (pkCompEx(pKey, &nextRowKey) != 0) { // merge is not needed
code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pDumpInfo->rowIndex += step;
*copied = true;
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, int32_t pkSrcSlot,
SVersionRange* pVerRange) {
- int32_t code = 0;
- int32_t order = pSttBlockReader->order;
- int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1;
- SRowKey* pNextProc = &pScanInfo->sttKeyInfo.nextProcKey;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t order = 0;
+ int32_t step = 0;
+ SRowKey* pNextProc = NULL;
+
+ TSDB_CHECK_NULL(pSttBlockReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ order = pSttBlockReader->order;
+ step = ASCENDING_TRAVERSE(order) ? 1 : -1;
+ pNextProc = &pScanInfo->sttKeyInfo.nextProcKey;
while (1) {
bool hasVal = false;
@@ -1824,7 +2175,7 @@ static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBloc
if (code) {
tsdbError("failed to iter the next row in stt-file merge tree, code:%s, %s", tstrerror(code),
pSttBlockReader->mergeTree.idStr);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (!hasVal) { // the next value will be the accessed key in stt
@@ -1839,7 +2190,7 @@ static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBloc
memset(pNextProc->pks[0].pData, 0, pNextProc->pks[0].nData);
}
}
- return code;
+ goto _end;
}
TSDBROW* pRow = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
@@ -1855,17 +2206,24 @@ static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBloc
tColRowGetKeyDeepCopy(pRow->pBlockData, pRow->iRow, pkSrcSlot, pNextProc);
if (pScanInfo->delSkyline != NULL && TARRAY_SIZE(pScanInfo->delSkyline) > 0) {
- if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, order, pVerRange,
- pSttBlockReader->numOfPks > 0)) {
+ bool dropped = false;
+ code = hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, order, pVerRange,
+ pSttBlockReader->numOfPks > 0, &dropped);
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (!dropped) {
pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA;
- return code;
+ goto _end;
}
} else {
pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA;
- return code;
+ goto _end;
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -1876,89 +2234,113 @@ static void doUnpinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreeUnpinS
static int32_t tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SSttBlockReader* pSttBlockReader,
STableBlockScanInfo* pScanInfo, SRowKey* pSttKey, STsdbReader* pReader,
bool* copied) {
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowKey* pNext = NULL;
+
+ TSDB_CHECK_NULL(pSttBlockReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(copied, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*copied = false;
// avoid the fetch next row replace the referenced stt block in buffer
doPinSttBlock(pSttBlockReader);
code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange);
doUnpinSttBlock(pSttBlockReader);
-
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (hasDataInSttBlock(pScanInfo)) {
- SRowKey* pNext = getCurrentKeyInSttBlock(pSttBlockReader);
+ pNext = getCurrentKeyInSttBlock(pSttBlockReader);
if (pkCompEx(pSttKey, pNext) != 0) {
code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, fRow->pBlockData, fRow->iRow);
*copied = (code == TSDB_CODE_SUCCESS);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, fRow->pBlockData, fRow->iRow);
*copied = (code == TSDB_CODE_SUCCESS);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STSchema* ps = NULL;
+ void** p = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
// always set the newest schema version in pReader->info.pSchema
if (pReader->info.pSchema == NULL) {
- STSchema* ps = getTableSchemaImpl(pReader, uid);
- if (ps == NULL) {
- return NULL;
- }
+ ps = getTableSchemaImpl(pReader, uid);
+ TSDB_CHECK_NULL(ps, code, lino, _end, terrno);
}
if (pReader->info.pSchema && sversion == pReader->info.pSchema->version) {
- return pReader->info.pSchema;
+ ps = pReader->info.pSchema;
+ goto _end;
}
- void** p = tSimpleHashGet(pReader->pSchemaMap, &sversion, sizeof(sversion));
+ p = tSimpleHashGet(pReader->pSchemaMap, &sversion, sizeof(sversion));
if (p != NULL) {
- return *(STSchema**)p;
+ ps = *(STSchema**)p;
+ goto _end;
}
- STSchema* ptr = NULL;
- int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, uid, sversion, &ptr);
+ code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, uid, sversion, &ps);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = tSimpleHashPut(pReader->pSchemaMap, &sversion, sizeof(sversion), &ps, POINTER_BYTES);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
terrno = code;
return NULL;
- } else {
- code = tSimpleHashPut(pReader->pSchemaMap, &sversion, sizeof(sversion), &ptr, POINTER_BYTES);
- if (code != TSDB_CODE_SUCCESS) {
- terrno = code;
- return NULL;
- }
- return ptr;
}
+ return ps;
}
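
// Illustrative sketch, not part of the patch: the lookup order that doGetSchemaForTSRow() follows
// after the rewrite above. It is a version-keyed cache: use the newest in-memory schema when the
// versions match, otherwise consult the map, otherwise fetch from meta and remember the result.
// The cache and fetch helpers below are simplified stand-ins for tSimpleHashGet/tSimpleHashPut
// and metaGetTbTSchemaEx, not the real TDengine API.
#include <stdint.h>
#include <stddef.h>

typedef struct SketchSchema { int32_t version; } SketchSchema;

// a toy one-slot cache standing in for pReader->pSchemaMap
typedef struct SketchSchemaCache { int32_t version; SketchSchema* schema; } SketchSchemaCache;

static SketchSchema* sketchCacheGet(SketchSchemaCache* cache, int32_t version) {
  return (cache->schema != NULL && cache->version == version) ? cache->schema : NULL;
}

static void sketchCachePut(SketchSchemaCache* cache, int32_t version, SketchSchema* schema) {
  cache->version = version;
  cache->schema = schema;
}

// stand-in for the meta read: "fetch" a schema of the requested version
static int32_t sketchFetchFromMeta(int32_t version, SketchSchema** schema) {
  static SketchSchema fetched;
  fetched.version = version;
  *schema = &fetched;
  return 0;
}

static int32_t sketchGetSchemaForRow(SketchSchemaCache* cache, SketchSchema* newest, int32_t version,
                                     SketchSchema** result) {
  if (newest != NULL && newest->version == version) {
    *result = newest;  // fast path: the newest in-memory schema already matches
    return 0;
  }
  SketchSchema* cached = sketchCacheGet(cache, version);
  if (cached != NULL) {
    *result = cached;  // hit in the version-keyed cache
    return 0;
  }
  int32_t code = sketchFetchFromMeta(version, result);  // fall back to meta
  if (code == 0) {
    sketchCachePut(cache, version, *result);  // remember the fetched version for next time
  }
  return code;
}
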
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, SSttBlockReader* pSttBlockReader) {
- SRowMerger* pMerger = &pReader->status.merger;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
SRow* pTSRow = NULL;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- int32_t pkSrcSlot = pReader->suppInfo.pkSrcSlot;
+ SBlockData* pBlockData = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SRowKey* pSttKey = NULL;
+ int32_t pkSrcSlot = 0;
+ SRowKey k = {0};
+ STSchema* pSchema = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
+ pBlockData = &pReader->status.fileBlockData;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pkSrcSlot = pReader->suppInfo.pkSrcSlot;
- SRowKey* pSttKey = NULL;
if (hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) {
pSttKey = getCurrentKeyInSttBlock(pSttBlockReader);
}
- SRowKey k = {0};
tRowGetKeyEx(pRow, &k);
- STSchema* pSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
- if (pSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, terrno);
}
SRowKey* pfKey = &(SRowKey){0};
@@ -1971,10 +2353,8 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
// merge is not initialized yet, due to the fact that the pReader->info.pSchema is not initialized
- int32_t code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
- if (code != 0) {
- return code;
- }
+ code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
+ TSDB_CHECK_CODE(code, lino, _end);
SRowKey minKey = k;
if (pReader->info.order == TSDB_ORDER_ASC) {
@@ -2002,79 +2382,83 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
// file block ---> stt block -----> mem
if (pkCompEx(&minKey, pfKey) == 0) {
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pfKey, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pkCompEx(&minKey, pSttKey) == 0) {
TSDBROW* fRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
code = tsdbRowMergerAdd(pMerger, fRow1, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
- code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange,
+ pReader->idStr);
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pkCompEx(&minKey, &k) == 0) {
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, &k, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
- taosMemoryFree(pTSRow);
+ taosMemoryFreeClear(pTSRow);
tsdbRowMergerClear(pMerger);
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
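
// Illustrative sketch, not part of the patch: why the merge paths above switch from
// taosMemoryFree(pTSRow) to taosMemoryFreeClear(pTSRow). The assumption is that the *Clear
// variant also resets the pointer to NULL, so once the function can jump to a shared _end label
// the same pointer cannot be released or reused twice. The macro below is a simplified stand-in
// for the TDengine helper.
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_FREE_CLEAR(p) \
  do {                       \
    free(p);                 \
    (p) = NULL;              \
  } while (0)

static int32_t sketchMergeOnce(void) {
  char* pRow = malloc(16);
  if (pRow == NULL) {
    return -1;
  }
  // ... build and append the merged row ...
  SKETCH_FREE_CLEAR(pRow);  // pRow is NULL from here on
  SKETCH_FREE_CLEAR(pRow);  // an accidental second release is now a harmless free(NULL)
  return 0;
}
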
static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader* pSttBlockReader, SRowKey* pKey,
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SRowMerger* pMerger = &pReader->status.merger;
int32_t code = TSDB_CODE_SUCCESS;
- int32_t pkSrcSlot = pReader->suppInfo.pkSrcSlot;
+ int32_t lino = 0;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SRowMerger* pMerger = NULL;
+ SRow* pTSRow = NULL;
+ int32_t pkSrcSlot = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pMerger = &pReader->status.merger;
+ pkSrcSlot = pReader->suppInfo.pkSrcSlot;
// merge is not initialized yet, due to the fact that the pReader->info.pSchema is not initialized
code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
- if (code != 0) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
bool dataInDataFile = hasDataInFileBlock(pBlockData, pDumpInfo);
bool dataInSttFile = hasDataInSttBlock(pBlockScanInfo);
if (dataInDataFile && (!dataInSttFile)) {
// no stt file block available, only data block exists
- return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ code = mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if ((!dataInDataFile) && dataInSttFile) {
// no data in data file exists
- return mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ code = mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (pBlockScanInfo->cleanSttBlocks && pReader->info.execMode == READER_EXEC_ROWS) {
// opt model for count data in stt file, which is not overlap with data blocks in files.
- return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ code = mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
// row in both stt file blocks and data file blocks
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SRowKey* pSttKey = getCurrentKeyInSttBlock(pSttBlockReader);
@@ -2083,74 +2467,90 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader*
if (ASCENDING_TRAVERSE(pReader->info.order)) {
if (ret < 0) { // asc
- return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ code = mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (ret > 0) {
- return mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ code = mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else { // desc
if (ret > 0) {
- return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ code = mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (ret < 0) {
- return mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ code = mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
+ if (ret != 0) {
+ goto _end;
+ }
// pKey == pSttKey
tRowKeyAssign(&pBlockScanInfo->lastProcKey, pKey);
// the following for key == sttKey->key.ts
// file block ------> stt block
- SRow* pTSRow = NULL;
+
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// pSttKey will be changed when sttBlockReader iterates to the next row, so use pKey instead.
- code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange,
+ pReader->idStr);
+ TSDB_CHECK_CODE(code, lino, _end);
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
- taosMemoryFree(pTSRow);
+ taosMemoryFreeClear(pTSRow);
tsdbRowMergerClear(pMerger);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
SSttBlockReader* pSttBlockReader) {
- SRowMerger* pMerger = &pReader->status.merger;
- SRow* pTSRow = NULL;
int32_t code = TSDB_CODE_SUCCESS;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SArray* pDelList = pBlockScanInfo->delSkyline;
- int32_t pkSrcSlot = pReader->suppInfo.pkSrcSlot;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
+ SRow* pTSRow = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SArray* pDelList = NULL;
+ int32_t pkSrcSlot = 0;
TSDBROW* pRow = NULL;
TSDBROW* piRow = NULL;
+ SRowKey* pSttKey = NULL;
- getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader, &pRow);
- getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader, &piRow);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pDelList = pBlockScanInfo->delSkyline;
+ pkSrcSlot = pReader->suppInfo.pkSrcSlot;
+
+ code = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+ code = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader, &piRow);
+ TSDB_CHECK_CODE(code, lino, _end);
- SRowKey* pSttKey = NULL;
if (hasDataInSttBlock(pBlockScanInfo) && (!pBlockScanInfo->cleanSttBlocks)) {
pSttKey = getCurrentKeyInSttBlock(pSttBlockReader);
}
@@ -2169,24 +2569,18 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
STSchema* pSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
- if (pSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, terrno);
}
STSchema* piSchema = NULL;
if (piRow->type == TSDBROW_ROW_FMT) {
piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
- if (piSchema == NULL) {
- return terrno;
- }
+      TSDB_CHECK_NULL(piSchema, code, lino, _end, terrno);
}
// merge is not initialized yet, due to the fact that the pReader->info.pSchema is not initialized
code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
- if (code != 0) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
SRowKey minKey = k;
if (ASCENDING_TRAVERSE(pReader->info.order)) {
@@ -2221,72 +2615,70 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
if (pkCompEx(&minKey, pfKey) == 0) {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pfKey, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pkCompEx(&minKey, pSttKey) == 0) {
TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = doMergeRowsInSttBlock(pSttBlockReader, pBlockScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange,
+ pReader->idStr);
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pkCompEx(&minKey, &ik) == 0) {
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, &ik, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pkCompEx(&minKey, &k) == 0) {
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, &k, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
- taosMemoryFree(pTSRow);
+ taosMemoryFreeClear(pTSRow);
tsdbRowMergerClear(pMerger);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
int32_t doInitMemDataIter(STsdbReader* pReader, STbData** pData, STableBlockScanInfo* pBlockScanInfo, STsdbRowKey* pKey,
SMemTable* pMem, SIterInfo* pIter, const char* type) {
int32_t code = TSDB_CODE_SUCCESS;
- int32_t backward = (!ASCENDING_TRAVERSE(pReader->info.order));
+ int32_t lino = 0;
+ int32_t backward = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(type, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ backward = (!ASCENDING_TRAVERSE(pReader->info.order));
pIter->hasVal = false;
if (pMem != NULL) {
+ TSDB_CHECK_NULL(pData, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pData = tsdbGetTbDataFromMemTable(pMem, pReader->info.suid, pBlockScanInfo->uid);
if ((*pData) != NULL) {
@@ -2301,23 +2693,30 @@ int32_t doInitMemDataIter(STsdbReader* pReader, STbData** pData, STableBlockScan
} else {
tsdbError("%p uid:%" PRIu64 ", failed to create iterator for %s, code:%s, %s", pReader, pBlockScanInfo->uid,
type, tstrerror(code), pReader->idStr);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
} else {
tsdbDebug("%p uid:%" PRIu64 ", no data in %s, %s", pReader, pBlockScanInfo->uid, type, pReader->idStr);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-static void doForwardDataIter(SRowKey* pKey, SIterInfo* pIter, STableBlockScanInfo* pBlockScanInfo,
- STsdbReader* pReader) {
- SRowKey rowKey = {0};
+static int32_t doForwardDataIter(SRowKey* pKey, SIterInfo* pIter, STableBlockScanInfo* pBlockScanInfo,
+ STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowKey rowKey = {0};
TSDBROW* pRow = NULL;
while (1) {
- getValidMemRow(pIter, pBlockScanInfo->delSkyline, pReader, &pRow);
+ code = getValidMemRow(pIter, pBlockScanInfo->delSkyline, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
if (!pIter->hasVal) {
break;
}
@@ -2330,27 +2729,56 @@ static void doForwardDataIter(SRowKey* pKey, SIterInfo* pIter, STableBlockScanIn
break;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// handle the open interval issue. Find the first row key that is greater than the given one.
-static void forwardDataIter(SRowKey* pKey, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
- doForwardDataIter(pKey, &pBlockScanInfo->iter, pBlockScanInfo, pReader);
- doForwardDataIter(pKey, &pBlockScanInfo->iiter, pBlockScanInfo, pReader);
+static int32_t forwardDataIter(SRowKey* pKey, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = doForwardDataIter(pKey, &pBlockScanInfo->iter, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+ code = doForwardDataIter(pKey, &pBlockScanInfo->iiter, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
STbData* d = NULL;
STbData* di = NULL;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ bool asc = false;
bool forward = true;
- STsdbReadSnap* pSnap = pReader->pReadSnap;
- STimeWindow* pWindow = &pReader->info.window;
+ STsdbReadSnap* pSnap = NULL;
+ STimeWindow* pWindow = NULL;
+ STsdbRowKey startKey;
+
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ pSnap = pReader->pReadSnap;
+ pWindow = &pReader->info.window;
if (pBlockScanInfo->iterInit) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- STsdbRowKey startKey;
startKey.key = pBlockScanInfo->lastProcKey;
startKey.version = asc ? pReader->info.verRange.minVer : pReader->info.verRange.maxVer;
if ((asc && (startKey.key.ts < pWindow->skey)) || ((!asc) && startKey.key.ts > pWindow->ekey)) {
@@ -2358,80 +2786,114 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
forward = false;
}
- int32_t code = doInitMemDataIter(pReader, &d, pBlockScanInfo, &startKey, pSnap->pMem, &pBlockScanInfo->iter, "mem");
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = doInitMemDataIter(pReader, &d, pBlockScanInfo, &startKey, pSnap->pMem, &pBlockScanInfo->iter, "mem");
+ TSDB_CHECK_CODE(code, lino, _end);
code = doInitMemDataIter(pReader, &di, pBlockScanInfo, &startKey, pSnap->pIMem, &pBlockScanInfo->iiter, "imem");
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = loadMemTombData(&pBlockScanInfo->pMemDelData, d, di, pReader->info.verRange.maxVer);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (forward) {
- forwardDataIter(&startKey.key, pBlockScanInfo, pReader);
+ code = forwardDataIter(&startKey.key, pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
}
pBlockScanInfo->iterInit = true;
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
-static bool isValidFileBlockRow(SBlockData* pBlockData, int32_t rowIndex, STableBlockScanInfo* pBlockScanInfo, bool asc,
- STsdbReaderInfo* pInfo, STsdbReader* pReader) {
+static int32_t isValidFileBlockRow(SBlockData* pBlockData, int32_t rowIndex, STableBlockScanInfo* pBlockScanInfo,
+ bool asc, STsdbReaderInfo* pInfo, STsdbReader* pReader, bool* valid) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+  TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+  TSDB_CHECK_NULL(valid, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *valid = false;
// it is an multi-table data block
if (pBlockData->aUid != NULL) {
uint64_t uid = pBlockData->aUid[rowIndex];
if (uid != pBlockScanInfo->uid) { // move to next row
- return false;
+ *valid = false;
+ goto _end;
}
}
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
// check for version and time range
int64_t ver = pBlockData->aVersion[rowIndex];
if (ver > pInfo->verRange.maxVer || ver < pInfo->verRange.minVer) {
- return false;
+ *valid = false;
+ goto _end;
}
int64_t ts = pBlockData->aTSKEY[rowIndex];
if (ts > pInfo->window.ekey || ts < pInfo->window.skey) {
- return false;
+ *valid = false;
+ goto _end;
}
if ((asc && (ts < pBlockScanInfo->lastProcKey.ts)) || ((!asc) && (ts > pBlockScanInfo->lastProcKey.ts))) {
- return false;
+ *valid = false;
+ goto _end;
}
if (ts == pBlockScanInfo->lastProcKey.ts) { // todo opt perf
SRowKey nextRowKey; // lazy eval
tColRowGetKey(pBlockData, rowIndex, &nextRowKey);
if (pkCompEx(&pBlockScanInfo->lastProcKey, &nextRowKey) == 0) {
- return false;
+ *valid = false;
+ goto _end;
}
}
if (pBlockScanInfo->delSkyline != NULL && TARRAY_SIZE(pBlockScanInfo->delSkyline) > 0) {
- bool dropped = hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, ts, ver, pInfo->order,
- &pInfo->verRange, pReader->suppInfo.numOfPks > 0);
+ bool dropped = false;
+ code = hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, ts, ver, pInfo->order,
+ &pInfo->verRange, pReader->suppInfo.numOfPks > 0, &dropped);
+ TSDB_CHECK_CODE(code, lino, _end);
if (dropped) {
- return false;
+ *valid = false;
+ goto _end;
}
}
- return true;
+ *valid = true;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- int32_t order = pReader->info.order;
- bool asc = ASCENDING_TRAVERSE(order);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t order = 0;
+ bool asc = false;
+ int64_t st = 0;
+ SSttDataInfoForTable info = (SSttDataInfoForTable){0};
+
+ TSDB_CHECK_NULL(pSttBlockReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ order = pReader->info.order;
+ asc = ASCENDING_TRAVERSE(order);
// the stt block reader has been initialized for this table.
if (pSttBlockReader->uid == pScanInfo->uid) {
- return;
+ goto _end;
}
if (pSttBlockReader->uid != 0) {
@@ -2447,7 +2909,7 @@ static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA;
tsdbDebug("uid:%" PRIu64 " set no stt-file data after stt-block retrieved, %s", pScanInfo->uid, pReader->idStr);
}
- return;
+ goto _end;
}
STimeWindow w = pSttBlockReader->window;
@@ -2457,7 +2919,7 @@ static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
w.ekey = pScanInfo->sttKeyInfo.nextProcKey.ts;
}
- int64_t st = taosGetTimestampUs();
+ st = taosGetTimestampUs();
tsdbDebug("init stt block reader, window:%" PRId64 "-%" PRId64 ", uid:%" PRIu64 ", %s", w.skey, w.ekey,
pScanInfo->uid, pReader->idStr);
@@ -2481,32 +2943,17 @@ static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
.rspRows = (pReader->info.execMode == READER_EXEC_ROWS),
};
- SSttDataInfoForTable info = {.pKeyRangeList = taosArrayInit(4, sizeof(SSttKeyRange))};
- if (info.pKeyRangeList == NULL) {
- pReader->code = terrno;
- return;
- }
+ info.pKeyRangeList = taosArrayInit(4, sizeof(SSttKeyRange));
+ TSDB_CHECK_NULL(info.pKeyRangeList, code, lino, _end, terrno);
- int32_t code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(info.pKeyRangeList);
- pReader->code = code;
- return;
- }
+ code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info);
+ TSDB_CHECK_CODE(code, lino, _end);
code = initMemDataIterator(pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(info.pKeyRangeList);
- pReader->code = code;
- return;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initDelSkylineIterator(pScanInfo, pReader->info.order, &pReader->cost);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(info.pKeyRangeList);
- pReader->code = code;
- return;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (conf.rspRows) {
pScanInfo->cleanSttBlocks = isCleanSttBlock(info.pKeyRangeList, &pReader->info.window, pScanInfo, order);
@@ -2536,23 +2983,35 @@ static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
} else { // not clean stt blocks
INIT_KEYRANGE(&pScanInfo->sttRange); // reset the time window
code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange);
+ if (code != TSDB_CODE_SUCCESS) {
+ pScanInfo->sttBlockReturned = false;
+ TSDB_CHECK_CODE(code, lino, _end);
+ }
}
} else {
pScanInfo->cleanSttBlocks = false;
INIT_KEYRANGE(&pScanInfo->sttRange); // reset the time window
code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange);
+ if (code != TSDB_CODE_SUCCESS) {
+ pScanInfo->sttBlockReturned = false;
+ TSDB_CHECK_CODE(code, lino, _end);
+ }
}
pScanInfo->sttBlockReturned = false;
- taosArrayDestroy(info.pKeyRangeList);
-
int64_t el = taosGetTimestampUs() - st;
pReader->cost.initSttBlockReader += (el / 1000.0);
tsdbDebug("init stt block reader completed, elapsed time:%" PRId64 "us %s", el, pReader->idStr);
- if (code != 0) {
- pReader->code = code;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ if (pReader) {
+ pReader->code = code;
+ }
}
+ taosArrayDestroy(info.pKeyRangeList);
}
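
// Illustrative sketch, not part of the patch: the cleanup layout that initSttBlockReader() adopts
// above. Instead of destroying info.pKeyRangeList separately in every error branch, the array is
// released once after the _end label, so success and failure paths share the same teardown; and
// because the function stays void, a failure is parked in pReader->code for the caller to pick
// up. The helpers and error values below are simplified stand-ins.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int32_t sketchOpenMergeTree(void* keyRangeList) { return keyRangeList ? 0 : -1; }  // placeholder step

static int32_t sketchInitReader(void) {
  int32_t code = 0;
  int32_t lino = 0;
  void*   keyRangeList = malloc(64);  // stands in for taosArrayInit()

  if (keyRangeList == NULL) {
    code = -1;  // stands in for terrno
    lino = __LINE__;
    goto _end;
  }

  code = sketchOpenMergeTree(keyRangeList);  // any failing step simply jumps to _end
  if (code != 0) {
    lino = __LINE__;
    goto _end;
  }

_end:
  if (code != 0) {
    printf("%s failed at line %d, code:%d\n", __func__, (int)lino, (int)code);
  }
  free(keyRangeList);  // single release point reached on every path; free(NULL) is a no-op
  return code;
}
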
static bool hasDataInSttBlock(STableBlockScanInfo* pInfo) { return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA; }
@@ -2566,57 +3025,71 @@ bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo*
int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, SRowKey* pKey,
STsdbReader* pReader) {
- SRowMerger* pMerger = &pReader->status.merger;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRow* pTSRow = NULL;
+ SRowMerger* pMerger = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
bool copied = false;
- int32_t code = tryCopyDistinctRowFromFileBlock(pReader, pBlockData, pKey, pDumpInfo, &copied);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ code = tryCopyDistinctRowFromFileBlock(pReader, pBlockData, pKey, pDumpInfo, &copied);
+ TSDB_CHECK_CODE(code, lino, _end);
// merge is not initialized yet, due to the fact that the pReader->info.pSchema is not initialized
code = initRowMergeIfNeeded(pReader, pBlockScanInfo->uid);
- if (code != 0) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
tRowKeyAssign(&pBlockScanInfo->lastProcKey, pKey);
if (copied) {
- return TSDB_CODE_SUCCESS;
- } else {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
- SRow* pTSRow = NULL;
- code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = tsdbRowMergerGetRow(pMerger, &pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
-
- taosMemoryFree(pTSRow);
- tsdbRowMergerClear(pMerger);
- return code;
+ goto _end;
}
+
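+  // the row could not be copied directly: merge it with the other rows sharing the same key, then append the merged row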
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+
+ code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pKey, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
+
+ taosMemoryFreeClear(pTSRow);
+ tsdbRowMergerClear(pMerger);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t mergeRowsInSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- bool copied = false;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SRow* pTSRow = NULL;
- int32_t pkSrcSlot = pReader->suppInfo.pkSrcSlot;
- SRowMerger* pMerger = &pReader->status.merger;
+ int32_t pkSrcSlot = 0;
+ SRowMerger* pMerger = NULL;
+ bool copied = false;
+
+ TSDB_CHECK_NULL(pSttBlockReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pkSrcSlot = pReader->suppInfo.pkSrcSlot;
+ pMerger = &pReader->status.merger;
// let's record the last processed key
tRowKeyAssign(&pScanInfo->lastProcKey, getCurrentKeyInSttBlock(pSttBlockReader));
@@ -2630,51 +3103,54 @@ int32_t mergeRowsInSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanIn
pReader->idStr);
}
- int32_t code =
- tryCopyDistinctRowFromSttBlock(&fRow, pSttBlockReader, pScanInfo, &pScanInfo->lastProcKey, pReader, &copied);
- if (code) {
- return code;
- }
+ code = tryCopyDistinctRowFromSttBlock(&fRow, pSttBlockReader, pScanInfo, &pScanInfo->lastProcKey, pReader, &copied);
+ TSDB_CHECK_CODE(code, lino, _end);
if (copied) {
- return TSDB_CODE_SUCCESS;
- } else {
- code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
- code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doMergeRowsInSttBlock(pSttBlockReader, pScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = tsdbRowMergerGetRow(pMerger, &pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pScanInfo);
-
- taosMemoryFree(pTSRow);
- tsdbRowMergerClear(pMerger);
- return code;
+ goto _end;
}
+
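+  // merge the current stt row with the following rows that share the same key, then append the merged row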
+ code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
+ code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = doMergeRowsInSttBlock(pSttBlockReader, pScanInfo, pMerger, pkSrcSlot, &pReader->info.verRange, pReader->idStr);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pScanInfo);
+
+ taosMemoryFreeClear(pTSRow);
+ tsdbRowMergerClear(pMerger);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
SBlockData* pBlockData, SSttBlockReader* pSttBlockReader) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ TSDBROW* pRow = NULL;
+ TSDBROW* piRow = NULL;
+ SRowKey* pKey = &(SRowKey){0};
- TSDBROW *pRow = NULL, *piRow = NULL;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
- SRowKey* pKey = &(SRowKey){0};
if (hasDataInFileBlock(pBlockData, pDumpInfo)) {
tColRowGetKey(pBlockData, pDumpInfo->rowIndex, pKey);
} else {
@@ -2682,46 +3158,65 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
}
if (pBlockScanInfo->iter.hasVal) {
- getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader, &pRow);
+ code = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pBlockScanInfo->iiter.hasVal) {
- getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader, &piRow);
+ code = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader, &piRow);
+ TSDB_CHECK_CODE(code, lino, _end);
}
- // two levels of mem-table does contain the valid rows
if (pRow != NULL && piRow != NULL) {
- return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pSttBlockReader);
+    // both mem and imem contain valid rows
+ code = doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pSttBlockReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+ } else if (pBlockScanInfo->iiter.hasVal) {
+ // imem + file + stt block
+ code = doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, pSttBlockReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+ } else if (pBlockScanInfo->iter.hasVal) {
+ // mem + file + stt block
+ code = doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, pSttBlockReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+ } else {
+    // file data blocks + stt block
+ code = mergeFileBlockAndSttBlock(pReader, pSttBlockReader, pKey, pBlockScanInfo, pBlockData);
+ TSDB_CHECK_CODE(code, lino, _end);
}
- // imem + file + stt block
- if (pBlockScanInfo->iiter.hasVal) {
- return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, pSttBlockReader);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
-
- // mem + file + stt block
- if (pBlockScanInfo->iter.hasVal) {
- return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, pSttBlockReader);
- }
-
- // files data blocks + stt block
- return mergeFileBlockAndSttBlock(pReader, pSttBlockReader, pKey, pBlockScanInfo, pBlockData);
+ return code;
}
static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pBlockScanInfo,
STsdbReader* pReader, bool* loadNeighbor) {
int32_t code = TSDB_CODE_SUCCESS;
- int32_t order = pReader->info.order;
- SDataBlockIter* pIter = &pReader->status.blockIter;
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1;
+ int32_t lino = 0;
+ int32_t order = 0;
+ SDataBlockIter* pIter = NULL;
+ SBlockLoadSuppInfo* pSupInfo = NULL;
+ int32_t step = 0;
int32_t nextIndex = -1;
SBrinRecord rec = {0};
+ bool hasNeighbor = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(loadNeighbor, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ order = pReader->info.order;
+ pIter = &pReader->status.blockIter;
+ pSupInfo = &pReader->suppInfo;
+ step = ASCENDING_TRAVERSE(order) ? 1 : -1;
*loadNeighbor = false;
- bool hasNeighbor = getNeighborBlockOfTable(pIter, pBlockInfo, pBlockScanInfo, &nextIndex, order, &rec, pSupInfo);
+ code = getNeighborBlockOfTable(pIter, pBlockInfo, pBlockScanInfo, &nextIndex, order, &rec, pSupInfo, &hasNeighbor);
+ TSDB_CHECK_CODE(code, lino, _end);
if (!hasNeighbor) { // do nothing
- return code;
+ goto _end;
}
// load next block
@@ -2731,28 +3226,27 @@ static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlock
// 1. find the next neighbor block in the scan block list
STableDataBlockIdx* tableDataBlockIdx = taosArrayGet(pBlockScanInfo->pBlockIdxList, nextIndex);
- if (tableDataBlockIdx == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(tableDataBlockIdx, code, lino, _end, TSDB_CODE_INVALID_PARA);
// 2. remove it from the scan block list
int32_t neighborIndex = tableDataBlockIdx->globalIndex;
code = setFileBlockActiveInBlockIter(pReader, pBlockIter, neighborIndex, step);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// 3. load the neighbor block, and set it to be the currently accessed file data block
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pBlockInfo->uid);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// 4. check the data values
- initBlockDumpInfo(pReader, pBlockIter);
+ code = initBlockDumpInfo(pReader, pBlockIter);
+ TSDB_CHECK_CODE(code, lino, _end);
*loadNeighbor = true;
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -2772,15 +3266,27 @@ void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInf
}
static int32_t buildComposedDataBlock(STsdbReader* pReader) {
- int32_t code = TSDB_CODE_SUCCESS;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- int64_t st = taosGetTimestampUs();
- int32_t step = asc ? 1 : -1;
- double el = 0;
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
- SFileDataBlockInfo* pBlockInfo = NULL;
- SSttBlockReader* pSttBlockReader = pReader->status.fileIter.pSttBlockReader;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+ int64_t st = 0;
+ double el = 0;
+ int32_t step = 0;
+ SSDataBlock* pResBlock = NULL;
+ SFileDataBlockInfo* pBlockInfo = NULL;
+ SSttBlockReader* pSttBlockReader = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ STableBlockScanInfo* pBlockScanInfo = NULL;
+ TSDBKEY keyInBuf;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ st = taosGetTimestampUs();
+ step = asc ? 1 : -1;
+ pResBlock = pReader->resBlockInfo.pResBlock;
+ pSttBlockReader = pReader->status.fileIter.pSttBlockReader;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
code = getCurrentBlockInfo(&pReader->status.blockIter, &pBlockInfo, pReader->idStr);
if (code != TSDB_CODE_SUCCESS) {
@@ -2792,38 +3298,41 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
return code;
}
- STableBlockScanInfo* pBlockScanInfo = NULL;
code = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, &pBlockScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
+ code = getCurrentKeyInBuf(pBlockScanInfo, pReader, &keyInBuf);
+ TSDB_CHECK_CODE(code, lino, _end);
// it is a clean block, load it directly
int64_t cap = pReader->resBlockInfo.capacity;
- bool directCopy = isCleanFileDataBlock(pReader, pBlockInfo, pBlockScanInfo, keyInBuf) &&
- (pBlockInfo->numRow <= cap) && (pBlockScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA) &&
+ bool isClean = false;
+ code = isCleanFileDataBlock(pReader, pBlockInfo, pBlockScanInfo, keyInBuf, &isClean);
+ TSDB_CHECK_CODE(code, lino, _end);
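+  // the block can be returned as-is only if it is clean, fits into the result block, has no stt data, and does not overlap with rows in the buffer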
+ bool directCopy = isClean && (pBlockInfo->numRow <= cap) && (pBlockScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA) &&
((asc && ((pBlockInfo->lastKey < keyInBuf.ts) || (keyInBuf.ts == INT64_MIN))) ||
(!asc && (pBlockInfo->lastKey > keyInBuf.ts)));
if (directCopy) {
code = copyBlockDataToSDataBlock(pReader, &pBlockScanInfo->lastProcKey);
+ TSDB_CHECK_CODE(code, lino, _end);
goto _end;
}
SBlockData* pBlockData = &pReader->status.fileBlockData;
initSttBlockReader(pSttBlockReader, pBlockScanInfo, pReader);
- if (pReader->code != 0) {
- code = pReader->code;
- goto _end;
- }
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
while (1) {
bool hasBlockData = false;
{
while (pBlockData->nRow > 0 && pBlockData->uid == pBlockScanInfo->uid) {
// find the first qualified row in data block
- if (isValidFileBlockRow(pBlockData, pDumpInfo->rowIndex, pBlockScanInfo, asc, &pReader->info, pReader)) {
+ bool valid = false;
+ code =
+ isValidFileBlockRow(pBlockData, pDumpInfo->rowIndex, pBlockScanInfo, asc, &pReader->info, pReader, &valid);
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (valid) {
hasBlockData = true;
break;
}
@@ -2833,15 +3342,14 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
if (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0) {
// NOTE: get the new block info
code = getCurrentBlockInfo(&pReader->status.blockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// continue check for the next file block if the last ts in the current block
// is overlapped with the next neighbor block
bool loadNeighbor = false;
code = loadNeighborIfOverlap(pBlockInfo, pBlockScanInfo, pReader, &loadNeighbor);
if ((!loadNeighbor) || (code != 0)) {
+ lino = __LINE__;
setBlockAllDumped(pDumpInfo, pBlockInfo->lastKey, pReader->info.order);
break;
}
@@ -2855,9 +3363,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
}
code = buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pSttBlockReader);
- if (code) {
- goto _end;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// currently loaded file data block is consumed
if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) {
@@ -2871,16 +3377,21 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
}
_end:
- el = (taosGetTimestampUs() - st) / 1000.0;
- updateComposedBlockInfo(pReader, el, pBlockScanInfo);
+ if (pReader) {
+ el = (taosGetTimestampUs() - st) / 1000.0;
+ updateComposedBlockInfo(pReader, el, pBlockScanInfo);
+ }
- if (pResBlock->info.rows > 0) {
+ if (pResBlock && pResBlock->info.rows > 0) {
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%" PRId64
", elapsed time:%.2f ms %s",
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, el, pReader->idStr);
}
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -2895,32 +3406,35 @@ int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) {
}
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SReadCostSummary* pCost) {
- int32_t code = 0;
- int32_t newDelDataInFile = taosArrayGetSize(pBlockScanInfo->pFileDelData);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t newDelDataInFile = 0;
+ int64_t st = 0;
+ SArray* pSource = NULL;
+
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ newDelDataInFile = taosArrayGetSize(pBlockScanInfo->pFileDelData);
if (newDelDataInFile == 0 &&
((pBlockScanInfo->delSkyline != NULL) || (TARRAY_SIZE(pBlockScanInfo->pMemDelData) == 0))) {
- return code;
+ goto _end;
}
- int64_t st = taosGetTimestampUs();
+ st = taosGetTimestampUs();
if (pBlockScanInfo->delSkyline != NULL) {
taosArrayClear(pBlockScanInfo->delSkyline);
} else {
pBlockScanInfo->delSkyline = taosArrayInit(4, sizeof(TSDBKEY));
- if (pBlockScanInfo->delSkyline == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pBlockScanInfo->delSkyline, code, lino, _end, terrno);
}
- SArray* pSource = pBlockScanInfo->pFileDelData;
+ pSource = pBlockScanInfo->pFileDelData;
if (pSource == NULL) {
pSource = pBlockScanInfo->pMemDelData;
} else {
- void* p1 = taosArrayAddAll(pSource, pBlockScanInfo->pMemDelData);
- if (p1 == NULL) {
- return terrno;
- }
+ const void* p1 = taosArrayAddAll(pSource, pBlockScanInfo->pMemDelData);
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
}
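+  // merge the file-level and in-memory delete records into a single delete skyline for this table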
code = tsdbBuildDeleteSkyline(pSource, 0, taosArrayGetSize(pSource) - 1, pBlockScanInfo->delSkyline);
@@ -2932,28 +3446,45 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t orde
pBlockScanInfo->iiter.index = index;
pBlockScanInfo->fileDelIndex = index;
pBlockScanInfo->sttBlockDelIndex = index;
+ TSDB_CHECK_CODE(code, lino, _end);
+ TSDB_CHECK_NULL(pCost, code, lino, _end, TSDB_CODE_INVALID_PARA);
double el = taosGetTimestampUs() - st;
pCost->createSkylineIterTime = el / 1000.0;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- TSDBKEY key = {.ts = TSKEY_INITIAL_VAL}, ikey = {.ts = TSKEY_INITIAL_VAL};
-
- bool hasKey = false, hasIKey = false;
+int32_t getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader, TSDBKEY* pkey) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+ bool hasKey = false;
+ bool hasIKey = false;
TSDBROW* pRow = NULL;
TSDBROW* pIRow = NULL;
+ TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
+ TSDBKEY ikey = {.ts = TSKEY_INITIAL_VAL};
- getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader, &pRow);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pkey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+
+ code = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pRow != NULL) {
hasKey = true;
key = TSDBROW_KEY(pRow);
}
- getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader, &pIRow);
+ code = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader, &pIRow);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pIRow != NULL) {
hasIKey = true;
ikey = TSDBROW_KEY(pIRow);
@@ -2962,18 +3493,24 @@ TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader)
if (hasKey) {
if (hasIKey) { // has data in mem & imem
if (asc) {
- return key.ts <= ikey.ts ? key : ikey;
+ *pkey = key.ts <= ikey.ts ? key : ikey;
} else {
- return key.ts <= ikey.ts ? ikey : key;
+ *pkey = key.ts <= ikey.ts ? ikey : key;
}
} else { // no data in imem
- return key;
+ *pkey = key;
}
} else {
// no data in mem & imem, return the initial value
// only imem has data, return ikey
- return ikey;
+ *pkey = ikey;
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void prepareDurationForNextFileSet(STsdbReader* pReader) {
@@ -3015,29 +3552,34 @@ static void prepareDurationForNextFileSet(STsdbReader* pReader) {
}
static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SArray* pTableList) {
- SReaderStatus* pStatus = &pReader->status;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SArray* pIndexList = NULL;
+ size_t numOfTables = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockNum, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
pBlockNum->numOfBlocks = 0;
pBlockNum->numOfSttFiles = 0;
- size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
- SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBrinBlk));
- if (pIndexList == NULL) {
- return terrno;
- }
+ numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
+ pIndexList = taosArrayInit(numOfTables, sizeof(SBrinBlk));
+ TSDB_CHECK_NULL(pIndexList, code, lino, _end, terrno);
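+  // iterate over the filesets until one of them provides data blocks or stt files for the query, or no fileset is left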
while (1) {
// only check here, since the iterate data in memory is very fast.
if (pReader->code != TSDB_CODE_SUCCESS) {
tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", tstrerror(pReader->code), pReader->idStr);
- return pReader->code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
- bool hasNext = false;
- int32_t code = filesetIteratorNext(&pStatus->fileIter, pReader, &hasNext);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pIndexList);
- return code;
- }
+ bool hasNext = false;
+ code = filesetIteratorNext(&pStatus->fileIter, pReader, &hasNext);
+ TSDB_CHECK_CODE(code, lino, _end);
if (!hasNext) { // no data files on disk
break;
@@ -3045,17 +3587,11 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SAr
taosArrayClear(pIndexList);
code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pIndexList);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (taosArrayGetSize(pIndexList) > 0 || pReader->status.pCurrentFileset->lvlArr->size > 0) {
code = loadFileBlockBrinInfo(pReader, pIndexList, pBlockNum, pTableList);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pIndexList);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pBlockNum->numOfBlocks + pBlockNum->numOfSttFiles > 0) {
if (pReader->bFilesetDelimited) {
@@ -3068,23 +3604,42 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SAr
// no blocks in current file, try next files
}
- taosArrayDestroy(pIndexList);
- return loadDataFileTombDataForAll(pReader);
+ code = loadDataFileTombDataForAll(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (pIndexList) {
+ taosArrayDestroy(pIndexList);
+ }
+ return code;
}
// pTableIter can be NULL, no need to handle the return value
static int32_t resetTableListIndex(SReaderStatus* pStatus, const char* id) {
- STableUidList* pList = &pStatus->uidList;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STableUidList* pList = NULL;
+
+ TSDB_CHECK_NULL(pStatus, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pList = &pStatus->uidList;
pList->currentIndex = 0;
uint64_t uid = pList->tableUidList[0];
pStatus->pTableIter = tSimpleHashGet(pStatus->pTableMap, &uid, sizeof(uid));
if (pStatus->pTableIter == NULL) {
- tsdbError("%s failed to load tableBlockScanInfo for uid:%"PRId64", code: internal error", id, uid);
- return TSDB_CODE_INTERNAL_ERROR;
+ tsdbError("%s failed to load tableBlockScanInfo for uid:%" PRId64 ", code: internal error", id, uid);
+ TSDB_CHECK_NULL(pStatus->pTableIter, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
}
- return 0;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void resetPreFilesetMemTableListIndex(SReaderStatus* pStatus) {
@@ -3121,18 +3676,26 @@ static bool moveToNextTableForPreFileSetMem(SReaderStatus* pStatus) {
}
static int32_t buildCleanBlockFromSttFiles(STsdbReader* pReader, STableBlockScanInfo* pScanInfo) {
- SReaderStatus* pStatus = &pReader->status;
- SSttBlockReader* pSttBlockReader = pStatus->fileIter.pSttBlockReader;
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SSttBlockReader* pSttBlockReader = NULL;
+ SSDataBlock* pResBlock = NULL;
+ SDataBlockInfo* pInfo = NULL;
+ bool asc = false;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
- SDataBlockInfo* pInfo = &pResBlock->info;
- int32_t code = blockDataEnsureCapacity(pResBlock, pScanInfo->numOfRowsInStt);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ pStatus = &pReader->status;
+ pSttBlockReader = pStatus->fileIter.pSttBlockReader;
+ pResBlock = pReader->resBlockInfo.pResBlock;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ code = blockDataEnsureCapacity(pResBlock, pScanInfo->numOfRowsInStt);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ pInfo = &pResBlock->info;
pInfo->rows = pScanInfo->numOfRowsInStt;
pInfo->id.uid = pScanInfo->uid;
pInfo->dataLoad = 1;
@@ -3156,15 +3719,30 @@ static int32_t buildCleanBlockFromSttFiles(STsdbReader* pReader, STableBlockScan
tsdbDebug("%p uid:%" PRId64 " return clean stt block as one, brange:%" PRId64 "-%" PRId64 " rows:%" PRId64 " %s",
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, pReader->idStr);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-static void buildCleanBlockFromDataFiles(STsdbReader* pReader, STableBlockScanInfo* pScanInfo,
- SFileDataBlockInfo* pBlockInfo, int32_t blockIndex) {
+static int32_t buildCleanBlockFromDataFiles(STsdbReader* pReader, STableBlockScanInfo* pScanInfo,
+ SFileDataBlockInfo* pBlockInfo, int32_t blockIndex) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SDataBlockInfo* pInfo = NULL;
+ bool asc = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
// whole data block is required, return it directly
- SReaderStatus* pStatus = &pReader->status;
- SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ pStatus = &pReader->status;
+ pInfo = &pReader->resBlockInfo.pResBlock->info;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
pInfo->rows = pBlockInfo->numRow;
pInfo->id.uid = pScanInfo->uid;
@@ -3188,66 +3766,81 @@ static void buildCleanBlockFromDataFiles(STsdbReader* pReader, STableBlockScanIn
// update the last key for the corresponding table
setComposedBlockFlag(pReader, false);
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order);
- updateLastKeyInfo(&pScanInfo->lastProcKey, pBlockInfo, pInfo, pReader->suppInfo.numOfPks, asc);
+ code = updateLastKeyInfo(&pScanInfo->lastProcKey, pBlockInfo, pInfo, pReader->suppInfo.numOfPks, asc);
+ TSDB_CHECK_CODE(code, lino, _end);
tsdbDebug("%p uid:%" PRIu64
" clean file block retrieved from file, global index:%d, "
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
pReader, pScanInfo->uid, blockIndex, pBlockInfo->tbBlockIdx, pBlockInfo->numRow, pBlockInfo->firstKey,
pBlockInfo->lastKey, pReader->idStr);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
- SSttBlockReader* pSttBlockReader = pStatus->fileIter.pSttBlockReader;
- STableUidList* pUidList = &pStatus->uidList;
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SSttBlockReader* pSttBlockReader = NULL;
+ STableUidList* pUidList = NULL;
+ SSDataBlock* pResBlock = NULL;
+ STableBlockScanInfo* pScanInfo = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pSttBlockReader = pStatus->fileIter.pSttBlockReader;
+ pUidList = &pStatus->uidList;
if (tSimpleHashGetSize(pStatus->pTableMap) == 0) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
+ pResBlock = pReader->resBlockInfo.pResBlock;
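+  // walk the tables one by one and stop as soon as a non-empty result block has been produced or all tables are exhausted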
while (1) {
if (pReader->code != TSDB_CODE_SUCCESS) {
tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", tstrerror(pReader->code), pReader->idStr);
- return pReader->code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
// load the last data block of current table
- STableBlockScanInfo* pScanInfo = NULL;
if (pStatus->pTableIter == NULL) {
tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr);
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
} else {
- pScanInfo = *(STableBlockScanInfo**) pStatus->pTableIter;
+ pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
}
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
// reset the index in last block when handing a new file
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
}
initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
- if (pReader->code != TSDB_CODE_SUCCESS) {
- return pReader->code;
- }
-
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
+
if (!hasDataInSttBlock(pScanInfo)) {
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
@@ -3256,7 +3849,8 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
// if only require the total rows, no need to load data from stt file if it is clean stt blocks
if (pReader->info.execMode == READER_EXEC_ROWS && pScanInfo->cleanSttBlocks) {
code = buildCleanBlockFromSttFiles(pReader, pScanInfo);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ break;
}
int64_t st = taosGetTimestampUs();
@@ -3267,9 +3861,7 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
}
code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pSttBlockReader);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
@@ -3284,15 +3876,21 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
", elapsed time:%.2f ms %s",
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, el, pReader->idStr);
- return TSDB_CODE_SUCCESS;
+ break;
}
// current table is exhausted, let's try next table
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// current active data block not overlap with the stt-files/stt-blocks
@@ -3306,54 +3904,61 @@ static bool notOverlapWithFiles(SFileDataBlockInfo* pBlockInfo, STableBlockScanI
}
static int32_t doBuildDataBlock(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
- SDataBlockIter* pBlockIter = &pStatus->blockIter;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SDataBlockIter* pBlockIter = NULL;
STableBlockScanInfo* pScanInfo = NULL;
SFileDataBlockInfo* pBlockInfo = NULL;
- SSttBlockReader* pSttBlockReader = pReader->status.fileIter.pSttBlockReader;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- int32_t code = TSDB_CODE_SUCCESS;
+ SSttBlockReader* pSttBlockReader = NULL;
+ TSDBKEY keyInBuf;
+ bool asc = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pBlockIter = &pStatus->blockIter;
+ pSttBlockReader = pReader->status.fileIter.pSttBlockReader;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
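+  // depending on the overlap with buffered rows and stt data, the current block is merged, built from the buffer, or returned as-is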
code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pBlockInfo->uid, sizeof(pBlockInfo->uid))) {
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order);
- return code;
+ goto _end;
}
- if (pReader->code != TSDB_CODE_SUCCESS) {
- return pReader->code;
- }
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
code = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, &pScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pScanInfo->sttKeyInfo.status == STT_FILE_READER_UNINIT) {
initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
- if (pReader->code != 0) {
- return pReader->code;
- }
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
- TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
- if (fileBlockShouldLoad(pReader, pBlockInfo, pScanInfo, keyInBuf)) {
+ code = getCurrentKeyInBuf(pScanInfo, pReader, &keyInBuf);
+ TSDB_CHECK_CODE(code, lino, _end);
+ bool load = false;
+ code = fileBlockShouldLoad(pReader, pBlockInfo, pScanInfo, keyInBuf, &load);
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (load) {
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// build composed data block
code = buildComposedDataBlock(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (bufferDataInFileBlockGap(keyInBuf, pBlockInfo, pScanInfo, pReader->info.order)) {
// data in memory that are earlier than current file block and stt blocks
// rows in buffer should be less than the file block in asc, greater than file block in desc
int64_t endKey = getBoarderKeyInFiles(pBlockInfo, pScanInfo, pReader->info.order);
code = buildDataBlockFromBuf(pReader, pScanInfo, endKey);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
if (notOverlapWithFiles(pBlockInfo, pScanInfo, asc)) {
int64_t keyInStt = pScanInfo->sttKeyInfo.nextProcKey.ts;
@@ -3362,14 +3967,13 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
(!asc && pBlockInfo->firstKey > keyInStt)) {
// the stt blocks may located in the gap of different data block, but the whole sttRange may overlap with the
// data block, so the overlap check is invalid actually.
- buildCleanBlockFromDataFiles(pReader, pScanInfo, pBlockInfo, pBlockIter->index);
+ code = buildCleanBlockFromDataFiles(pReader, pScanInfo, pBlockInfo, pBlockIter->index);
+ TSDB_CHECK_CODE(code, lino, _end);
} else { // clean stt block
- if (!(pReader->info.execMode == READER_EXEC_ROWS && pSttBlockReader->mergeTree.pIter == NULL)) {
- tsdbError("tsdb reader failed at: %s:%d", __func__, __LINE__);
- return TSDB_CODE_INTERNAL_ERROR;
- }
+ TSDB_CHECK_CONDITION((pReader->info.execMode == READER_EXEC_ROWS) && (pSttBlockReader->mergeTree.pIter == NULL),
+ code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
code = buildCleanBlockFromSttFiles(pReader, pScanInfo);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
SBlockData* pBData = &pReader->status.fileBlockData;
@@ -3382,21 +3986,16 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
// let's load data from stt files, make sure clear the cleanStt block flag before load the data from stt files
initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
- if (pReader->code != 0) {
- return pReader->code;
- }
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
// no data in stt block, no need to proceed.
while (hasDataInSttBlock(pScanInfo)) {
- if (pScanInfo->sttKeyInfo.status != STT_FILE_HAS_DATA) {
- tsdbError("tsdb reader failed at: %s:%d", __func__, __LINE__);
- return TSDB_CODE_INTERNAL_ERROR;
- }
+ TSDB_CHECK_CONDITION(pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA, code, lino, _end,
+ TSDB_CODE_INTERNAL_ERROR);
code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pSttBlockReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
@@ -3423,11 +4022,24 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
}
}
- return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t buildBlockFromBufferSeqForPreFileset(STsdbReader* pReader, int64_t endKey) {
- SReaderStatus* pStatus = &pReader->status;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
tsdbDebug("seq load data blocks from cache that preceeds fileset %d, %s", pReader->status.pCurrentFileset->fid,
pReader->idStr);
@@ -3435,7 +4047,8 @@ static int32_t buildBlockFromBufferSeqForPreFileset(STsdbReader* pReader, int64_
while (1) {
if (pReader->code != TSDB_CODE_SUCCESS) {
tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", tstrerror(pReader->code), pReader->idStr);
- return pReader->code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
STableBlockScanInfo** pBlockScanInfo = pStatus->pProcMemTableIter;
@@ -3443,150 +4056,172 @@ static int32_t buildBlockFromBufferSeqForPreFileset(STsdbReader* pReader, int64_
taosHashGet(*pReader->pIgnoreTables, &(*pBlockScanInfo)->uid, sizeof((*pBlockScanInfo)->uid))) {
bool hasNexTable = moveToNextTableForPreFileSetMem(pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
}
- int32_t code = initMemDataIterator(*pBlockScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = initMemDataIterator(*pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
code = initDelSkylineIterator(*pBlockScanInfo, pReader->info.order, &pReader->cost);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = buildDataBlockFromBuf(pReader, *pBlockScanInfo, endKey);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
- return TSDB_CODE_SUCCESS;
+ break;
}
// current table is exhausted, let's try next table
bool hasNexTable = moveToNextTableForPreFileSetMem(pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader, int64_t endKey) {
- SReaderStatus* pStatus = &pReader->status;
- STableUidList* pUidList = &pStatus->uidList;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ STableUidList* pUidList = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pUidList = &pStatus->uidList;
tsdbDebug("seq load data blocks from cache, %s", pReader->idStr);
while (1) {
if (pReader->code != TSDB_CODE_SUCCESS) {
tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", tstrerror(pReader->code), pReader->idStr);
- return pReader->code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter;
if (pBlockScanInfo == NULL || *pBlockScanInfo == NULL) {
- return TSDB_CODE_SUCCESS;
+ break;
}
if (pReader->pIgnoreTables &&
taosHashGet(*pReader->pIgnoreTables, &(*pBlockScanInfo)->uid, sizeof((*pBlockScanInfo)->uid))) {
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
continue;
}
- int32_t code = initMemDataIterator(*pBlockScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = initMemDataIterator(*pBlockScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
code = initDelSkylineIterator(*pBlockScanInfo, pReader->info.order, &pReader->cost);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = buildDataBlockFromBuf(pReader, *pBlockScanInfo, endKey);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
- return TSDB_CODE_SUCCESS;
+ break;
}
// current table is exhausted, let's try next table
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
- return TSDB_CODE_SUCCESS;
+ break;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// set the correct start position in case of the first/last file block, according to the query time window
-static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
+static int32_t initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SFileDataBlockInfo* pBlockInfo = NULL;
- SReaderStatus* pStatus = &pReader->status;
- SFileBlockDumpInfo* pDumpInfo = &pStatus->fBlockDumpInfo;
+ SReaderStatus* pStatus = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
- int32_t code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pDumpInfo = &pStatus->fBlockDumpInfo;
+
+ code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
if (code == TSDB_CODE_SUCCESS) {
pDumpInfo->totalRows = pBlockInfo->numRow;
pDumpInfo->rowIndex = ASCENDING_TRAVERSE(pReader->info.order) ? 0 : pBlockInfo->numRow - 1;
} else {
pDumpInfo->totalRows = 0;
pDumpInfo->rowIndex = 0;
+ code = TSDB_CODE_SUCCESS;
}
pDumpInfo->allDumped = false;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
SBlockNumber num = {0};
- SArray* pTableList = taosArrayInit(40, POINTER_BYTES);
- if (pTableList == NULL) {
- return terrno;
- }
+ SArray* pTableList = NULL;
- int32_t code = moveToNextFile(pReader, &num, pTableList);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pTableList);
- return code;
- }
+ pTableList = taosArrayInit(40, POINTER_BYTES);
+ TSDB_CHECK_NULL(pTableList, code, lino, _end, terrno);
+
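+  // move to the next fileset and, if it contains blocks, set up the block iterator and dump info accordingly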
+ code = moveToNextFile(pReader, &num, pTableList);
+ TSDB_CHECK_CODE(code, lino, _end);
// all data files are consumed, try data in buffer
if (num.numOfBlocks + num.numOfSttFiles == 0) {
pReader->status.loadFromFile = false;
- taosArrayDestroy(pTableList);
- return code;
+ goto _end;
}
// initialize the block iterator for a new fileset
if (num.numOfBlocks > 0) {
code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks, pTableList);
+ TSDB_CHECK_CODE(code, lino, _end);
} else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), pReader->idStr);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = resetTableListIndex(&pReader->status, pReader->idStr);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
- if (code == TSDB_CODE_SUCCESS) { // set the correct start position according to the query time window
- initBlockDumpInfo(pReader, pBlockIter);
- }
+ code = initBlockDumpInfo(pReader, pBlockIter);
+ TSDB_CHECK_CODE(code, lino, _end);
- taosArrayDestroy(pTableList);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (pTableList) {
+ taosArrayDestroy(pTableList);
+ }
return code;
}
@@ -3602,8 +4237,15 @@ typedef enum {
static int32_t doReadDataFromSttFiles(STsdbReader* pReader, ERetrieveType* pReturnType) {
int32_t code = TSDB_CODE_SUCCESS;
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
- SDataBlockIter* pBlockIter = &pReader->status.blockIter;
+ int32_t lino = 0;
+ SSDataBlock* pResBlock = NULL;
+ SDataBlockIter* pBlockIter = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReturnType, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pResBlock = pReader->resBlockInfo.pResBlock;
+ pBlockIter = &pReader->status.blockIter;
*pReturnType = TSDB_READ_RETURN;
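+  // default to returning control to the caller; it is switched to TSDB_READ_CONTINUE only when the next fileset contains data blocks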
@@ -3611,89 +4253,96 @@ static int32_t doReadDataFromSttFiles(STsdbReader* pReader, ERetrieveType* pRetu
while (1) {
code = doLoadSttBlockSequentially(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- *pReturnType = TSDB_READ_RETURN;
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pResBlock->info.rows > 0) {
- *pReturnType = TSDB_READ_RETURN;
- return code;
+ goto _end;
}
// all data blocks are checked in this stt file, now let's try the next file set
- if (pReader->status.pTableIter != NULL) {
- code = TSDB_CODE_INTERNAL_ERROR;
- tsdbError("tsdb reader failed at: %s:%d, code:%s", __func__, __LINE__, tstrerror(code));
- return code;
- }
+ TSDB_CHECK_CONDITION(pReader->status.pTableIter == NULL, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
code = initForFirstBlockInFile(pReader, pBlockIter);
+ TSDB_CHECK_CODE(code, lino, _end);
- // error happens or all the data files are completely checked
- if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
- *pReturnType = TSDB_READ_RETURN;
- return code;
+ // all the data files are completely checked
+ if (pReader->status.loadFromFile == false) {
+ goto _end;
}
if (pReader->status.bProcMemPreFileset) {
code = buildFromPreFilesetBuffer(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pResBlock->info.rows > 0) {
pReader->status.processingMemPreFileSet = true;
- *pReturnType = TSDB_READ_RETURN;
- return code;
+ goto _end;
}
}
if (pBlockIter->numOfBlocks > 0) { // there are data blocks existed.
*pReturnType = TSDB_READ_CONTINUE;
- return code;
+ goto _end;
} else { // all blocks in data file are checked, let's check the data in stt-files
code = resetTableListIndex(&pReader->status, pReader->idStr);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t buildBlockFromFiles(STsdbReader* pReader) {
- int32_t code = TSDB_CODE_SUCCESS;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SDataBlockIter* pBlockIter = NULL;
+ SSDataBlock* pResBlock = NULL;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SBlockData* pBlockData = NULL;
+ const char* id = NULL;
+ bool asc = false;
- SDataBlockIter* pBlockIter = &pReader->status.blockIter;
- SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
- const char* id = pReader->idStr;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+
+ pBlockIter = &pReader->status.blockIter;
+ pResBlock = pReader->resBlockInfo.pResBlock;
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pBlockData = &pReader->status.fileBlockData;
+ id = pReader->idStr;
if (pBlockIter->numOfBlocks == 0) {
// let's try to extract data from stt files.
ERetrieveType type = 0;
code = doReadDataFromSttFiles(pReader, &type);
- if (code != 0 || type == TSDB_READ_RETURN) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (type == TSDB_READ_RETURN) {
+ goto _end;
}
code = doBuildDataBlock(pReader);
- if (code != TSDB_CODE_SUCCESS || pResBlock->info.rows > 0) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (pResBlock->info.rows > 0) {
+ goto _end;
}
}
while (1) {
if (fileBlockPartiallyRead(pDumpInfo, asc)) { // file data block is partially loaded
code = buildComposedDataBlock(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
// current block are exhausted, try the next file block
if (pDumpInfo->allDumped) {
// try next data block in current file
- bool hasNext = blockIteratorNext(&pReader->status.blockIter, pReader->idStr);
+ bool hasNext = blockIteratorNext(&pReader->status.blockIter);
if (hasNext) { // check for the next block in the block accessed order list
- initBlockDumpInfo(pReader, pBlockIter);
+ code = initBlockDumpInfo(pReader, pBlockIter);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
// all data blocks in files are checked, let's check the data in last files.
// data blocks in current file are exhausted, let's try the next file now
@@ -3703,30 +4352,34 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
tBlockDataReset(pBlockData);
code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), id);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = resetTableListIndex(&pReader->status, id);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
ERetrieveType type = 0;
code = doReadDataFromSttFiles(pReader, &type);
- if (code != 0 || type == TSDB_READ_RETURN) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (type == TSDB_READ_RETURN) {
+ break;
}
}
}
code = doBuildDataBlock(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
}
- if (code != TSDB_CODE_SUCCESS || pResBlock->info.rows > 0) {
- return code;
+ if (pResBlock->info.rows > 0) {
+ break;
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRetention* retentions, const char* idStr,
@@ -3768,7 +4421,7 @@ static void getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRet
} else if (level == TSDB_RETENTION_L1) {
*pLevel = TSDB_RETENTION_L1;
tsdbDebug("vgId:%d, rsma level %d is selected to query %s", TD_VID(pVnode), TSDB_RETENTION_L1, str);
- *pTsdb = VND_RSMA1(pVnode);
+ *pTsdb = VND_RSMA1(pVnode);
return;
} else {
*pLevel = TSDB_RETENTION_L2;
@@ -3795,86 +4448,100 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
return (SVersionRange){.minVer = startVer, .maxVer = endVer};
}
-static int32_t reverseSearchStartPos(const SArray* pDelList, int32_t index, int64_t key, bool asc) {
- size_t num = taosArrayGetSize(pDelList);
- int32_t start = index;
+static int32_t reverseSearchStartPos(const SArray* pDelList, int32_t index, int64_t key, bool asc, int32_t* start) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ size_t num = 0;
+
+ num = taosArrayGetSize(pDelList);
+ *start = index;
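+  // step backwards (asc) or forwards (desc) from the cached index so that entries with duplicated timestamps are re-examined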
if (asc) {
- if (start >= num - 1) {
- start = num - 1;
+ if (*start >= num - 1) {
+ *start = num - 1;
}
- TSDBKEY* p = taosArrayGet(pDelList, start);
- if (p == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDBKEY* p = taosArrayGet(pDelList, *start);
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
- while (p->ts >= key && start > 0) {
- start -= 1;
+ while (p->ts >= key && *start > 0) {
+ *start -= 1;
}
} else {
if (index <= 0) {
- start = 0;
+ *start = 0;
}
- TSDBKEY* p = taosArrayGet(pDelList, start);
- if (p == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDBKEY* p = taosArrayGet(pDelList, *start);
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
- while (p->ts <= key && start < num - 1) {
- start += 1;
+ while (p->ts <= key && *start < num - 1) {
+ *start += 1;
}
}
- return start;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
-bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order,
- SVersionRange* pVerRange, bool hasPk) {
+int32_t hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order,
+ SVersionRange* pVerRange, bool hasPk, bool* dropped) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ size_t num = 0;
+ int32_t step = 0;
+ bool asc = false;
+
+ *dropped = false;
+
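+  // walk the delete skyline around *index to decide whether (key, ver) falls inside an applicable delete range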
if (pDelList == NULL || (TARRAY_SIZE(pDelList) == 0)) {
- return false;
+ goto _end;
}
- size_t num = taosArrayGetSize(pDelList);
- bool asc = ASCENDING_TRAVERSE(order);
- int32_t step = asc ? 1 : -1;
+ num = taosArrayGetSize(pDelList);
+ asc = ASCENDING_TRAVERSE(order);
+ step = asc ? 1 : -1;
if (hasPk) { // handle the case where duplicated timestamps existed.
- *index = reverseSearchStartPos(pDelList, *index, key, asc);
+ code = reverseSearchStartPos(pDelList, *index, key, asc, index);
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (asc) {
if (*index >= num - 1) {
TSDBKEY* last = taosArrayGetLast(pDelList);
if (last == NULL) {
- return false;
+ goto _end;
}
if (key > last->ts) {
- return false;
+ goto _end;
} else if (key == last->ts) {
TSDBKEY* prev = taosArrayGet(pDelList, num - 2);
if (prev == NULL) {
- return false;
+ goto _end;
}
- return (prev->version >= ver && prev->version <= pVerRange->maxVer && prev->version >= pVerRange->minVer);
+ *dropped = (prev->version >= ver && prev->version <= pVerRange->maxVer && prev->version >= pVerRange->minVer);
}
} else {
TSDBKEY* pCurrent = taosArrayGet(pDelList, *index);
TSDBKEY* pNext = taosArrayGet(pDelList, (*index) + 1);
if (pCurrent == NULL || pNext == NULL) {
- return false;
+ goto _end;
}
if (key < pCurrent->ts) {
- return false;
+ goto _end;
}
if (pCurrent->ts <= key && pNext->ts >= key && pCurrent->version >= ver &&
pVerRange->maxVer >= pCurrent->version) {
- return true;
+ *dropped = true;
+ goto _end;
}
while (pNext->ts <= key && (*index) < num - 1) {
@@ -3884,7 +4551,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t
pCurrent = taosArrayGet(pDelList, *index);
pNext = taosArrayGet(pDelList, (*index) + 1);
if (pCurrent == NULL || pNext == NULL) {
- return false;
+ break;
}
// it is not a consecutive deletion range, ignore it
@@ -3894,24 +4561,23 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t
if (pCurrent->ts <= key && pNext->ts >= key && pCurrent->version >= ver &&
pVerRange->maxVer >= pCurrent->version) {
- return true;
+ *dropped = true;
+ break;
}
}
}
-
- return false;
}
} else {
if (*index <= 0) {
TSDBKEY* pFirst = taosArrayGet(pDelList, 0);
if (pFirst == NULL) {
- return false;
+ goto _end;
}
if (key < pFirst->ts) {
- return false;
+ goto _end;
} else if (key == pFirst->ts) {
- return pFirst->version >= ver;
+ *dropped = pFirst->version >= ver;
} else {
tsdbError("unexpected error, key:%" PRId64 ", first:%" PRId64, key, pFirst->ts);
}
@@ -3919,15 +4585,16 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t
TSDBKEY* pCurrent = taosArrayGet(pDelList, *index);
TSDBKEY* pPrev = taosArrayGet(pDelList, (*index) - 1);
if (pCurrent == NULL || pPrev == NULL) {
- return false;
+ goto _end;
}
if (key > pCurrent->ts) {
- return false;
+ goto _end;
}
if (pPrev->ts <= key && pCurrent->ts >= key && pPrev->version >= ver) {
- return true;
+ *dropped = true;
+ goto _end;
}
while (pPrev->ts >= key && (*index) > 1) {
@@ -3937,7 +4604,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t
pCurrent = taosArrayGet(pDelList, *index);
pPrev = taosArrayGet(pDelList, (*index) - 1);
if (pCurrent == NULL || pPrev == NULL) {
- return false;
+ break;
}
// it is not a consecutive deletion range, ignore it
@@ -3946,46 +4613,60 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t
}
if (pPrev->ts <= key && pCurrent->ts >= key && pPrev->version >= ver) {
- return true;
+ *dropped = true;
+ break;
}
}
}
-
- return false;
}
}
- return false;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
-FORCE_INLINE void getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader, TSDBROW** pRes) {
+FORCE_INLINE int32_t getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader, TSDBROW** pRes) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t order = 0;
+ TSDBROW* pRow = NULL;
+ TSDBKEY key;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRes, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pRes = NULL;
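+  // advance the iterator until a row is found that lies inside the time window and version range and is not covered by delete records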
if (!pIter->hasVal) {
- return;
+ goto _end;
}
- int32_t order = pReader->info.order;
- TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ order = pReader->info.order;
+ pRow = tsdbTbDataIterGet(pIter->iter);
- TSDBKEY key;
TSDBROW_INIT_KEY(pRow, key);
if (outOfTimeWindow(key.ts, &pReader->info.window)) {
pIter->hasVal = false;
- return;
+ goto _end;
}
// it is a valid data version
if (key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer) {
if (pDelList == NULL || TARRAY_SIZE(pDelList) == 0) {
*pRes = pRow;
- return;
+ goto _end;
} else {
- bool dropped = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange,
- pReader->suppInfo.numOfPks > 0);
+ bool dropped = false;
+ code = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange,
+ pReader->suppInfo.numOfPks > 0, &dropped);
+ TSDB_CHECK_CODE(code, lino, _end);
if (!dropped) {
*pRes = pRow;
- return;
+ goto _end;
}
}
}
@@ -3993,7 +4674,7 @@ FORCE_INLINE void getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdb
while (1) {
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
if (!pIter->hasVal) {
- return;
+ goto _end;
}
pRow = tsdbTbDataIterGet(pIter->iter);
@@ -4001,28 +4682,42 @@ FORCE_INLINE void getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdb
TSDBROW_INIT_KEY(pRow, key);
if (outOfTimeWindow(key.ts, &pReader->info.window)) {
pIter->hasVal = false;
- return;
+ goto _end;
}
if (key.version <= pReader->info.verRange.maxVer && key.version >= pReader->info.verRange.minVer) {
if (pDelList == NULL || TARRAY_SIZE(pDelList) == 0) {
*pRes = pRow;
- return;
+ goto _end;
} else {
- bool dropped = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange,
- pReader->suppInfo.numOfPks > 0);
+ bool dropped = false;
+ code = hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, order, &pReader->info.verRange,
+ pReader->suppInfo.numOfPks > 0, &dropped);
+ TSDB_CHECK_CODE(code, lino, _end);
if (!dropped) {
*pRes = pRow;
- return;
+ goto _end;
}
}
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
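+// doMergeRowsInBuf: fold the remaining in-memory duplicates of pCurKey into the reader's row
+// merger, resolving the row-format schema on demand and stopping when getValidMemRow no longer
+// yields a matching row.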
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, SRowKey* pCurKey, SArray* pDelList, STsdbReader* pReader) {
- SRowMerger* pMerger = &pReader->status.merger;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
while (1) {
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
@@ -4032,7 +4727,8 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, SRowKey* pCurKey, SArra
// data exists but not valid
TSDBROW* pRow = NULL;
- getValidMemRow(pIter, pDelList, pReader, &pRow);
+ code = getValidMemRow(pIter, pDelList, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pRow == NULL) {
break;
}
@@ -4053,23 +4749,28 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, SRowKey* pCurKey, SArra
STSchema* pTSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
- if (pTSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pTSchema, code, lino, _end, terrno);
}
code = tsdbRowMergerAdd(pMerger, pRow, pTSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
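+// doMergeRowsInFileBlockImpl: append consecutive file-block rows that still match pKey (within
+// pVerRange) to pMerger, advancing *rowIndex by `step` (+1 for ascending scans, -1 for descending).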
static int32_t doMergeRowsInFileBlockImpl(SBlockData* pBlockData, int32_t* rowIndex, SRowKey* pKey, SRowMerger* pMerger,
SVersionRange* pVerRange, int32_t step) {
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(rowIndex, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
while ((*rowIndex) < pBlockData->nRow && (*rowIndex) >= 0) {
SRowKey cur;
tColRowGetKey(pBlockData, (*rowIndex), &cur);
@@ -4085,11 +4786,14 @@ static int32_t doMergeRowsInFileBlockImpl(SBlockData* pBlockData, int32_t* rowIn
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, (*rowIndex));
code = tsdbRowMergerAdd(pMerger, &fRow, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
(*rowIndex) += step;
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -4101,92 +4805,120 @@ typedef enum {
static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanInfo* pScanInfo,
SFileDataBlockInfo* pFBlock, SRowMerger* pMerger, SRowKey* pKey,
CHECK_FILEBLOCK_STATE* state) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- SVersionRange* pVerRange = &pReader->info.verRange;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SBlockData* pBlockData = NULL;
+ bool asc = false;
+ SVersionRange* pVerRange = NULL;
bool loadNeighbor = true;
- int32_t step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;
+ int32_t step = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(state, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pBlockData = &pReader->status.fileBlockData;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ pVerRange = &pReader->info.verRange;
+ step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;
- int32_t code = loadNeighborIfOverlap(pFBlock, pScanInfo, pReader, &loadNeighbor);
*state = CHECK_FILEBLOCK_QUIT;
+ code = loadNeighborIfOverlap(pFBlock, pScanInfo, pReader, &loadNeighbor);
+ TSDB_CHECK_CODE(code, lino, _end);
- if (loadNeighbor && (code == TSDB_CODE_SUCCESS)) {
+ if (loadNeighbor) {
code = doMergeRowsInFileBlockImpl(pBlockData, &pDumpInfo->rowIndex, pKey, pMerger, pVerRange, step);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if ((pDumpInfo->rowIndex >= pDumpInfo->totalRows && asc) || (pDumpInfo->rowIndex < 0 && !asc)) {
*state = CHECK_FILEBLOCK_CONT;
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, SRowKey* pKey,
STsdbReader* pReader) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SRowMerger* pMerger = &pReader->status.merger;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- int32_t step = asc ? 1 : -1;
- SVersionRange* pRange = &pReader->info.verRange;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SFileBlockDumpInfo* pDumpInfo = NULL;
+ SRowMerger* pMerger = NULL;
+ bool asc = false;
+ int32_t step = 0;
+ SVersionRange* pRange = NULL;
+
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pDumpInfo = &pReader->status.fBlockDumpInfo;
+ pMerger = &pReader->status.merger;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ step = asc ? 1 : -1;
+ pRange = &pReader->info.verRange;
pDumpInfo->rowIndex += step;
if ((pDumpInfo->rowIndex <= pBlockData->nRow - 1 && asc) || (pDumpInfo->rowIndex >= 0 && !asc)) {
code = doMergeRowsInFileBlockImpl(pBlockData, &pDumpInfo->rowIndex, pKey, pMerger, pRange, step);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
// all rows are consumed, let's try next file block
if ((pDumpInfo->rowIndex >= pBlockData->nRow && asc) || (pDumpInfo->rowIndex < 0 && !asc)) {
while (1) {
- CHECK_FILEBLOCK_STATE st;
-
SFileDataBlockInfo* pFileBlockInfo = NULL;
code = getCurrentBlockInfo(&pReader->status.blockIter, &pFileBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pFileBlockInfo == NULL) {
- st = CHECK_FILEBLOCK_QUIT;
break;
}
+ CHECK_FILEBLOCK_STATE st = CHECK_FILEBLOCK_QUIT;
code = checkForNeighborFileBlock(pReader, pScanInfo, pFileBlockInfo, pMerger, pKey, &st);
- if (st == CHECK_FILEBLOCK_QUIT || code != TSDB_CODE_SUCCESS) {
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (st == CHECK_FILEBLOCK_QUIT) {
break;
}
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
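+// doMergeRowsInSttBlock: keep pulling rows from the stt block reader and merge those whose key
+// equals pScanInfo->lastProcKey into pMerger; the loop ends when the stt data is exhausted or a
+// different key shows up.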
int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, SRowMerger* pMerger,
int32_t pkSrcSlot, SVersionRange* pVerRange, const char* idStr) {
- SRowKey* pRowKey = &pScanInfo->lastProcKey;
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowKey* pRowKey = NULL;
+ SRowKey* pNextKey = NULL;
+
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pRowKey = &pScanInfo->lastProcKey;
while (1) {
code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pkSrcSlot, pVerRange);
- if (code || (!hasDataInSttBlock(pScanInfo))) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (!hasDataInSttBlock(pScanInfo)) {
+ break;
}
- SRowKey* pNextKey = getCurrentKeyInSttBlock(pSttBlockReader);
+ pNextKey = getCurrentKeyInSttBlock(pSttBlockReader);
int32_t ret = pkCompEx(pRowKey, pNextKey);
if (ret == 0) {
TSDBROW* pRow1 = tMergeTreeGetRow(&pSttBlockReader->mergeTree);
code = tsdbRowMergerAdd(pMerger, pRow1, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- break;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
tsdbTrace("uid:%" PRIu64 " last del index:%d, del range:%d, lastKeyInStt:%" PRId64 ", %s", pScanInfo->uid,
pScanInfo->sttBlockDelIndex, (int32_t)taosArrayGetSize(pScanInfo->delSkyline),
@@ -4195,14 +4927,31 @@ int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanI
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
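+// doMergeMemTableMultiRows: merge duplicated in-memory rows that share the key of pRow. When the
+// merger materializes a new row, *freeTSRow is set to true and the caller owns pResRow->pTSRow;
+// otherwise the original row is handed back and *freeTSRow stays false.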
int32_t doMergeMemTableMultiRows(TSDBROW* pRow, SRowKey* pKey, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
TSDBROW* pResRow, STsdbReader* pReader, bool* freeTSRow) {
- SRowMerger* pMerger = &pReader->status.merger;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
TSDBROW* pNextRow = NULL;
- TSDBROW current = *pRow;
+ STSchema* pTSchema = NULL;
+ STSchema* pTSchema1 = NULL;
+ TSDBROW current;
+
+ TSDB_CHECK_NULL(pRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pResRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(freeTSRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
+ current = *pRow;
{ // if the timestamp of the next valid row has a different ts, return current row directly
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
@@ -4210,19 +4959,20 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, SRowKey* pKey, uint64_t uid, SIt
if (!pIter->hasVal) {
*pResRow = *pRow;
*freeTSRow = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
} else { // has next point in mem/imem
- getValidMemRow(pIter, pDelList, pReader, &pNextRow);
+ code = getValidMemRow(pIter, pDelList, pReader, &pNextRow);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pNextRow == NULL) {
*pResRow = current;
*freeTSRow = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
if (TSDBROW_TS(&current) != TSDBROW_TS(pNextRow)) {
*pResRow = current;
*freeTSRow = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
if (pKey->numOfPKs > 0) {
@@ -4231,145 +4981,140 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, SRowKey* pKey, uint64_t uid, SIt
if (pkCompEx(pKey, &nextRowKey) != 0) {
*pResRow = current;
*freeTSRow = false;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
}
}
}
- terrno = 0;
- int32_t code = 0;
-
// start to merge duplicated rows
- STSchema* pTSchema = NULL;
if (current.type == TSDBROW_ROW_FMT) { // get the correct schema for row-wise data in memory
pTSchema = doGetSchemaForTSRow(current.pTSRow->sver, pReader, uid);
- if (pTSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pTSchema, code, lino, _end, terrno);
}
code = tsdbRowMergerAdd(pMerger, &current, pTSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- STSchema* pTSchema1 = NULL;
if (pNextRow->type == TSDBROW_ROW_FMT) { // get the correct schema for row-wise data in memory
pTSchema1 = doGetSchemaForTSRow(pNextRow->pTSRow->sver, pReader, uid);
- if (pTSchema1 == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pTSchema1, code, lino, _end, terrno);
}
code = tsdbRowMergerAdd(pMerger, pNextRow, pTSchema1);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(pIter, uid, pKey, pDelList, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = tsdbRowMergerGetRow(pMerger, &pResRow->pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pResRow->type = TSDBROW_ROW_FMT;
tsdbRowMergerClear(pMerger);
*freeTSRow = true;
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
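+// doMergeMemIMemRows: merge a mem row and an imem row that carry the same key, feeding them (and
+// their buffered duplicates) to the row merger in traversal order (imem first for ascending scans),
+// and return the merged record through pTSRow.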
int32_t doMergeMemIMemRows(TSDBROW* pRow, SRowKey* pRowKey, TSDBROW* piRow, SRowKey* piRowKey,
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, SRow** pTSRow) {
- SRowMerger* pMerger = &pReader->status.merger;
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SRowMerger* pMerger = NULL;
+ STSchema* pSchema = NULL;
+ STSchema* piSchema = NULL;
+
+ TSDB_CHECK_NULL(pRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(piRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pMerger = &pReader->status.merger;
- STSchema* pSchema = NULL;
if (pRow->type == TSDBROW_ROW_FMT) {
pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
- if (pSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, terrno);
}
- STSchema* piSchema = NULL;
if (piRow->type == TSDBROW_ROW_FMT) {
piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
- if (piSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(piSchema, code, lino, _end, terrno);
}
if (ASCENDING_TRAVERSE(pReader->info.order)) { // ascending order imem --> mem
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, piRowKey, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, pRowKey, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, pRowKey, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, piRowKey, pBlockScanInfo->delSkyline, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
tRowKeyAssign(&pBlockScanInfo->lastProcKey, pRowKey);
code = tsdbRowMergerGetRow(pMerger, pTSRow);
tsdbRowMergerClear(pMerger);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
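+// tsdbGetNextRowInMem: pick the next qualified row from the mem/imem iterators, merging the two
+// sources when their keys collide; rows beyond endKey (relative to the scan order) are not returned.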
static int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, TSDBROW* pResRow,
int64_t endKey, bool* freeTSRow) {
- TSDBROW* pRow = NULL;
- TSDBROW* piRow = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ TSDBROW* pRow = NULL;
+ TSDBROW* piRow = NULL;
+ SArray* pDelList = NULL;
+ uint64_t uid = 0;
+ SIterInfo* piter = NULL;
+ SIterInfo* piiter = NULL;
+ SRowKey rowKey = {0};
+ SRowKey irowKey = {0};
+ bool asc = false;
- getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader, &pRow);
- getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader, &piRow);
+ TSDB_CHECK_NULL(pBlockScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pResRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(freeTSRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
- SArray* pDelList = pBlockScanInfo->delSkyline;
- uint64_t uid = pBlockScanInfo->uid;
- SIterInfo* piter = &pBlockScanInfo->iter;
- SIterInfo* piiter = &pBlockScanInfo->iiter;
- SRowKey rowKey = {0}, irowKey = {0};
+ code = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader, &pRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+ code = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader, &piRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ pDelList = pBlockScanInfo->delSkyline;
+ uid = pBlockScanInfo->uid;
+ piter = &pBlockScanInfo->iter;
+ piiter = &pBlockScanInfo->iiter;
// todo refactor
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
if (piter->hasVal) {
tRowGetKeyEx(pRow, &rowKey);
if ((rowKey.ts >= endKey && asc) || (rowKey.ts <= endKey && !asc)) {
@@ -4385,71 +5130,76 @@ static int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbRea
}
if (pRow != NULL && piRow != NULL) {
- int32_t code = TSDB_CODE_SUCCESS;
if (rowKey.numOfPKs == 0) {
if ((rowKey.ts > irowKey.ts && asc) || (rowKey.ts < irowKey.ts && (!asc))) { // ik.ts < k.ts
code = doMergeMemTableMultiRows(piRow, &irowKey, uid, piiter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if ((rowKey.ts < irowKey.ts && asc) || (rowKey.ts > irowKey.ts && (!asc))) {
code = doMergeMemTableMultiRows(pRow, &rowKey, uid, piter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
} else { // ik.ts == k.ts
- *freeTSRow = true;
pResRow->type = TSDBROW_ROW_FMT;
code = doMergeMemIMemRows(pRow, &rowKey, piRow, &irowKey, pBlockScanInfo, pReader, &pResRow->pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ *freeTSRow = true;
}
} else {
int32_t ret = pkCompEx(&rowKey, &irowKey);
if (ret != 0) {
if ((ret > 0 && asc) || (ret < 0 && (!asc))) { // ik.ts < k.ts
code = doMergeMemTableMultiRows(piRow, &irowKey, uid, piiter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if ((ret < 0 && asc) || (ret > 0 && (!asc))) {
code = doMergeMemTableMultiRows(pRow, &rowKey, uid, piter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else { // ik.ts == k.ts
- *freeTSRow = true;
pResRow->type = TSDBROW_ROW_FMT;
code = doMergeMemIMemRows(pRow, &rowKey, piRow, &irowKey, pBlockScanInfo, pReader, &pResRow->pTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ *freeTSRow = true;
}
}
-
- return code;
+ } else if (piter->hasVal && pRow != NULL) {
+ code = doMergeMemTableMultiRows(pRow, &rowKey, uid, piter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
+ } else if (piiter->hasVal && piRow != NULL) {
+ code = doMergeMemTableMultiRows(piRow, &irowKey, uid, piiter, pDelList, pResRow, pReader, freeTSRow);
+ TSDB_CHECK_CODE(code, lino, _end);
}
- if (piter->hasVal && pRow != NULL) {
- return doMergeMemTableMultiRows(pRow, &rowKey, uid, piter, pDelList, pResRow, pReader, freeTSRow);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
-
- if (piiter->hasVal && piRow != NULL) {
- return doMergeMemTableMultiRows(piRow, &irowKey, uid, piiter, pDelList, pResRow, pReader, freeTSRow);
- }
-
- return TSDB_CODE_SUCCESS;
+ return code;
}
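+// doAppendRowFromTSRow: copy one row-format record into the result block, setting NULL for output
+// columns that are absent from the row's schema version.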
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pTSRow, STableBlockScanInfo* pScanInfo) {
- int32_t outputRowIndex = pBlock->info.rows;
- int64_t uid = pScanInfo->uid;
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t outputRowIndex = 0;
+ int64_t uid = 0;
+ SBlockLoadSuppInfo* pSupInfo = NULL;
+ STSchema* pSchema = NULL;
+ SColVal colVal = {0};
+ int32_t i = 0, j = 0;
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
- if (pSchema == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pTSRow, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
- SColVal colVal = {0};
- int32_t i = 0, j = 0;
+ outputRowIndex = pBlock->info.rows;
+ uid = pScanInfo->uid;
+
+ pSupInfo = &pReader->suppInfo;
+ pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
+ TSDB_CHECK_NULL(pSchema, code, lino, _end, terrno);
if (pSupInfo->colId[i] == PRIMARYKEY_TIMESTAMP_COL_ID) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
((int64_t*)pColData->pData)[outputRowIndex] = pTSRow->ts;
i += 1;
@@ -4460,26 +5210,18 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pT
if (colId == pSchema->columns[j].colId) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColInfoData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
code = tRowGet(pTSRow, pSchema, j, &colVal);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doCopyColVal(pColInfoData, outputRowIndex, i, &colVal, pSupInfo);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
i += 1;
j += 1;
} else if (colId < pSchema->columns[j].colId) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColInfoData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
colDataSetNULL(pColInfoData, outputRowIndex);
i += 1;
@@ -4491,9 +5233,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pT
// set null value since current column does not exist in the "pSchema"
while (i < pSupInfo->numOfCols) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pColInfoData == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pColInfoData, code, lino, _end, TSDB_CODE_INVALID_PARA);
colDataSetNULL(pColInfoData, outputRowIndex);
i += 1;
@@ -4501,22 +5241,37 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pT
pBlock->info.dataLoad = 1;
pBlock->info.rows += 1;
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex) {
- int32_t i = 0, j = 0;
- int32_t outputRowIndex = pResBlock->info.rows;
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t i = 0, j = 0;
+ int32_t outputRowIndex = 0;
+ SBlockLoadSuppInfo* pSupInfo = NULL;
+ SColVal cv = {0};
+ int32_t numOfInputCols = 0;
+ int32_t numOfOutputCols = 0;
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
+ TSDB_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ outputRowIndex = pResBlock->info.rows;
+
+ pSupInfo = &pReader->suppInfo;
((int64_t*)pReader->status.pPrimaryTsCol->pData)[outputRowIndex] = pBlockData->aTSKEY[rowIndex];
i += 1;
- SColVal cv = {0};
- int32_t numOfInputCols = pBlockData->nColData;
- int32_t numOfOutputCols = pSupInfo->numOfCols;
+ numOfInputCols = pBlockData->nColData;
+ numOfOutputCols = pSupInfo->numOfCols;
while (i < numOfOutputCols && j < numOfInputCols) {
SColData* pData = tBlockDataGetColDataByIdx(pBlockData, j);
@@ -4529,9 +5284,7 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
if (pData->cid == pSupInfo->colId[i]) {
tColDataGetValue(pData, rowIndex, &cv);
code = doCopyColVal(pCol, outputRowIndex, i, &cv, pSupInfo);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
j += 1;
} else if (pData->cid > pCol->info.colId) {
// the specified column does not exist in file block, fill with null data
@@ -4543,9 +5296,7 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
while (i < numOfOutputCols) {
SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- if (pCol == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pCol, code, lino, _end, TSDB_CODE_INVALID_PARA);
colDataSetNULL(pCol, outputRowIndex);
i += 1;
@@ -4553,21 +5304,29 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
pResBlock->info.dataLoad = 1;
pResBlock->info.rows += 1;
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader) {
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SSDataBlock* pBlock = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pBlock = pReader->resBlockInfo.pResBlock;
do {
TSDBROW row = {.type = -1};
bool freeTSRow = false;
code = tsdbGetNextRowInMem(pBlockScanInfo, pReader, &row, endKey, &freeTSRow);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (row.type == -1) {
break;
@@ -4575,26 +5334,30 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
if (row.type == TSDBROW_ROW_FMT) {
code = doAppendRowFromTSRow(pBlock, pReader, row.pTSRow, pBlockScanInfo);
- if (code == TSDB_CODE_SUCCESS) {
- pBlockScanInfo->lastProcKey.ts = row.pTSRow->ts;
- pBlockScanInfo->lastProcKey.numOfPKs = row.pTSRow->numOfPKs;
- if (row.pTSRow->numOfPKs > 0) {
- tRowGetPrimaryKeyDeepCopy(row.pTSRow, &pBlockScanInfo->lastProcKey);
+ if (code != TSDB_CODE_SUCCESS) {
+ if (freeTSRow) {
+ taosMemoryFreeClear(row.pTSRow);
+ }
+ TSDB_CHECK_CODE(code, lino, _end);
+ }
+ pBlockScanInfo->lastProcKey.ts = row.pTSRow->ts;
+ pBlockScanInfo->lastProcKey.numOfPKs = row.pTSRow->numOfPKs;
+ if (row.pTSRow->numOfPKs > 0) {
+ code = tRowGetPrimaryKeyDeepCopy(row.pTSRow, &pBlockScanInfo->lastProcKey);
+ if (code != TSDB_CODE_SUCCESS) {
+ if (freeTSRow) {
+ taosMemoryFreeClear(row.pTSRow);
+ }
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
if (freeTSRow) {
- taosMemoryFree(row.pTSRow);
- }
-
- if (code) {
- return code;
+ taosMemoryFreeClear(row.pTSRow);
}
} else {
code = doAppendRowFromFileBlock(pBlock, pReader, row.pBlockData, row.iRow);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
tColRowGetKeyDeepCopy(row.pBlockData, row.iRow, pReader->suppInfo.pkSrcSlot, &pBlockScanInfo->lastProcKey);
}
@@ -4609,21 +5372,30 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
}
} while (1);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
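+// tsdbSetTableList2 rebuilds the per-table scan info for the new table list while holding the
+// reader lock; the `acquired` flag ensures the lock is released exactly once on every exit path.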
// TODO refactor: with createDataBlockScanInfo
int32_t tsdbSetTableList2(STsdbReader* pReader, const void* pTableList, int32_t num) {
- int32_t code = TSDB_CODE_SUCCESS;
- int32_t size = tSimpleHashGetSize(pReader->status.pTableMap);
-
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t size = 0;
STableBlockScanInfo** p = NULL;
+ STableUidList* pUidList = NULL;
int32_t iter = 0;
+ bool acquired = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ size = tSimpleHashGetSize(pReader->status.pTableMap);
code = tsdbAcquireReader(pReader);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ acquired = true;
while ((p = tSimpleHashIterate(pReader->status.pTableMap, p, &iter)) != NULL) {
clearBlockScanInfo(*p);
@@ -4633,21 +5405,15 @@ int32_t tsdbSetTableList2(STsdbReader* pReader, const void* pTableList, int32_t
if (size < num) {
code = ensureBlockScanInfoBuf(&pReader->blockInfoBuf, num);
- if (code) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
char* p1 = taosMemoryRealloc(pReader->status.uidList.tableUidList, sizeof(uint64_t) * num);
- if (p1 == NULL) {
- (void) tsdbReleaseReader(pReader);
- return terrno;
- }
+ TSDB_CHECK_NULL(p1, code, lino, _end, terrno);
pReader->status.uidList.tableUidList = (uint64_t*)p1;
}
- STableUidList* pUidList = &pReader->status.uidList;
+ pUidList = &pReader->status.uidList;
pUidList->currentIndex = 0;
STableKeyInfo* pList = (STableKeyInfo*)pTableList;
@@ -4656,28 +5422,34 @@ int32_t tsdbSetTableList2(STsdbReader* pReader, const void* pTableList, int32_t
STableBlockScanInfo* pInfo = NULL;
code = getPosInBlockInfoBuf(&pReader->blockInfoBuf, i, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initTableBlockScanInfo(pInfo, pList[i].uid, pReader->status.pTableMap, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
- (void) tsdbReleaseReader(pReader);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (acquired) {
+ (void)tsdbReleaseReader(pReader);
+ }
return code;
}
uint64_t tsdbGetReaderMaxVersion2(STsdbReader* pReader) { return pReader->info.verRange.maxVer; }
static int32_t doOpenReaderImpl(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
- SDataBlockIter* pBlockIter = &pStatus->blockIter;
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SDataBlockIter* pBlockIter = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pBlockIter = &pStatus->blockIter;
if (pReader->bFilesetDelimited) {
getMemTableTimeRange(pReader, &pReader->status.memTableMaxKey, &pReader->status.memTableMinKey);
@@ -4685,15 +5457,11 @@ static int32_t doOpenReaderImpl(STsdbReader* pReader) {
}
code = initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->pfSetArray, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = resetDataBlockIterator(&pStatus->blockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo),
pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
@@ -4703,8 +5471,13 @@ static int32_t doOpenReaderImpl(STsdbReader* pReader) {
if (!pStatus->loadFromFile) {
code = resetTableListIndex(pStatus, pReader->idStr);
+ TSDB_CHECK_CODE(code, lino, _end);
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -4741,55 +5514,56 @@ static int32_t setSharedPtr(STsdbReader* pDst, const STsdbReader* pSrc) {
// ====================================== EXPOSED APIs ======================================
int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
SSDataBlock* pResBlock, void** ppReader, const char* idstr, SHashObj** pIgnoreTables) {
- STimeWindow window = pCond->twindows;
- SVnodeCfg* pConf = &(((SVnode*)pVnode)->config);
- int32_t code = 0;
- int32_t lino = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STimeWindow window = {0};
+ SVnodeCfg* pConf = NULL;
+ STsdbReader* pReader = NULL;
+ int32_t capacity = 0;
- int32_t capacity = pConf->tsdbCfg.maxRows;
+ window = pCond->twindows;
+ pConf = &(((SVnode*)pVnode)->config);
+
+ capacity = pConf->tsdbCfg.maxRows;
if (pResBlock != NULL) {
code = blockDataEnsureCapacity(pResBlock, capacity);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
}
code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
// check for query time window
- STsdbReader* pReader = *ppReader;
+ pReader = *ppReader;
if (isEmptyQueryTimeWindow(&pReader->info.window) && pCond->type == TIMEWINDOW_RANGE_CONTAINED) {
tsdbDebug("%p query window not overlaps with the data set, no result returned, %s", pReader, pReader->idStr);
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) {
// update the SQueryTableDataCond to create inner reader
int32_t order = pCond->order;
if (order == TSDB_ORDER_ASC) {
- pCond->twindows.ekey = window.skey - 1;
- pCond->twindows.skey = INT64_MIN;
+ pCond->twindows = pCond->extTwindows[0];
pCond->order = TSDB_ORDER_DESC;
} else {
- pCond->twindows.skey = window.ekey + 1;
- pCond->twindows.ekey = INT64_MAX;
+ pCond->twindows = pCond->extTwindows[1];
pCond->order = TSDB_ORDER_ASC;
}
// here we only need one more row, so the capacity is set to be ONE.
code = tsdbReaderCreate(pVnode, pCond, (void**)&((STsdbReader*)pReader)->innerReader[0], 1, pResBlock, idstr);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
if (order == TSDB_ORDER_ASC) {
- pCond->twindows.skey = window.ekey + 1;
- pCond->twindows.ekey = INT64_MAX;
+ pCond->twindows = pCond->extTwindows[1];
} else {
- pCond->twindows.skey = INT64_MIN;
- pCond->twindows.ekey = window.ekey - 1;
+ pCond->twindows = pCond->extTwindows[0];
}
pCond->order = order;
code = tsdbReaderCreate(pVnode, pCond, (void**)&((STsdbReader*)pReader)->innerReader[1], 1, pResBlock, idstr);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
}
// NOTE: the endVersion in pCond is the data version not schema version, so pCond->endVersion is not correct here.
@@ -4797,14 +5571,14 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
// we should proceed in case of tmq processing.
if (pCond->suid != 0) {
code = metaGetTbTSchemaMaybeNull(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, -1, 1, &pReader->info.pSchema);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->info.pSchema == NULL) {
tsdbWarn("failed to get table schema, suid:%" PRIu64 ", ver:-1, %s", pReader->info.suid, pReader->idStr);
}
} else if (numOfTables > 0) {
STableKeyInfo* pKey = pTableList;
code = metaGetTbTSchemaMaybeNull(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1, 1, &pReader->info.pSchema);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->info.pSchema == NULL) {
tsdbWarn("failed to get table schema, uid:%" PRIu64 ", ver:-1, %s", pKey->uid, pReader->idStr);
}
@@ -4812,28 +5586,29 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
if (pReader->info.pSchema != NULL) {
code = tsdbRowMergerInit(&pReader->status.merger, pReader->info.pSchema);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
}
pReader->pSchemaMap = tSimpleHashInit(8, taosFastHash);
if (pReader->pSchemaMap == NULL) {
tsdbError("failed init schema hash for reader %s", pReader->idStr);
- TSDB_CHECK_NULL(pReader->pSchemaMap, code, lino, _err, terrno);
+ TSDB_CHECK_NULL(pReader->pSchemaMap, code, lino, _end, terrno);
}
tSimpleHashSetFreeFp(pReader->pSchemaMap, freeSchemaFunc);
if (pReader->info.pSchema != NULL) {
code = updateBlockSMAInfo(pReader->info.pSchema, &pReader->suppInfo);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
}
STsdbReader* p = (pReader->innerReader[0] != NULL) ? pReader->innerReader[0] : pReader;
- code = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables, &pReader->status.pTableMap);
- TSDB_CHECK_CODE(code, lino, _err);
+ code = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables,
+ &pReader->status.pTableMap);
+ TSDB_CHECK_CODE(code, lino, _end);
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
- TSDB_CHECK_NULL(pReader->status.pLDataIterArray, code, lino, _err, terrno);
+ TSDB_CHECK_NULL(pReader->status.pLDataIterArray, code, lino, _end, terrno);
pReader->flag = READER_STATUS_SUSPEND;
pReader->info.execMode = pCond->notLoadData ? READER_EXEC_ROWS : READER_EXEC_DATA;
@@ -4844,12 +5619,12 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
pReader, numOfTables, pReader->info.window.skey, pReader->info.window.ekey, pReader->info.verRange.minVer,
pReader->info.verRange.maxVer, pReader->idStr);
- return code;
-
-_err:
- tsdbError("failed to create data reader, error at:%d code:%s %s", lino, tstrerror(code), idstr);
- tsdbReaderClose2(*ppReader);
- *ppReader = NULL; // reset the pointer value.
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s, %s", __func__, lino, tstrerror(code), idstr);
+ tsdbReaderClose2(*ppReader);
+ *ppReader = NULL; // reset the pointer value.
+ }
return code;
}
@@ -4898,8 +5673,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
if (pReader->status.pTableMap != NULL) {
- destroyAllBlockScanInfo(pReader->status.pTableMap);
- pReader->status.pTableMap = NULL;
+ destroyAllBlockScanInfo(&pReader->status.pTableMap);
}
clearBlockScanInfoBuf(&pReader->blockInfoBuf);
@@ -4918,6 +5692,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
}
destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost);
+ pReader->status.pLDataIterArray = NULL;
taosMemoryFreeClear(pReader->status.uidList.tableUidList);
tsdbTrace("tsdb/reader-close: %p, untake snapshot", pReader);
@@ -4955,7 +5730,14 @@ void tsdbReaderClose2(STsdbReader* pReader) {
}
static int32_t doSuspendCurrentReader(STsdbReader* pCurrentReader) {
- SReaderStatus* pStatus = &pCurrentReader->status;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ STableBlockScanInfo** p = NULL;
+
+ TSDB_CHECK_NULL(pCurrentReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pCurrentReader->status;
if (pStatus->loadFromFile) {
tsdbDataFileReaderClose(&pCurrentReader->pFileReader);
@@ -4963,14 +5745,10 @@ static int32_t doSuspendCurrentReader(STsdbReader* pCurrentReader) {
SReadCostSummary* pCost = &pCurrentReader->cost;
destroySttBlockReader(pStatus->pLDataIterArray, &pCost->sttCost);
pStatus->pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
- if (pStatus->pLDataIterArray == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pStatus->pLDataIterArray, code, lino, _end, terrno);
}
// resetDataBlockScanInfo excluding lastKey
- STableBlockScanInfo** p = NULL;
-
int32_t step = ASCENDING_TRAVERSE(pCurrentReader->info.order) ? 1 : -1;
int32_t iter = 0;
while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
@@ -4983,12 +5761,20 @@ static int32_t doSuspendCurrentReader(STsdbReader* pCurrentReader) {
pStatus->uidList.currentIndex = 0;
initReaderStatus(pStatus);
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
// save reader's base state & reset top state to be reconstructed from base state
- int32_t code = 0;
pReader->status.suspendInvoked = true; // record the suspend status
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
@@ -5005,6 +5791,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
// make sure only release once
void* p = pReader->pReadSnap;
+ TSDB_CHECK_NULL(p, code, lino, _end, TSDB_CODE_INVALID_PARA);
if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) {
tsdbUntakeReadSnap2(pReader, p, false);
pReader->pReadSnap = NULL;
@@ -5026,6 +5813,11 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
#endif
tsdbDebug("reader: %p suspended in this query %s, step:%d", pReader, pReader->idStr, pReader->step);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
@@ -5041,7 +5833,7 @@ static int32_t tsdbSetQueryReseek(void* pQHandle) {
}
code = tsdbReaderSuspend2(pReader);
- (void) tsdbReleaseReader(pReader);
+ (void)tsdbReleaseReader(pReader);
return code;
} else if (code == EBUSY) {
return TSDB_CODE_VND_QUERY_BUSY;
@@ -5052,17 +5844,21 @@ static int32_t tsdbSetQueryReseek(void* pQHandle) {
}
int32_t tsdbReaderResume2(STsdbReader* pReader) {
- int32_t code = 0;
- STableBlockScanInfo** pBlockScanInfo = pReader->status.pTableIter;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STableBlockScanInfo** pBlockScanInfo = NULL;
+ int32_t numOfTables = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pBlockScanInfo = pReader->status.pTableIter;
// restore reader's state, task snapshot
- int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
+ numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
if (numOfTables > 0) {
tsdbTrace("tsdb/reader: %p, take snapshot", pReader);
code = tsdbTakeReadSnap2(pReader, tsdbSetQueryReseek, &pReader->pReadSnap, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _err;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
// open reader failure may cause the flag still to be READER_STATUS_SUSPEND, which may cause suspend reader failure.
// So we need to set it A.S.A.P
@@ -5070,9 +5866,7 @@ int32_t tsdbReaderResume2(STsdbReader* pReader) {
if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) {
code = doOpenReaderImpl(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
STsdbReader* pPrevReader = pReader->innerReader[0];
STsdbReader* pNextReader = pReader->innerReader[1];
@@ -5080,54 +5874,55 @@ int32_t tsdbReaderResume2(STsdbReader* pReader) {
// we need only one row
pPrevReader->resBlockInfo.capacity = 1;
code = setSharedPtr(pPrevReader, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pNextReader->resBlockInfo.capacity = 1;
code = setSharedPtr(pNextReader, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pReader->step == 0 || pReader->step == EXTERNAL_ROWS_PREV) {
code = doOpenReaderImpl(pPrevReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (pReader->step == EXTERNAL_ROWS_MAIN) {
code = doOpenReaderImpl(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
code = doOpenReaderImpl(pNextReader);
- }
-
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
}
tsdbDebug("reader: %p resumed uid %" PRIu64 ", numOfTable:%" PRId32 ", in this query %s", pReader,
pBlockScanInfo ? (*pBlockScanInfo)->uid : 0, numOfTables, pReader->idStr);
- return code;
-_err:
- tsdbError("failed to resume data reader, code:%s %s", tstrerror(code), pReader->idStr);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s, %s", __func__, lino, tstrerror(code),
+ (pReader && pReader->idStr) ? pReader->idStr : "");
+ }
return code;
}
static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
- SReaderStatus* pStatus = &pReader->status;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SSDataBlock* pBlock = NULL;
+ int32_t fid = 0;
+ STimeWindow win = {0};
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
- int32_t fid = pReader->status.pCurrentFileset->fid;
- STimeWindow win = {0};
+ pStatus = &pReader->status;
+ pBlock = pReader->resBlockInfo.pResBlock;
+ fid = pReader->status.pCurrentFileset->fid;
tsdbFidKeyRange(fid, pReader->pTsdb->keepCfg.days, pReader->pTsdb->keepCfg.precision, &win.skey, &win.ekey);
int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? win.skey : win.ekey;
code = buildBlockFromBufferSeqForPreFileset(pReader, endKey);
- if (code != TSDB_CODE_SUCCESS || pBlock->info.rows > 0) {
- return code;
- } else {
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (pBlock->info.rows <= 0) {
tsdbDebug("finished pre-fileset %d buffer processing. %s", fid, pReader->idStr);
pStatus->bProcMemPreFileset = false;
pStatus->processingMemPreFileSet = false;
@@ -5138,26 +5933,36 @@ static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) {
tsdbDebug("new duration %d start notification when buffer pre-fileset, %s", fid, pReader->idStr);
}
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
int32_t code = TSDB_CODE_SUCCESS;
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SSDataBlock* pBlock = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pBlock = pReader->resBlockInfo.pResBlock;
if (pStatus->loadFromFile) {
if (pStatus->bProcMemPreFileset) {
code = buildFromPreFilesetBuffer(pReader);
- if (code != TSDB_CODE_SUCCESS || pBlock->info.rows > 0) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (pBlock->info.rows > 0) {
+ goto _end;
}
}
code = buildBlockFromFiles(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
tsdbTrace("block from file rows: %" PRId64 ", will process pre-file set buffer: %d. %s", pBlock->info.rows,
pStatus->bProcMemFirstFileset, pReader->idStr);
@@ -5176,59 +5981,77 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) {
if (pBlock->info.rows <= 0) {
code = resetTableListIndex(&pReader->status, pReader->idStr);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN;
code = buildBlockFromBufferSequentially(pReader, endKey);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else { // no data in files, let's try the buffer
int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN;
code = buildBlockFromBufferSequentially(pReader, endKey);
+ TSDB_CHECK_CODE(code, lino, _end);
+ }
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
static int32_t doTsdbNextDataBlockFilesFirst(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
int32_t code = TSDB_CODE_SUCCESS;
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SSDataBlock* pBlock = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
+ pBlock = pReader->resBlockInfo.pResBlock;
if (pStatus->loadFromFile) {
code = buildBlockFromFiles(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pBlock->info.rows <= 0) {
code = resetTableListIndex(&pReader->status, pReader->idStr);
- if (code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN;
code = buildBlockFromBufferSequentially(pReader, endKey);
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else { // no data in files, let's try the buffer
int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN;
code = buildBlockFromBufferSequentially(pReader, endKey);
+ TSDB_CHECK_CODE(code, lino, _end);
+ }
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
static int32_t doTsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SSDataBlock* pBlock = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
// cleanup the data that belongs to the previous data block
- SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
+ pBlock = pReader->resBlockInfo.pResBlock;
blockDataCleanup(pBlock);
*hasNext = false;
SReaderStatus* pStatus = &pReader->status;
if (tSimpleHashGetSize(pStatus->pTableMap) == 0) {
- return code;
+ goto _end;
}
if (!pReader->bFilesetDelimited) {
@@ -5238,21 +6061,34 @@ static int32_t doTsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
}
*hasNext = pBlock->info.rows > 0;
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ bool acquired = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(hasNext, code, lino, _end, TSDB_CODE_INVALID_PARA);
*hasNext = false;
- if (isEmptyQueryTimeWindow(&pReader->info.window) || pReader->step == EXTERNAL_ROWS_NEXT ||
- pReader->code != TSDB_CODE_SUCCESS) {
- return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ if (isEmptyQueryTimeWindow(&pReader->info.window) || pReader->step == EXTERNAL_ROWS_NEXT) {
+ goto _end;
}
- SReaderStatus* pStatus = &pReader->status;
+ pStatus = &pReader->status;
// NOTE: the following code is used to test suspend/resume for tsdbReader when it blocks the commit
// the data should be ingested in round-robin and all the child tables should be created before ingesting data
@@ -5265,27 +6101,19 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
#endif
code = tsdbAcquireReader(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ acquired = true;
tsdbTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code);
if (pReader->flag == READER_STATUS_SUSPEND) {
code = tsdbReaderResume2(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- // release reader failure should be suppressed here, to avoid over-write the original error code
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pReader->innerReader[0] != NULL && pReader->step == 0) {
code = doTsdbNextDataBlock2(pReader->innerReader[0], hasNext);
- if (code) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pReader->step = EXTERNAL_ROWS_PREV;
if (*hasNext) {
@@ -5293,6 +6121,8 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
if (pStatus->composedDataBlock) {
tsdbTrace("tsdb/read: %p, unlock read mutex", pReader);
code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
}
return code;
@@ -5307,25 +6137,20 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
int32_t step = 1;
resetAllDataBlockScanInfo(pReader->status.pTableMap, pReader->innerReader[0]->info.window.ekey, step);
-
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pReader->step = EXTERNAL_ROWS_MAIN;
}
code = doTsdbNextDataBlock2(pReader, hasNext);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (*hasNext) {
if (pStatus->composedDataBlock) {
tsdbTrace("tsdb/read: %p, unlock read mutex", pReader);
code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
}
return code;
}
@@ -5338,16 +6163,10 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
int32_t step = -1;
resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->info.window.ekey, step);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = doTsdbNextDataBlock2(pReader->innerReader[1], hasNext);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
pReader->step = EXTERNAL_ROWS_NEXT;
if (*hasNext) {
@@ -5355,6 +6174,8 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
if (pStatus->composedDataBlock) {
tsdbTrace("tsdb/read: %p, unlock read mutex", pReader);
code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
}
return code;
@@ -5363,17 +6184,33 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
tsdbTrace("tsdb/read: %p, unlock read mutex", pReader);
code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (acquired) {
+ tsdbTrace("tsdb/read: %p, unlock read mutex", pReader);
+ (void)tsdbReleaseReader(pReader);
+ }
return code;
}
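+// doFillNullColSMA inserts the timestamp SMA entry and NULL aggregates for columns without
+// statistics; insertion failures from TARRAY2_INSERT_PTR are now returned to the caller instead of
+// being dropped.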
-static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols, SColumnDataAgg* pTsAgg) {
+static int32_t doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols,
+ SColumnDataAgg* pTsAgg) {
// do fill all null column value SMA info
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
int32_t i = 0, j = 0;
- int32_t size = (int32_t)TARRAY2_SIZE(&pSup->colAggArray);
- int32_t code = TARRAY2_INSERT_PTR(&pSup->colAggArray, 0, pTsAgg);
- if (code != TSDB_CODE_SUCCESS) {
- return;
- }
+ int32_t size = 0;
+
+ TSDB_CHECK_NULL(pSup, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ size = (int32_t)TARRAY2_SIZE(&pSup->colAggArray);
+ code = TARRAY2_INSERT_PTR(&pSup->colAggArray, 0, pTsAgg);
+ TSDB_CHECK_CODE(code, lino, _end);
size++;
@@ -5388,9 +6225,7 @@ static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_
if (pSup->colId[j] != PRIMARYKEY_TIMESTAMP_COL_ID) {
SColumnDataAgg nullColAgg = {.colId = pSup->colId[j], .numOfNull = numOfRows};
code = TARRAY2_INSERT_PTR(&pSup->colAggArray, i, &nullColAgg);
- if (code != TSDB_CODE_SUCCESS) {
- return;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
i += 1;
size++;
@@ -5403,42 +6238,51 @@ static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_
if (pSup->colId[j] != PRIMARYKEY_TIMESTAMP_COL_ID) {
SColumnDataAgg nullColAgg = {.colId = pSup->colId[j], .numOfNull = numOfRows};
code = TARRAY2_INSERT_PTR(&pSup->colAggArray, i, &nullColAgg);
- if (code != TSDB_CODE_SUCCESS) {
- return;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
i += 1;
}
j++;
}
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock, bool* allHave, bool* hasNullSMA) {
- SColumnDataAgg** pBlockSMA = &pDataBlock->pBlockAgg;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SColumnDataAgg** pBlockSMA = NULL;
SFileDataBlockInfo* pBlockInfo = NULL;
- int32_t code = 0;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(allHave, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pBlockSMA = &pDataBlock->pBlockAgg;
*allHave = false;
*pBlockSMA = NULL;
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
// there is no statistics data for composed block
if (pReader->status.composedDataBlock || (!pReader->suppInfo.smaValid)) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
code = getCurrentBlockInfo(&pReader->status.blockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
if (pResBlock->info.id.uid != pBlockInfo->uid) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
// int64_t st = taosGetTimestampUs();
@@ -5450,14 +6294,14 @@ int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock,
if (code != TSDB_CODE_SUCCESS) {
tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pBlockInfo->uid, tstrerror(code),
pReader->idStr);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (pSup->colAggArray.size > 0) {
*allHave = true;
} else {
*pBlockSMA = NULL;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
// always load the first primary timestamp column data
@@ -5474,16 +6318,15 @@ int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock,
if (pResBlock->pBlockAgg == NULL) {
size_t num = taosArrayGetSize(pResBlock->pDataBlock);
pResBlock->pBlockAgg = taosMemoryCalloc(num, sizeof(SColumnDataAgg));
- if (pResBlock->pBlockAgg == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pResBlock->pBlockAgg, code, lino, _end, terrno);
for (int i = 0; i < num; ++i) {
pResBlock->pBlockAgg[i].colId = -1;
}
}
// do fill all null column value SMA info
- doFillNullColSMA(pSup, pBlockInfo->numRow, numOfCols, pTsAgg);
+ code = doFillNullColSMA(pSup, pBlockInfo->numRow, numOfCols, pTsAgg);
+ TSDB_CHECK_CODE(code, lino, _end);
size_t size = pSup->colAggArray.size;
@@ -5510,49 +6353,67 @@ int32_t tsdbRetrieveDatablockSMA2(STsdbReader* pReader, SSDataBlock* pDataBlock,
pReader->cost.smaLoadTime += 0; // elapsedTime;
tsdbDebug("vgId:%d, succeed to load block SMA for uid %" PRIu64 ", %s", 0, pBlockInfo->uid, pReader->idStr);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
static int32_t doRetrieveDataBlock(STsdbReader* pReader, SSDataBlock** pBlock) {
- SReaderStatus* pStatus = &pReader->status;
- int32_t code = TSDB_CODE_SUCCESS;
- SFileDataBlockInfo* pBlockInfo = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ SFileDataBlockInfo* pBlockInfo = NULL;
+ STableBlockScanInfo* pBlockScanInfo = NULL;
+ bool reset = false;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pStatus = &pReader->status;
*pBlock = NULL;
code = getCurrentBlockInfo(&pStatus->blockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
- if (pReader->code != TSDB_CODE_SUCCESS) {
- return pReader->code;
- }
+ code = pReader->code;
+ TSDB_CHECK_CODE(code, lino, _end);
- STableBlockScanInfo* pBlockScanInfo = NULL;
code = getTableBlockScanInfo(pStatus->pTableMap, pBlockInfo->uid, &pBlockScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ reset = true;
code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData, pBlockScanInfo->uid);
- if (code != TSDB_CODE_SUCCESS) {
- tBlockDataReset(&pStatus->fileBlockData);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = copyBlockDataToSDataBlock(pReader, &pBlockScanInfo->lastProcKey);
- if (code != TSDB_CODE_SUCCESS) {
- tBlockDataReset(&pStatus->fileBlockData);
- }
+ TSDB_CHECK_CODE(code, lino, _end);
*pBlock = pReader->resBlockInfo.pResBlock;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ if (reset) {
+ tBlockDataReset(&pStatus->fileBlockData);
+ }
+ }
return code;
}
int32_t tsdbRetrieveDataBlock2(STsdbReader* pReader, SSDataBlock** pBlock, SArray* pIdList) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STsdbReader* pTReader = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pBlock = NULL;
- STsdbReader* pTReader = pReader;
+ pTReader = pReader;
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
if (pReader->step == EXTERNAL_ROWS_PREV) {
pTReader = pReader->innerReader[0];
@@ -5566,47 +6427,55 @@ int32_t tsdbRetrieveDataBlock2(STsdbReader* pReader, SSDataBlock** pBlock, SArra
// tsdbReaderSuspend2(pReader);
// tsdbReaderResume2(pReader);
*pBlock = pTReader->resBlockInfo.pResBlock;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- int32_t code = doRetrieveDataBlock(pTReader, pBlock);
+ code = doRetrieveDataBlock(pTReader, pBlock);
tsdbTrace("tsdb/read-retrieve: %p, unlock read mutex", pReader);
(void) tsdbReleaseReader(pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
// tsdbReaderSuspend2(pReader);
// tsdbReaderResume2(pReader);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) {
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool acquired = false;
tsdbTrace("tsdb/reader-reset: %p, take read mutex", pReader);
code = tsdbAcquireReader(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ acquired = true;
if (pReader->flag == READER_STATUS_SUSPEND) {
code = tsdbReaderResume2(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
if (isEmptyQueryTimeWindow(&pReader->info.window) || pReader->pReadSnap == NULL) {
tsdbDebug("tsdb reader reset return %p, %s", pReader->pReadSnap, pReader->idStr);
- return tsdbReleaseReader(pReader);
+ code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
+ goto _end;
}
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
+ TSDB_CHECK_NULL(pCond, code, lino, _end, TSDB_CODE_INVALID_PARA);
pReader->info.order = pCond->order;
pReader->type = TIMEWINDOW_RANGE_CONTAINED;
- pReader->info.window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);
+ code = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows, &pReader->info.window);
+ TSDB_CHECK_CODE(code, lino, _end);
pStatus->loadFromFile = true;
pStatus->pTableIter = NULL;
@@ -5619,22 +6488,13 @@ int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) {
int32_t numOfTables = tSimpleHashGetSize(pStatus->pTableMap);
code = initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->pfSetArray, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = resetTableListIndex(&pReader->status, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
bool asc = ASCENDING_TRAVERSE(pReader->info.order);
int32_t step = asc ? 1 : -1;
@@ -5650,18 +6510,13 @@ int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) {
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
code = resetTableListIndex(pStatus, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader,
numOfTables, pReader->info.window.skey, pReader->info.window.ekey, pReader->idStr);
-
- (void) tsdbReleaseReader(pReader);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
@@ -5671,6 +6526,16 @@ int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) {
pReader->info.window.ekey, pReader->idStr);
code = tsdbReleaseReader(pReader);
+ acquired = false;
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (acquired) {
+ (void)tsdbReleaseReader(pReader);
+ }
return code;
}
@@ -5687,7 +6552,11 @@ static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t num
int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pTableBlockInfo) {
int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
const int32_t numOfBuckets = 20.0;
+ bool acquired = false;
+
+ TSDB_CHECK_NULL(pTableBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
pTableBlockInfo->totalSize = 0;
pTableBlockInfo->totalRows = 0;
@@ -5695,15 +6564,12 @@ int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT
// find the start data block in file
code = tsdbAcquireReader(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ acquired = true;
if (pReader->flag == READER_STATUS_SUSPEND) {
code = tsdbReaderResume2(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return tsdbReleaseReader(pReader);
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
SMergeTreeConf conf = {
@@ -5744,9 +6610,7 @@ int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT
if (hasNext) {
SFileDataBlockInfo* pBlockInfo = NULL;
code = getCurrentBlockInfo(pBlockIter, &pBlockInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- break;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
int32_t numOfRows = pBlockInfo->numRow;
@@ -5765,10 +6629,11 @@ int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT
int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows, numOfBuckets);
pTableBlockInfo->blockRowsHisto[bucketIndex]++;
- hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr);
+ hasNext = blockIteratorNext(&pStatus->blockIter);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
- if ((code != TSDB_CODE_SUCCESS) || (pStatus->loadFromFile == false)) {
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (pStatus->loadFromFile == false) {
break;
}
@@ -5784,14 +6649,17 @@ int32_t tsdbGetFileBlocksDistInfo2(STsdbReader* pReader, STableBlockDistInfo* pT
}
// record the data in stt files
- (void) tsdbReleaseReader(pReader);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (acquired) {
+ (void)tsdbReleaseReader(pReader);
+ }
return code;
}
static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t* pMinKey) {
- int32_t code = TSDB_CODE_SUCCESS;
- int64_t rows = 0;
-
SReaderStatus* pStatus = &pReader->status;
int32_t iter = 0;
@@ -5836,22 +6704,24 @@ static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t
*pMinKey = minKey;
}
-int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader) {
- int32_t code = TSDB_CODE_SUCCESS;
- int64_t rows = 0;
+int32_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader, uint32_t* rows) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SReaderStatus* pStatus = NULL;
+ bool acquired = false;
- SReaderStatus* pStatus = &pReader->status;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(rows, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *rows = 0;
+ pStatus = &pReader->status;
code = tsdbAcquireReader(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
+ acquired = true;
if (pReader->flag == READER_STATUS_SUSPEND) {
code = tsdbReaderResume2(pReader);
- if (code != TSDB_CODE_SUCCESS) {
- (void) tsdbReleaseReader(pReader);
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
int32_t iter = 0;
@@ -5864,7 +6734,7 @@ int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader) {
if (pReader->pReadSnap->pMem != NULL) {
d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->info.suid, pBlockScanInfo->uid);
if (d != NULL) {
- rows += tsdbGetNRowsInTbData(d);
+ *rows += tsdbGetNRowsInTbData(d);
}
}
@@ -5872,7 +6742,7 @@ int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader) {
if (pReader->pReadSnap->pIMem != NULL) {
di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->info.suid, pBlockScanInfo->uid);
if (di != NULL) {
- rows += tsdbGetNRowsInTbData(di);
+ *rows += tsdbGetNRowsInTbData(di);
}
}
@@ -5880,9 +6750,14 @@ int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader) {
pStatus->pTableIter = tSimpleHashIterate(pStatus->pTableMap, pStatus->pTableIter, &iter);
}
- (void) tsdbReleaseReader(pReader);
-
- return rows;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (acquired) {
+ (void)tsdbReleaseReader(pReader);
+ }
+ return code;
}
int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_t* suid) {
@@ -6037,17 +6912,23 @@ void tsdbUntakeReadSnap2(STsdbReader* pReader, STsdbReadSnap* pSnap, bool proact
// if failed, do nothing
int32_t tsdbReaderSetId(void* p, const char* idstr) {
- STsdbReader* pReader = (STsdbReader*) p;
- taosMemoryFreeClear(pReader->idStr);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STsdbReader* pReader = (STsdbReader*)p;
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ taosMemoryFreeClear(pReader->idStr);
pReader->idStr = taosStrdup(idstr);
- if (pReader->idStr == NULL) {
- tsdbError("%s failed to build reader id, code:%s", idstr, tstrerror(terrno));
- return terrno;
- }
+ TSDB_CHECK_NULL(pReader->idStr, code, lino, _end, terrno);
pReader->status.fileIter.pSttBlockReader->mergeTree.idStr = pReader->idStr;
- return 0;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
void tsdbReaderSetCloseFlag(STsdbReader* pReader) { /*pReader->code = TSDB_CODE_TSC_QUERY_CANCELLED;*/ }
diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
index a65fc7f3ed..5f77d03efc 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
@@ -25,47 +25,57 @@ static bool overlapWithDelSkylineWithoutVer(STableBlockScanInfo* pBlockScanInfo,
int32_t order);
static int32_t initBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables) {
- int32_t num = numOfTables / pBuf->numPerBucket;
- int32_t remainder = numOfTables % pBuf->numPerBucket;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t num = 0;
+ int32_t remainder = 0;
+ STableBlockScanInfo* p = NULL;
+ const void* px = NULL;
+
+ TSDB_CHECK_CONDITION(pBuf && pBuf->numPerBucket > 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(numOfTables >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ num = numOfTables / pBuf->numPerBucket;
+ remainder = numOfTables % pBuf->numPerBucket;
if (pBuf->pData == NULL) {
pBuf->pData = taosArrayInit(num + 1, POINTER_BYTES);
- if (pBuf->pData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pBuf->pData, code, lino, _end, terrno);
}
for (int32_t i = 0; i < num; ++i) {
- char* p = taosMemoryCalloc(pBuf->numPerBucket, sizeof(STableBlockScanInfo));
- if (p == NULL) {
- return terrno;
- }
+ p = taosMemoryCalloc(pBuf->numPerBucket, sizeof(STableBlockScanInfo));
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
- void* px = taosArrayPush(pBuf->pData, &p);
- if (px == NULL) {
- return terrno;
- }
+ px = taosArrayPush(pBuf->pData, &p);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
+ p = NULL;
}
if (remainder > 0) {
- char* p = taosMemoryCalloc(remainder, sizeof(STableBlockScanInfo));
- if (p == NULL) {
- return terrno;
- }
- void* px = taosArrayPush(pBuf->pData, &p);
- if (px == NULL) {
- return terrno;
- }
+ p = taosMemoryCalloc(remainder, sizeof(STableBlockScanInfo));
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
+
+ px = taosArrayPush(pBuf->pData, &p);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
+ p = NULL;
}
pBuf->numOfTables = numOfTables;
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (p) {
+ taosMemoryFreeClear(p);
+ }
+ return code;
}
int32_t uidComparFunc(const void* p1, const void* p2) {
- uint64_t pu1 = *(uint64_t*)p1;
- uint64_t pu2 = *(uint64_t*)p2;
+ uint64_t pu1 = *(const uint64_t*)p1;
+ uint64_t pu2 = *(const uint64_t*)p2;
if (pu1 == pu2) {
return 0;
} else {
@@ -74,90 +84,133 @@ int32_t uidComparFunc(const void* p1, const void* p2) {
}
int32_t ensureBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t num = 0;
+ int32_t remainder = 0;
+ STableBlockScanInfo* p = NULL;
+ const void* px = NULL;
+
+ TSDB_CHECK_CONDITION(pBuf && pBuf->numPerBucket > 0 && pBuf->numOfTables >= 0, code, lino, _end,
+ TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(numOfTables >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
if (numOfTables <= pBuf->numOfTables) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
- if (pBuf->numOfTables > 0) {
- STableBlockScanInfo** p = (STableBlockScanInfo**)taosArrayPop(pBuf->pData);
- taosMemoryFree(*p);
- pBuf->numOfTables /= pBuf->numPerBucket;
+ remainder = pBuf->numOfTables % pBuf->numPerBucket;
+ if (remainder > 0) {
+ TSDB_CHECK_CONDITION(taosArrayGetSize(pBuf->pData) > 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ px = taosArrayPop(pBuf->pData);
+ TSDB_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ p = *(STableBlockScanInfo**)px;
+ taosMemoryFreeClear(p);
+ pBuf->numOfTables -= remainder;
}
- int32_t num = (numOfTables - pBuf->numOfTables) / pBuf->numPerBucket;
- int32_t remainder = (numOfTables - pBuf->numOfTables) % pBuf->numPerBucket;
+ num = (numOfTables - pBuf->numOfTables) / pBuf->numPerBucket;
+ remainder = (numOfTables - pBuf->numOfTables) % pBuf->numPerBucket;
+
if (pBuf->pData == NULL) {
pBuf->pData = taosArrayInit(num + 1, POINTER_BYTES);
- if (pBuf->pData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pBuf->pData, code, lino, _end, terrno);
}
for (int32_t i = 0; i < num; ++i) {
- char* p = taosMemoryCalloc(pBuf->numPerBucket, sizeof(STableBlockScanInfo));
- if (p == NULL) {
- return terrno;
- }
+ p = taosMemoryCalloc(pBuf->numPerBucket, sizeof(STableBlockScanInfo));
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
- void* px = taosArrayPush(pBuf->pData, &p);
- if (px == NULL) {
- return terrno;
- }
+ px = taosArrayPush(pBuf->pData, &p);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
+ p = NULL;
}
if (remainder > 0) {
- char* p = taosMemoryCalloc(remainder, sizeof(STableBlockScanInfo));
- if (p == NULL) {
- return terrno;
- }
- void* px = taosArrayPush(pBuf->pData, &p);
- if (px == NULL) {
- return terrno;
- }
+ p = taosMemoryCalloc(remainder, sizeof(STableBlockScanInfo));
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
+
+ px = taosArrayPush(pBuf->pData, &p);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
+ p = NULL;
}
pBuf->numOfTables = numOfTables;
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ if (p) {
+ taosMemoryFreeClear(p);
+ }
+ return code;
}
void clearBlockScanInfoBuf(SBlockInfoBuf* pBuf) {
- size_t num = taosArrayGetSize(pBuf->pData);
- for (int32_t i = 0; i < num; ++i) {
- char** p = taosArrayGet(pBuf->pData, i);
- if (p != NULL) {
- taosMemoryFree(*p);
- }
+ if (pBuf == NULL) return;
+ if (pBuf->pData != NULL) {
+ taosArrayDestroyP(pBuf->pData, (FDelete)taosMemoryFree);
+ pBuf->pData = NULL;
}
-
- taosArrayDestroy(pBuf->pData);
}
int32_t getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index, STableBlockScanInfo** pInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t bucketIndex = 0;
+ STableBlockScanInfo** pBucket = NULL;
+
+ TSDB_CHECK_CONDITION(pBuf && pBuf->numPerBucket > 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(index >= 0 && index < pBuf->numOfTables, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pInfo = NULL;
- int32_t bucketIndex = index / pBuf->numPerBucket;
- char** pBucket = taosArrayGet(pBuf->pData, bucketIndex);
- if (pBucket == NULL) {
- return TSDB_CODE_NOT_FOUND;
- }
+ bucketIndex = index / pBuf->numPerBucket;
+ pBucket = taosArrayGet(pBuf->pData, bucketIndex);
+ TSDB_CHECK_NULL(pBucket, code, lino, _end, terrno);
- *pInfo = (STableBlockScanInfo*)((*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo));
- return TSDB_CODE_SUCCESS;
+ *pInfo = (*pBucket) + (index % pBuf->numPerBucket);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t getTableBlockScanInfo(SSHashObj* pTableMap, uint64_t uid, STableBlockScanInfo** pInfo, const char* id) {
- *pInfo = *(STableBlockScanInfo**)tSimpleHashGet(pTableMap, &uid, sizeof(uid));
- if (pInfo == NULL) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STableBlockScanInfo** pVal = NULL;
+
+ TSDB_CHECK_NULL(pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(id, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pVal = (STableBlockScanInfo**)tSimpleHashGet(pTableMap, &uid, sizeof(uid));
+ if (pVal == NULL) {
int32_t size = tSimpleHashGetSize(pTableMap);
tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, total tables:%d, %s", uid, size, id);
- return TSDB_CODE_INVALID_PARA;
+ code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
+ *pInfo = *pVal;
+ TSDB_CHECK_NULL(*pInfo, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t initRowKey(SRowKey* pKey, int64_t ts, int32_t numOfPks, int32_t type, int32_t len, bool asc) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pKey->numOfPKs = numOfPks;
pKey->ts = ts;
@@ -194,7 +247,8 @@ int32_t initRowKey(SRowKey* pKey, int64_t ts, int32_t numOfPks, int32_t type, in
break;
}
default:
- return TSDB_CODE_INVALID_PARA;
+ code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
} else {
switch (type) {
@@ -223,16 +277,14 @@ int32_t initRowKey(SRowKey* pKey, int64_t ts, int32_t numOfPks, int32_t type, in
pKey->pks[0].val = UINT8_MAX;
break;
default:
- return TSDB_CODE_INVALID_PARA;
+ code = TSDB_CODE_INVALID_PARA;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
} else {
- pKey->pks[0].pData = taosMemoryCalloc(1, len);
pKey->pks[0].nData = 0;
-
- if (pKey->pks[0].pData == NULL) {
- return terrno;
- }
+ pKey->pks[0].pData = taosMemoryCalloc(1, len);
+ TSDB_CHECK_NULL(pKey->pks[0].pData, code, lino, _end, terrno);
if (!asc) {
pKey->numOfPKs = 2;
@@ -240,7 +292,11 @@ int32_t initRowKey(SRowKey* pKey, int64_t ts, int32_t numOfPks, int32_t type, in
}
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
void clearRowKey(SRowKey* pKey) {
@@ -250,53 +306,65 @@ void clearRowKey(SRowKey* pKey) {
taosMemoryFreeClear(pKey->pks[0].pData);
}
-static int32_t initLastProcKey(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- int32_t code = 0;
- int32_t numOfPks = pReader->suppInfo.numOfPks;
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
- int8_t type = pReader->suppInfo.pk.type;
- int32_t bytes = pReader->suppInfo.pk.bytes;
+static int32_t initLastProcKey(STableBlockScanInfo* pScanInfo, const STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t numOfPks = 0;
+ int32_t type = 0;
+ int32_t bytes = 0;
+ bool asc = false;
+ SRowKey* pRowKey = NULL;
- SRowKey* pRowKey = &pScanInfo->lastProcKey;
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ numOfPks = pReader->suppInfo.numOfPks;
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
+ type = pReader->suppInfo.pk.type;
+ bytes = pReader->suppInfo.pk.bytes;
+
+ pRowKey = &pScanInfo->lastProcKey;
if (asc) {
int64_t skey = pReader->info.window.skey;
int64_t ts = (skey > INT64_MIN) ? (skey - 1) : skey;
code = initRowKey(pRowKey, ts, numOfPks, type, bytes, asc);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initRowKey(&pScanInfo->sttKeyInfo.nextProcKey, skey, numOfPks, type, bytes, asc);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
int64_t ekey = pReader->info.window.ekey;
int64_t ts = (ekey < INT64_MAX) ? (ekey + 1) : ekey;
code = initRowKey(pRowKey, ts, numOfPks, type, bytes, asc);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initRowKey(&pScanInfo->sttKeyInfo.nextProcKey, ekey, numOfPks, type, bytes, asc);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
code = initRowKey(&pScanInfo->sttRange.skey, INT64_MAX, numOfPks, type, bytes, asc);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
code = initRowKey(&pScanInfo->sttRange.ekey, INT64_MIN, numOfPks, type, bytes, asc);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
int32_t initTableBlockScanInfo(STableBlockScanInfo* pScanInfo, uint64_t uid, SSHashObj* pTableMap,
STsdbReader* pReader) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pScanInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pScanInfo->uid = uid;
INIT_KEYRANGE(&pScanInfo->sttRange);
INIT_TIMEWINDOW(&pScanInfo->filesetWindow);
@@ -304,46 +372,52 @@ int32_t initTableBlockScanInfo(STableBlockScanInfo* pScanInfo, uint64_t uid, SSH
pScanInfo->cleanSttBlocks = false;
pScanInfo->sttBlockReturned = false;
- int32_t code = initLastProcKey(pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = initLastProcKey(pScanInfo, pReader);
+ TSDB_CHECK_CODE(code, lino, _end);
pScanInfo->sttKeyInfo.status = STT_FILE_READER_UNINIT;
code = tSimpleHashPut(pTableMap, &pScanInfo->uid, sizeof(uint64_t), &pScanInfo, POINTER_BYTES);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
tsdbTrace("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pReader, pScanInfo->uid,
pScanInfo->lastProcKey.ts, pReader->idStr);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
// NOTE: speed up the whole processing by preparing the buffer for STableBlockScanInfo in batch mode
int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
STableUidList* pUidList, int32_t numOfTables, SSHashObj** pHashObj) {
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SSHashObj* pTableMap = NULL;
+ int64_t st = 0;
+
+ TSDB_CHECK_NULL(pUidList, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pHashObj, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pHashObj = NULL;
// allocate buffer in order to load data blocks from file
// todo use simple hash instead, optimize the memory consumption
- SSHashObj* pTableMap = tSimpleHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
- if (pTableMap == NULL) {
- return terrno;
- }
+ pTableMap = tSimpleHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+ TSDB_CHECK_NULL(pTableMap, code, lino, _end, terrno);
- int64_t st = taosGetTimestampUs();
+ st = taosGetTimestampUs();
code = initBlockScanInfoBuf(pBuf, numOfTables);
if (code != TSDB_CODE_SUCCESS) {
tSimpleHashCleanup(pTableMap);
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
if (pUidList->tableUidList == NULL) {
tSimpleHashCleanup(pTableMap);
- return terrno;
+ TSDB_CHECK_NULL(pUidList->tableUidList, code, lino, _end, terrno);
}
pUidList->currentIndex = 0;
@@ -354,11 +428,13 @@ int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, c
STableBlockScanInfo* pScanInfo = NULL;
code = getPosInBlockInfoBuf(pBuf, j, &pScanInfo);
if (code != TSDB_CODE_SUCCESS) {
+ lino = __LINE__;
break;
}
code = initTableBlockScanInfo(pScanInfo, idList[j].uid, pTableMap, pTsdbReader);
if (code != TSDB_CODE_SUCCESS) {
+ lino = __LINE__;
break;
}
}
@@ -371,15 +447,23 @@ int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, c
pTsdbReader->idStr);
*pHashObj = pTableMap;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step) {
- void* p = NULL;
- int32_t iter = 0;
+ STableBlockScanInfo** p = NULL;
+ int32_t iter = 0;
while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
- STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
+ STableBlockScanInfo* pInfo = *p;
+ if (pInfo == NULL) {
+ continue;
+ }
pInfo->iterInit = false;
pInfo->iter.hasVal = false;
@@ -402,6 +486,10 @@ void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step) {
}
void clearBlockScanInfo(STableBlockScanInfo* p) {
+ if (p == NULL) {
+ return;
+ }
+
p->iterInit = false;
p->iter.hasVal = false;
p->iiter.hasVal = false;
@@ -432,18 +520,26 @@ void clearBlockScanInfo(STableBlockScanInfo* p) {
clearRowKey(&p->sttKeyInfo.nextProcKey);
}
-void destroyAllBlockScanInfo(SSHashObj* pTableMap) {
- void* p = NULL;
- int32_t iter = 0;
+void destroyAllBlockScanInfo(SSHashObj** pTableMap) {
+ STableBlockScanInfo** p = NULL;
+ int32_t iter = 0;
- while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
- clearBlockScanInfo(*(STableBlockScanInfo**)p);
+ if (pTableMap == NULL || *pTableMap == NULL) {
+ return;
}
- tSimpleHashCleanup(pTableMap);
+ while ((p = tSimpleHashIterate(*pTableMap, p, &iter)) != NULL) {
+ clearBlockScanInfo(*p);
+ }
+
+ tSimpleHashCleanup(*pTableMap);
+ *pTableMap = NULL;
}
static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
+ if (pScanInfo == NULL) {
+ return;
+ }
  // reset the index in the last block when handling a new file
taosArrayClear(pScanInfo->pBlockList);
taosArrayClear(pScanInfo->pBlockIdxList);
@@ -458,8 +554,8 @@ static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
void cleanupInfoForNextFileset(SSHashObj* pTableMap) {
STableBlockScanInfo** p = NULL;
+ int32_t iter = 0;
- int32_t iter = 0;
while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
doCleanupInfoForNextFileset(*p);
}
@@ -467,6 +563,10 @@ void cleanupInfoForNextFileset(SSHashObj* pTableMap) {
// brin records iterator
void initBrinRecordIter(SBrinRecordIter* pIter, SDataFileReader* pReader, SArray* pList) {
+ if (pIter == NULL) {
+ return;
+ }
+
(void)memset(&pIter->block, 0, sizeof(SBrinBlock));
(void)memset(&pIter->record, 0, sizeof(SBrinRecord));
pIter->blockIndex = -1;
@@ -477,89 +577,133 @@ void initBrinRecordIter(SBrinRecordIter* pIter, SDataFileReader* pReader, SArray
}
int32_t getNextBrinRecord(SBrinRecordIter* pIter, SBrinRecord** pRecord) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
+ TSDB_CHECK_NULL(pIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRecord, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
*pRecord = NULL;
if (pIter->blockIndex == -1 || (pIter->recordIndex + 1) >= pIter->block.numOfRecords) {
pIter->blockIndex += 1;
if (pIter->blockIndex >= taosArrayGetSize(pIter->pBrinBlockList)) {
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
pIter->pCurrentBlk = taosArrayGet(pIter->pBrinBlockList, pIter->blockIndex);
- if (pIter->pCurrentBlk == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_NULL(pIter->pCurrentBlk, code, lino, _end, terrno);
tBrinBlockClear(&pIter->block);
- int32_t code = tsdbDataFileReadBrinBlock(pIter->pReader, pIter->pCurrentBlk, &pIter->block);
+ TSDB_CHECK_NULL(pIter->pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ code = tsdbDataFileReadBrinBlock(pIter->pReader, pIter->pCurrentBlk, &pIter->block);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("failed to read brinBlock from file, code:%s", tstrerror(code));
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
pIter->recordIndex = -1;
}
pIter->recordIndex += 1;
- int32_t code = tBrinBlockGet(&pIter->block, pIter->recordIndex, &pIter->record);
+ code = tBrinBlockGet(&pIter->block, pIter->recordIndex, &pIter->record);
*pRecord = &pIter->record;
+ TSDB_CHECK_CODE(code, lino, _end);
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return code;
}
-void clearBrinBlockIter(SBrinRecordIter* pIter) { tBrinBlockDestroy(&pIter->block); }
+void clearBrinBlockIter(SBrinRecordIter* pIter) {
+ if (pIter != NULL) {
+ tBrinBlockDestroy(&pIter->block);
+ }
+}
// initialize the file block access order
// sort the file blocks according to the offset of each data block in the files
static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) {
+ if (pSup == NULL) {
+ return;
+ }
+
taosMemoryFreeClear(pSup->numOfBlocksPerTable);
taosMemoryFreeClear(pSup->indexPerTable);
- for (int32_t i = 0; i < pSup->numOfTables; ++i) {
- SBlockOrderWrapper* pBlockInfo = pSup->pDataBlockInfo[i];
- taosMemoryFreeClear(pBlockInfo);
- }
+ if (pSup->pDataBlockInfo != NULL) {
+ for (int32_t i = 0; i < pSup->numOfTables; ++i) {
+ SBlockOrderWrapper* pBlockInfo = pSup->pDataBlockInfo[i];
+ taosMemoryFreeClear(pBlockInfo);
+ }
- taosMemoryFreeClear(pSup->pDataBlockInfo);
+ taosMemoryFreeClear(pSup->pDataBlockInfo);
+ }
}
static int32_t initBlockOrderSupporter(SBlockOrderSupporter* pSup, int32_t numOfTables) {
- pSup->pDataBlockInfo = taosMemoryCalloc(1, POINTER_BYTES * numOfTables);
- pSup->indexPerTable = taosMemoryCalloc(1, sizeof(int32_t) * numOfTables);
- pSup->numOfBlocksPerTable = taosMemoryCalloc(1, sizeof(int32_t) * numOfTables);
- pSup->numOfTables = 0;
- if (pSup->pDataBlockInfo == NULL || pSup->indexPerTable == NULL || pSup->numOfBlocksPerTable == NULL) {
- cleanupBlockOrderSupporter(pSup);
- return terrno;
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
- return TSDB_CODE_SUCCESS;
+ TSDB_CHECK_NULL(pSup, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(numOfTables >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pSup->pDataBlockInfo = taosMemoryCalloc(numOfTables, POINTER_BYTES);
+ TSDB_CHECK_NULL(pSup->pDataBlockInfo, code, lino, _end, terrno);
+ pSup->indexPerTable = taosMemoryCalloc(numOfTables, sizeof(int32_t));
+ TSDB_CHECK_NULL(pSup->indexPerTable, code, lino, _end, terrno);
+ pSup->numOfBlocksPerTable = taosMemoryCalloc(numOfTables, sizeof(int32_t));
+ TSDB_CHECK_NULL(pSup->numOfBlocksPerTable, code, lino, _end, terrno);
+ pSup->numOfTables = 0;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, void* param) {
- int32_t leftIndex = *(int32_t*)pLeft;
- int32_t rightIndex = *(int32_t*)pRight;
+ int32_t leftIndex = 0;
+ int32_t rightIndex = 0;
+ int32_t leftTableBlockIndex = 0;
+ int32_t rightTableBlockIndex = 0;
+ const SBlockOrderSupporter* pSupporter = NULL;
+ const SBlockOrderWrapper* pLeftBlock = NULL;
+ const SBlockOrderWrapper* pRightBlock = NULL;
- SBlockOrderSupporter* pSupporter = (SBlockOrderSupporter*)param;
+ leftIndex = *(const int32_t*)pLeft;
+ rightIndex = *(const int32_t*)pRight;
+ pSupporter = (const SBlockOrderSupporter*)param;
- int32_t leftTableBlockIndex = pSupporter->indexPerTable[leftIndex];
- int32_t rightTableBlockIndex = pSupporter->indexPerTable[rightIndex];
+ leftTableBlockIndex = pSupporter->indexPerTable[leftIndex];
+ rightTableBlockIndex = pSupporter->indexPerTable[rightIndex];
- if (leftTableBlockIndex > pSupporter->numOfBlocksPerTable[leftIndex]) {
+ if (leftTableBlockIndex >= pSupporter->numOfBlocksPerTable[leftIndex]) {
/* left block is empty */
return 1;
- } else if (rightTableBlockIndex > pSupporter->numOfBlocksPerTable[rightIndex]) {
+ } else if (rightTableBlockIndex >= pSupporter->numOfBlocksPerTable[rightIndex]) {
/* right block is empty */
return -1;
}
- SBlockOrderWrapper* pLeftBlock = &pSupporter->pDataBlockInfo[leftIndex][leftTableBlockIndex];
- SBlockOrderWrapper* pRightBlock = &pSupporter->pDataBlockInfo[rightIndex][rightTableBlockIndex];
+ pLeftBlock = &pSupporter->pDataBlockInfo[leftIndex][leftTableBlockIndex];
+ pRightBlock = &pSupporter->pDataBlockInfo[rightIndex][rightTableBlockIndex];
return pLeftBlock->offset > pRightBlock->offset ? 1 : -1;
}
int32_t recordToBlockInfo(SFileDataBlockInfo* pBlockInfo, SBrinRecord* record) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ const SRowKey* pFirstKey = NULL;
+ const SRowKey* pLastKey = NULL;
+
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(record, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
pBlockInfo->uid = record->uid;
pBlockInfo->firstKey = record->firstKey.key.ts;
pBlockInfo->lastKey = record->lastKey.key.ts;
@@ -573,40 +717,50 @@ int32_t recordToBlockInfo(SFileDataBlockInfo* pBlockInfo, SBrinRecord* record) {
pBlockInfo->numRow = record->numRow;
pBlockInfo->count = record->count;
- SRowKey* pFirstKey = &record->firstKey.key;
+ pFirstKey = &record->firstKey.key;
+ pLastKey = &record->lastKey.key;
+ TSDB_CHECK_CONDITION((pFirstKey->numOfPKs == pLastKey->numOfPKs), code, lino, _end, TSDB_CODE_INVALID_PARA);
if (pFirstKey->numOfPKs > 0) {
if (IS_NUMERIC_TYPE(pFirstKey->pks[0].type)) {
pBlockInfo->firstPk.val = pFirstKey->pks[0].val;
- pBlockInfo->lastPk.val = record->lastKey.key.pks[0].val;
+ pBlockInfo->lastPk.val = pLastKey->pks[0].val;
} else {
- char* p = taosMemoryCalloc(1, pFirstKey->pks[0].nData + VARSTR_HEADER_SIZE);
- if (p == NULL) {
- return terrno;
- }
- memcpy(varDataVal(p), pFirstKey->pks[0].pData, pFirstKey->pks[0].nData);
- varDataSetLen(p, pFirstKey->pks[0].nData);
+ int32_t keyLen = pFirstKey->pks[0].nData;
+ char* p = taosMemoryMalloc(keyLen + VARSTR_HEADER_SIZE);
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
+ memcpy(varDataVal(p), pFirstKey->pks[0].pData, keyLen);
+ varDataSetLen(p, keyLen);
pBlockInfo->firstPk.pData = (uint8_t*)p;
- int32_t keyLen = record->lastKey.key.pks[0].nData;
- p = taosMemoryCalloc(1, keyLen + VARSTR_HEADER_SIZE);
- if (p == NULL) {
- return terrno;
- }
- memcpy(varDataVal(p), record->lastKey.key.pks[0].pData, keyLen);
+ keyLen = pLastKey->pks[0].nData;
+ p = taosMemoryMalloc(keyLen + VARSTR_HEADER_SIZE);
+ TSDB_CHECK_NULL(p, code, lino, _end, terrno);
+ memcpy(varDataVal(p), pLastKey->pks[0].pData, keyLen);
varDataSetLen(p, keyLen);
pBlockInfo->lastPk.pData = (uint8_t*)p;
}
}
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
static void freePkItem(void* pItem) {
SFileDataBlockInfo* p = pItem;
- taosMemoryFreeClear(p->firstPk.pData);
- taosMemoryFreeClear(p->lastPk.pData);
+ if (p != NULL) {
+ taosMemoryFreeClear(p->firstPk.pData);
+ taosMemoryFreeClear(p->lastPk.pData);
+ }
}
void clearDataBlockIterator(SDataBlockIter* pIter, bool needFree) {
+ if (pIter == NULL) {
+ return;
+ }
+
pIter->index = -1;
pIter->numOfBlocks = 0;
@@ -618,6 +772,10 @@ void clearDataBlockIterator(SDataBlockIter* pIter, bool needFree) {
}
void cleanupDataBlockIterator(SDataBlockIter* pIter, bool needFree) {
+ if (pIter == NULL) {
+ return;
+ }
+
pIter->index = -1;
pIter->numOfBlocks = 0;
if (needFree) {
@@ -625,159 +783,149 @@ void cleanupDataBlockIterator(SDataBlockIter* pIter, bool needFree) {
} else {
taosArrayDestroy(pIter->blockList);
}
+ pIter->blockList = NULL;
}
int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks, SArray* pTableList) {
- bool asc = ASCENDING_TRAVERSE(pReader->info.order);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ bool asc = false;
+ int32_t numOfTables = 0;
+ int64_t st = 0;
+ int64_t et = 0;
+ int32_t cnt = 0;
+ SBlockOrderSupporter sup = {0};
+ SMultiwayMergeTreeInfo* pTree = NULL;
+ STableBlockScanInfo* pTableScanInfo = NULL;
+ const SFileDataBlockInfo* pBlockInfo = NULL;
+ const void* px = NULL;
- SBlockOrderSupporter sup = {0};
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockIter, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_CONDITION(numOfBlocks >= 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ asc = ASCENDING_TRAVERSE(pReader->info.order);
clearDataBlockIterator(pBlockIter, shouldFreePkBuf(&pReader->suppInfo));
-
pBlockIter->numOfBlocks = numOfBlocks;
// access data blocks according to the offset of each block in asc/desc order.
- int32_t numOfTables = taosArrayGetSize(pTableList);
+ numOfTables = taosArrayGetSize(pTableList);
- int64_t st = taosGetTimestampUs();
- int32_t code = initBlockOrderSupporter(&sup, numOfTables);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- int32_t cnt = 0;
+ st = taosGetTimestampUs();
+ code = initBlockOrderSupporter(&sup, numOfTables);
+ TSDB_CHECK_CODE(code, lino, _end);
for (int32_t i = 0; i < numOfTables; ++i) {
- STableBlockScanInfo* pTableScanInfo = taosArrayGetP(pTableList, i);
+ pTableScanInfo = taosArrayGetP(pTableList, i);
- size_t num = taosArrayGetSize(pTableScanInfo->pBlockList);
+ size_t num = taosArrayGetSize(pTableScanInfo->pBlockList);
+ SBlockOrderWrapper* buf = taosMemoryMalloc(sizeof(SBlockOrderWrapper) * num);
+ TSDB_CHECK_NULL(buf, code, lino, _end, terrno);
sup.numOfBlocksPerTable[sup.numOfTables] = num;
-
- char* buf = taosMemoryMalloc(sizeof(SBlockOrderWrapper) * num);
- if (buf == NULL) {
- cleanupBlockOrderSupporter(&sup);
- return terrno;
- }
-
- sup.pDataBlockInfo[sup.numOfTables] = (SBlockOrderWrapper*)buf;
+ sup.pDataBlockInfo[sup.numOfTables] = buf;
+ sup.numOfTables++;
for (int32_t k = 0; k < num; ++k) {
- SFileDataBlockInfo* pBlockInfo = taosArrayGet(pTableScanInfo->pBlockList, k);
- if (pBlockInfo == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pBlockInfo = taosArrayGet(pTableScanInfo->pBlockList, k);
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
- sup.pDataBlockInfo[sup.numOfTables][k] =
+ sup.pDataBlockInfo[i][k] =
(SBlockOrderWrapper){.uid = pTableScanInfo->uid, .offset = pBlockInfo->blockOffset, .pInfo = pTableScanInfo};
cnt++;
}
-
- sup.numOfTables += 1;
}
- if (numOfBlocks != cnt && sup.numOfTables != numOfTables) {
- cleanupBlockOrderSupporter(&sup);
- return TSDB_CODE_INVALID_PARA;
- }
+ TSDB_CHECK_CONDITION(!(numOfBlocks != cnt && sup.numOfTables != numOfTables), code, lino, _end,
+ TSDB_CODE_INVALID_PARA);
// since there is only one table qualified, blocks are not sorted
if (sup.numOfTables == 1) {
- STableBlockScanInfo* pTableScanInfo = taosArrayGetP(pTableList, 0);
+ pTableScanInfo = taosArrayGetP(pTableList, 0);
for (int32_t i = 0; i < numOfBlocks; ++i) {
STableDataBlockIdx tableDataBlockIdx = {.globalIndex = i};
- void* px = taosArrayPush(pTableScanInfo->pBlockIdxList, &tableDataBlockIdx);
- if (px == NULL) {
- return terrno;
- }
+ px = taosArrayPush(pTableScanInfo->pBlockIdxList, &tableDataBlockIdx);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
- void* p = taosArrayAddAll(pBlockIter->blockList, pTableScanInfo->pBlockList);
- if (p == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ px = taosArrayAddAll(pBlockIter->blockList, pTableScanInfo->pBlockList);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
taosArrayDestroy(pTableScanInfo->pBlockList);
pTableScanInfo->pBlockList = NULL;
- int64_t et = taosGetTimestampUs();
+ et = taosGetTimestampUs();
tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s",
pReader, numOfBlocks, (et - st) / 1000.0, pReader->idStr);
pBlockIter->index = asc ? 0 : (numOfBlocks - 1);
- cleanupBlockOrderSupporter(&sup);
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %s", pReader, cnt, sup.numOfTables,
pReader->idStr);
- SMultiwayMergeTreeInfo* pTree = NULL;
+ code = tMergeTreeCreate(&pTree, sup.numOfTables, &sup, fileDataBlockOrderCompar);
+ TSDB_CHECK_CODE(code, lino, _end);
- uint8_t ret = tMergeTreeCreate(&pTree, sup.numOfTables, &sup, fileDataBlockOrderCompar);
- if (ret != TSDB_CODE_SUCCESS) {
- cleanupBlockOrderSupporter(&sup);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- int32_t numOfTotal = 0;
- while (numOfTotal < cnt) {
+ for (int32_t i = 0; i < cnt; ++i) {
int32_t pos = tMergeTreeGetChosenIndex(pTree);
int32_t index = sup.indexPerTable[pos]++;
+ pTableScanInfo = sup.pDataBlockInfo[pos][index].pInfo;
- SFileDataBlockInfo* pBlockInfo = taosArrayGet(sup.pDataBlockInfo[pos][index].pInfo->pBlockList, index);
- if (pBlockInfo == NULL) {
- return TSDB_CODE_INVALID_PARA;
- }
+ pBlockInfo = taosArrayGet(pTableScanInfo->pBlockList, index);
+ TSDB_CHECK_NULL(pBlockInfo, code, lino, _end, terrno);
- void* px = taosArrayPush(pBlockIter->blockList, pBlockInfo);
- if (px == NULL) {
- return terrno;
- }
-
- STableBlockScanInfo* pTableScanInfo = sup.pDataBlockInfo[pos][index].pInfo;
- STableDataBlockIdx tableDataBlockIdx = {.globalIndex = numOfTotal};
+ px = taosArrayPush(pBlockIter->blockList, pBlockInfo);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
+ STableDataBlockIdx tableDataBlockIdx = {.globalIndex = i};
px = taosArrayPush(pTableScanInfo->pBlockIdxList, &tableDataBlockIdx);
- if (px == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
// set data block index overflow, in order to disable the offset comparator
if (sup.indexPerTable[pos] >= sup.numOfBlocksPerTable[pos]) {
sup.indexPerTable[pos] = sup.numOfBlocksPerTable[pos] + 1;
}
- numOfTotal += 1;
code = tMergeTreeAdjust(pTree, tMergeTreeGetAdjustIndex(pTree));
- if (TSDB_CODE_SUCCESS != code) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
for (int32_t i = 0; i < numOfTables; ++i) {
- STableBlockScanInfo* pTableScanInfo = taosArrayGetP(pTableList, i);
+ pTableScanInfo = taosArrayGetP(pTableList, i);
taosArrayDestroy(pTableScanInfo->pBlockList);
pTableScanInfo->pBlockList = NULL;
}
- int64_t et = taosGetTimestampUs();
+ et = taosGetTimestampUs();
tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks,
(et - st) / 1000.0, pReader->idStr);
- cleanupBlockOrderSupporter(&sup);
- taosMemoryFree(pTree);
pBlockIter->index = asc ? 0 : (numOfBlocks - 1);
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ cleanupBlockOrderSupporter(&sup);
+ if (pTree != NULL) {
+ tMergeTreeDestroy(&pTree);
+ }
+ return code;
}
-bool blockIteratorNext(SDataBlockIter* pBlockIter, const char* idStr) {
- bool asc = ASCENDING_TRAVERSE(pBlockIter->order);
+bool blockIteratorNext(SDataBlockIter* pBlockIter) {
+ bool asc = false;
- int32_t step = asc ? 1 : -1;
- if ((pBlockIter->index >= pBlockIter->numOfBlocks - 1 && asc) || (pBlockIter->index <= 0 && (!asc))) {
+ if (pBlockIter == NULL) {
return false;
}
- pBlockIter->index += step;
+ asc = ASCENDING_TRAVERSE(pBlockIter->order);
+ if ((pBlockIter->index >= pBlockIter->numOfBlocks - 1 && asc) || (pBlockIter->index <= 0 && (!asc))) {
+ return false;
+ }
+ pBlockIter->index += asc ? 1 : -1;
return true;
}
@@ -791,30 +939,29 @@ static int32_t loadNextStatisticsBlock(SSttFileReader* pSttFileReader, STbStatis
int32_t* j);
static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_t numOfTables, int32_t* j,
ETombBlkCheckEnum* pRet) {
- int32_t code = 0;
- STombRecord record = {0};
-
- uint64_t uid = pReader->status.uidList.tableUidList[*j];
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STombRecord record = {0};
+ uint64_t uid = 0;
STableBlockScanInfo* pScanInfo = NULL;
+ TSDB_CHECK_NULL(pBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pRet, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *pRet = BLK_CHECK_QUIT;
+ uid = pReader->status.uidList.tableUidList[*j];
code = getTableBlockScanInfo(pReader->status.pTableMap, uid, &pScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pScanInfo->pFileDelData == NULL) {
pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
- if (pScanInfo->pFileDelData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pScanInfo->pFileDelData, code, lino, _end, terrno);
}
for (int32_t k = 0; k < pBlock->numOfRecords; ++k) {
code = tTombBlockGet(pBlock, k, &record);
- if (code != TSDB_CODE_SUCCESS) {
- *pRet = BLK_CHECK_QUIT;
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (record.suid < pReader->info.suid) {
continue;
@@ -822,7 +969,7 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
if (record.suid > pReader->info.suid) {
*pRet = BLK_CHECK_QUIT;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
if (uid < record.uid) {
@@ -832,20 +979,16 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
if ((*j) >= numOfTables) {
*pRet = BLK_CHECK_QUIT;
- return TSDB_CODE_SUCCESS;
+ goto _end;
}
uid = pReader->status.uidList.tableUidList[*j];
code = getTableBlockScanInfo(pReader->status.pTableMap, uid, &pScanInfo, pReader->idStr);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
if (pScanInfo->pFileDelData == NULL) {
pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
- if (pScanInfo->pFileDelData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(pScanInfo->pFileDelData, code, lino, _end, terrno);
}
}
@@ -853,30 +996,38 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
continue;
}
- if (!(record.suid == pReader->info.suid && uid == record.uid)) {
- tsdbError("tsdb reader failed at: %s:%d", __func__, __LINE__);
- return TSDB_CODE_INTERNAL_ERROR;
- }
+ TSDB_CHECK_CONDITION((record.suid == pReader->info.suid) && (uid == record.uid), code, lino, _end,
+ TSDB_CODE_INTERNAL_ERROR);
if (record.version <= pReader->info.verRange.maxVer) {
- SDelData delData = {.version = record.version, .sKey = record.skey, .eKey = record.ekey};
- void* px = taosArrayPush(pScanInfo->pFileDelData, &delData);
- if (px == NULL) {
- return terrno;
- }
+ SDelData delData = {.version = record.version, .sKey = record.skey, .eKey = record.ekey};
+ const void* px = taosArrayPush(pScanInfo->pFileDelData, &delData);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
}
*pRet = BLK_CHECK_CONTINUE;
- return TSDB_CODE_SUCCESS;
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
// load tomb data API
static int32_t doLoadTombDataFromTombBlk(const TTombBlkArray* pTombBlkArray, STsdbReader* pReader, void* pFileReader,
bool isFile) {
- int32_t code = 0;
- STableUidList* pList = &pReader->status.uidList;
- int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ const STableUidList* pList = NULL;
+ int32_t numOfTables = 0;
+
+ TSDB_CHECK_NULL(pTombBlkArray, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ pList = &pReader->status.uidList;
+ numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
int32_t i = 0, j = 0;
while (i < pTombBlkArray->size && j < numOfTables) {
@@ -890,10 +1041,10 @@ static int32_t doLoadTombDataFromTombBlk(const TTombBlkArray* pTombBlkArray, STs
break;
}
- if (!(pTombBlk->minTbid.suid <= pReader->info.suid && pTombBlk->maxTbid.suid >= pReader->info.suid)) {
- tsdbError("tsdb reader failed at: %s:%d", __func__, __LINE__);
- return TSDB_CODE_INTERNAL_ERROR;
- }
+ TSDB_CHECK_CONDITION(
+ (pTombBlk->minTbid.suid <= pReader->info.suid) && (pTombBlk->maxTbid.suid >= pReader->info.suid), code, lino,
+ _end, TSDB_CODE_INTERNAL_ERROR);
+
if (pTombBlk->maxTbid.suid == pReader->info.suid && pTombBlk->maxTbid.uid < pList->tableUidList[0]) {
i += 1;
continue;
@@ -906,79 +1057,98 @@ static int32_t doLoadTombDataFromTombBlk(const TTombBlkArray* pTombBlkArray, STs
STombBlock block = {0};
code = isFile ? tsdbDataFileReadTombBlock(pFileReader, &pTombBlkArray->data[i], &block)
: tsdbSttFileReadTombBlock(pFileReader, &pTombBlkArray->data[i], &block);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- // uint64_t uid = pReader->status.uidList.tableUidList[j];
-
- // STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
- // if (pScanInfo->pFileDelData == NULL) {
- // pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
- // }
+ TSDB_CHECK_CODE(code, lino, _end);
ETombBlkCheckEnum ret = 0;
code = doCheckTombBlock(&block, pReader, numOfTables, &j, &ret);
tTombBlockDestroy(&block);
- if (code != TSDB_CODE_SUCCESS || ret == BLK_CHECK_QUIT) {
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
+ if (ret == BLK_CHECK_QUIT) {
+ break;
}
i += 1;
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t loadDataFileTombDataForAll(STsdbReader* pReader) {
- if (pReader->status.pCurrentFileset == NULL || pReader->status.pCurrentFileset->farr[3] == NULL) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ const TTombBlkArray* pBlkArray = NULL;
+
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ if ((pReader->status.pCurrentFileset == NULL) || (pReader->status.pCurrentFileset->farr[TSDB_FTYPE_TOMB] == NULL)) {
return TSDB_CODE_SUCCESS;
}
- const TTombBlkArray* pBlkArray = NULL;
+ code = tsdbDataFileReadTombBlk(pReader->pFileReader, &pBlkArray);
+ TSDB_CHECK_CODE(code, lino, _end);
- int32_t code = tsdbDataFileReadTombBlk(pReader->pFileReader, &pBlkArray);
+ code = doLoadTombDataFromTombBlk(pBlkArray, pReader, pReader->pFileReader, true);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
if (code != TSDB_CODE_SUCCESS) {
- return code;
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
-
- return doLoadTombDataFromTombBlk(pBlkArray, pReader, pReader->pFileReader, true);
+ return code;
}
int32_t loadSttTombDataForAll(STsdbReader* pReader, SSttFileReader* pSttFileReader, SSttBlockLoadInfo* pLoadInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
const TTombBlkArray* pBlkArray = NULL;
- int32_t code = tsdbSttFileReadTombBlk(pSttFileReader, &pBlkArray);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
- return doLoadTombDataFromTombBlk(pBlkArray, pReader, pSttFileReader, false);
+ TSDB_CHECK_NULL(pReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ code = tsdbSttFileReadTombBlk(pSttFileReader, &pBlkArray);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+ code = doLoadTombDataFromTombBlk(pBlkArray, pReader, pSttFileReader, false);
+ TSDB_CHECK_CODE(code, lino, _end);
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piMemTbData, int64_t ver) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ SArray* pMemDelData = NULL;
+ const SDelData* p = NULL;
+ const void* px = NULL;
+
+ TSDB_CHECK_NULL(ppMemDelData, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
if (*ppMemDelData == NULL) {
*ppMemDelData = taosArrayInit(4, sizeof(SDelData));
- if (*ppMemDelData == NULL) {
- return terrno;
- }
+ TSDB_CHECK_NULL(*ppMemDelData, code, lino, _end, terrno);
}
- SArray* pMemDelData = *ppMemDelData;
+ pMemDelData = *ppMemDelData;
- SDelData* p = NULL;
if (pMemTbData != NULL) {
taosRLockLatch(&pMemTbData->lock);
p = pMemTbData->pHead;
while (p) {
if (p->version <= ver) {
- void* px = taosArrayPush(pMemDelData, p);
+ px = taosArrayPush(pMemDelData, p);
if (px == NULL) {
taosRUnLockLatch(&pMemTbData->lock);
- return terrno;
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
}
-
p = p->pNext;
}
taosRUnLockLatch(&pMemTbData->lock);
@@ -988,28 +1158,36 @@ int32_t loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piM
p = piMemTbData->pHead;
while (p) {
if (p->version <= ver) {
- void* px = taosArrayPush(pMemDelData, p);
- if (px == NULL) {
- return terrno;
- }
+ px = taosArrayPush(pMemDelData, p);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
p = p->pNext;
}
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo* pBlockLoadInfo,
TStatisBlkArray* pStatisBlkArray, uint64_t suid, const uint64_t* pUidList,
int32_t numOfTables, int32_t* pNumOfRows) {
- int32_t num = 0;
- int32_t code = 0;
- int32_t lino = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t num = 0;
+ int64_t st = 0;
+ const SStatisBlk* p = NULL;
+ STbStatisBlock* pStatisBlock = NULL;
- if (pNumOfRows != 0) {
- *pNumOfRows = 0;
- }
+ TSDB_CHECK_NULL(pSttFileReader, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pBlockLoadInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pStatisBlkArray, code, lino, _end, TSDB_CODE_INVALID_PARA);
+ TSDB_CHECK_NULL(pNumOfRows, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ *pNumOfRows = 0;
if (TARRAY2_SIZE(pStatisBlkArray) <= 0) {
return code;
@@ -1021,21 +1199,21 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo
}
if (i >= TARRAY2_SIZE(pStatisBlkArray)) {
- return code;
+ goto _end;
}
- SStatisBlk* p = &pStatisBlkArray->data[i];
- STbStatisBlock* pStatisBlock = taosMemoryCalloc(1, sizeof(STbStatisBlock));
- TSDB_CHECK_NULL(pStatisBlock, code, lino, _err, terrno);
+ p = &pStatisBlkArray->data[i];
+ pStatisBlock = taosMemoryCalloc(1, sizeof(STbStatisBlock));
+ TSDB_CHECK_NULL(pStatisBlock, code, lino, _end, terrno);
code = tStatisBlockInit(pStatisBlock);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
- int64_t st = taosGetTimestampMs();
+ st = taosGetTimestampUs();
code = tsdbSttFileReadStatisBlock(pSttFileReader, p, pStatisBlock);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
- double el = (taosGetTimestampMs() - st) / 1000.0;
+ double el = (taosGetTimestampUs() - st) / 1000.0;
pBlockLoadInfo->cost.loadStatisBlocks += 1;
pBlockLoadInfo->cost.statisElapsedTime += el;
@@ -1045,10 +1223,8 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo
}
if (index >= pStatisBlock->numOfRecords) {
- tStatisBlockDestroy(pStatisBlock);
- taosMemoryFreeClear(pStatisBlock);
*pNumOfRows = num;
- return code;
+ goto _end;
}
int32_t j = index;
@@ -1056,10 +1232,8 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo
while (i < TARRAY2_SIZE(pStatisBlkArray) && uidIndex < numOfTables) {
p = &pStatisBlkArray->data[i];
if (p->minTbid.suid > suid) {
- tStatisBlockDestroy(pStatisBlock);
- taosMemoryFreeClear(pStatisBlock);
*pNumOfRows = num;
- return code;
+ goto _end;
}
uint64_t uid = pUidList[uidIndex];
@@ -1069,24 +1243,26 @@ int32_t getNumOfRowsInSttBlock(SSttFileReader* pSttFileReader, SSttBlockLoadInfo
uidIndex += 1;
j += 1;
code = loadNextStatisticsBlock(pSttFileReader, pStatisBlock, pStatisBlkArray, pStatisBlock->numOfRecords, &i, &j);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
} else if (((int64_t*)pStatisBlock->uids.data)[j] < uid) {
j += 1;
code = loadNextStatisticsBlock(pSttFileReader, pStatisBlock, pStatisBlkArray, pStatisBlock->numOfRecords, &i, &j);
- TSDB_CHECK_CODE(code, lino, _err);
+ TSDB_CHECK_CODE(code, lino, _end);
} else {
uidIndex += 1;
}
}
- tStatisBlockDestroy(pStatisBlock);
- taosMemoryFreeClear(pStatisBlock);
*pNumOfRows = num;
- return code;
-_err:
- tsdbError("%p failed to get number of rows in stt block, %s at line:%d code:%s", pSttFileReader, __func__, lino,
- tstrerror(code));
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s with %p failed at line %d since %s", __func__, pSttFileReader, lino, tstrerror(code));
+ }
+ if (pStatisBlock) {
+ tStatisBlockDestroy(pStatisBlock);
+ taosMemoryFreeClear(pStatisBlock);
+ }
return code;
}
@@ -1094,93 +1270,118 @@ _err:
static int32_t loadNextStatisticsBlock(SSttFileReader* pSttFileReader, STbStatisBlock* pStatisBlock,
const TStatisBlkArray* pStatisBlkArray, int32_t numOfRows, int32_t* i,
int32_t* j) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+
if ((*j) >= numOfRows) {
(*i) += 1;
(*j) = 0;
if ((*i) < TARRAY2_SIZE(pStatisBlkArray)) {
- int32_t code = tsdbSttFileReadStatisBlock(pSttFileReader, &pStatisBlkArray->data[(*i)], pStatisBlock);
+ code = tsdbSttFileReadStatisBlock(pSttFileReader, &pStatisBlkArray->data[(*i)], pStatisBlock);
if (code != 0) {
tsdbError("%p failed to read statisBlock, code:%s", pSttFileReader, tstrerror(code));
- return code;
+ TSDB_CHECK_CODE(code, lino, _end);
}
}
}
- return 0;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t doAdjustValidDataIters(SArray* pLDIterList, int32_t numOfFileObj) {
- int32_t size = taosArrayGetSize(pLDIterList);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t size = 0;
+ int32_t inc = 0;
+ SLDataIter* pIter = NULL;
+
+ size = taosArrayGetSize(pLDIterList);
if (size < numOfFileObj) {
- int32_t inc = numOfFileObj - size;
+ inc = numOfFileObj - size;
for (int32_t k = 0; k < inc; ++k) {
- SLDataIter* pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
- if (!pIter) {
- return terrno;
- }
+ pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
+ TSDB_CHECK_NULL(pIter, code, lino, _end, terrno);
void* px = taosArrayPush(pLDIterList, &pIter);
if (px == NULL) {
- taosMemoryFree(pIter);
- return terrno;
+ taosMemoryFreeClear(pIter);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
}
} else if (size > numOfFileObj) { // remove unused LDataIter
- int32_t inc = size - numOfFileObj;
+ inc = size - numOfFileObj;
- for (int i = 0; i < inc; ++i) {
- SLDataIter* pIter = taosArrayPop(pLDIterList);
+ for (int32_t i = 0; i < inc; ++i) {
+ pIter = taosArrayPop(pLDIterList);
destroyLDataIter(pIter);
}
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t adjustSttDataIters(SArray* pSttFileBlockIterArray, STFileSet* pFileSet) {
- int32_t numOfLevels = pFileSet->lvlArr->size;
- int32_t code = 0;
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t numOfLevels = 0;
+ SSttLvl* pSttLevel = NULL;
+ SArray* pList = NULL;
+
+ TSDB_CHECK_NULL(pFileSet, code, lino, _end, TSDB_CODE_INVALID_PARA);
+
+ numOfLevels = pFileSet->lvlArr->size;
// add the list/iter placeholder
while (taosArrayGetSize(pSttFileBlockIterArray) < numOfLevels) {
- SArray* pList = taosArrayInit(4, POINTER_BYTES);
- if (pList == NULL) {
- return terrno;
- }
+ pList = taosArrayInit(4, POINTER_BYTES);
+ TSDB_CHECK_NULL(pList, code, lino, _end, terrno);
void* px = taosArrayPush(pSttFileBlockIterArray, &pList);
if (px == NULL) {
- return terrno;
+ taosArrayDestroy(pList);
+ TSDB_CHECK_NULL(px, code, lino, _end, terrno);
}
}
for (int32_t j = 0; j < numOfLevels; ++j) {
- SSttLvl* pSttLevel = pFileSet->lvlArr->data[j];
- SArray* pList = taosArrayGetP(pSttFileBlockIterArray, j);
+ pSttLevel = pFileSet->lvlArr->data[j];
+ pList = taosArrayGetP(pSttFileBlockIterArray, j);
code = doAdjustValidDataIters(pList, TARRAY2_SIZE(pSttLevel->fobjArr));
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
}
- return TSDB_CODE_SUCCESS;
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
}
int32_t tsdbGetRowsInSttFiles(STFileSet* pFileSet, SArray* pSttFileBlockIterArray, STsdb* pTsdb, SMergeTreeConf* pConf,
const char* pstr) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
int32_t numOfRows = 0;
- int32_t code = 0;
+ int32_t numOfLevels = 0;
+
+ TSDB_CHECK_NULL(pFileSet, code, lino, _end, TSDB_CODE_INVALID_PARA);
// no data exists, go to end
- int32_t numOfLevels = pFileSet->lvlArr->size;
+ numOfLevels = pFileSet->lvlArr->size;
if (numOfLevels == 0) {
- return numOfRows;
+ goto _end;
}
// add the list/iter placeholder
code = adjustSttDataIters(pSttFileBlockIterArray, pFileSet);
- if (code != TSDB_CODE_SUCCESS) {
- return numOfRows;
- }
+ TSDB_CHECK_CODE(code, lino, _end);
for (int32_t j = 0; j < numOfLevels; ++j) {
SSttLvl* pSttLevel = pFileSet->lvlArr->data[j];
@@ -1233,26 +1434,43 @@ int32_t tsdbGetRowsInSttFiles(STFileSet* pFileSet, SArray* pSttFileBlockIterArra
}
}
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
return numOfRows;
}
static bool overlapHelper(const STimeWindow* pLeft, TSKEY minKey, TSKEY maxKey) {
- return (pLeft->ekey >= minKey) && (pLeft->skey <= maxKey);
+ return (pLeft != NULL) && (pLeft->ekey >= minKey) && (pLeft->skey <= maxKey);
}
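The added NULL guard above sits in front of the usual closed-interval intersection test: two ranges overlap exactly when neither ends before the other starts. A small standalone sanity check of that predicate, using a simplified stand-in type rather than the real STimeWindow:

/* Minimal sketch (assumption): standalone check of the closed-interval overlap
 * predicate used by overlapHelper, with a simplified stand-in window type. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef int64_t DemoKey;
typedef struct { DemoKey skey, ekey; } TimeWindowDemo;

static bool overlaps(const TimeWindowDemo *w, DemoKey minKey, DemoKey maxKey) {
  /* [w->skey, w->ekey] and [minKey, maxKey] intersect exactly when neither
   * range lies entirely before the other. */
  return (w != NULL) && (w->ekey >= minKey) && (w->skey <= maxKey);
}

int main(void) {
  TimeWindowDemo w = {.skey = 10, .ekey = 20};
  assert(overlaps(&w, 15, 30));   /* partial overlap */
  assert(overlaps(&w, 20, 25));   /* touching at a boundary still overlaps */
  assert(!overlaps(&w, 21, 30));  /* disjoint */
  assert(!overlaps(NULL, 0, 1));  /* NULL window treated as no overlap */
  return 0;
}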
static bool overlapWithTimeWindow(STimeWindow* p1, STimeWindow* pQueryWindow, STableBlockScanInfo* pBlockScanInfo,
int32_t order) {
+ SIterInfo* pMemIter = NULL;
+ SIterInfo* pIMemIter = NULL;
+ STbData* pTbData = NULL;
+ STimeWindow* pFileWin = NULL;
+
+ if (p1 == NULL || pQueryWindow == NULL) {
+ return false;
+ }
+
// overlap with query window
if (!(p1->skey >= pQueryWindow->skey && p1->ekey <= pQueryWindow->ekey)) {
return true;
}
- SIterInfo* pMemIter = &pBlockScanInfo->iter;
- SIterInfo* pIMemIter = &pBlockScanInfo->iiter;
+ if (pBlockScanInfo == NULL) {
+ return false;
+ }
+
+ pMemIter = &pBlockScanInfo->iter;
+ pIMemIter = &pBlockScanInfo->iiter;
// overlap with mem data
if (pMemIter->hasVal) {
- STbData* pTbData = pMemIter->iter->pTbData;
+ pTbData = pMemIter->iter->pTbData;
if (overlapHelper(p1, pTbData->minKey, pTbData->maxKey)) {
return true;
}
@@ -1260,14 +1478,14 @@ static bool overlapWithTimeWindow(STimeWindow* p1, STimeWindow* pQueryWindow, ST
// overlap with imem data
if (pIMemIter->hasVal) {
- STbData* pITbData = pIMemIter->iter->pTbData;
- if (overlapHelper(p1, pITbData->minKey, pITbData->maxKey)) {
+ pTbData = pIMemIter->iter->pTbData;
+ if (overlapHelper(p1, pTbData->minKey, pTbData->maxKey)) {
return true;
}
}
// overlap with data file block
- STimeWindow* pFileWin = &pBlockScanInfo->filesetWindow;
+ pFileWin = &pBlockScanInfo->filesetWindow;
if ((taosArrayGetSize(pBlockScanInfo->pBlockIdxList) > 0) && overlapHelper(p1, pFileWin->skey, pFileWin->ekey)) {
return true;
}
@@ -1290,20 +1508,24 @@ static int32_t sortUidComparFn(const void* p1, const void* p2) {
}
bool isCleanSttBlock(SArray* pKeyRangeList, STimeWindow* pQueryWindow, STableBlockScanInfo* pScanInfo, int32_t order) {
- // check if it overlap with del skyline
- taosArraySort(pKeyRangeList, sortUidComparFn);
+ int32_t num = 0;
+ SSttKeyRange* pRange = NULL;
+ STimeWindow w;
- int32_t num = taosArrayGetSize(pKeyRangeList);
+ num = taosArrayGetSize(pKeyRangeList);
if (num == 0) {
return false;
}
- SSttKeyRange* pRange = taosArrayGet(pKeyRangeList, 0);
+ // check if it overlap with del skyline
+ taosArraySort(pKeyRangeList, sortUidComparFn);
+
+ pRange = taosArrayGet(pKeyRangeList, 0);
if (pRange == NULL) {
return false;
}
- STimeWindow w = {.skey = pRange->skey.ts, .ekey = pRange->ekey.ts};
+ w = (STimeWindow){.skey = pRange->skey.ts, .ekey = pRange->ekey.ts};
if (overlapWithTimeWindow(&w, pQueryWindow, pScanInfo, order)) {
return false;
}
@@ -1319,8 +1541,8 @@ bool isCleanSttBlock(SArray* pKeyRangeList, STimeWindow* pQueryWindow, STableBlo
return false;
}
- STimeWindow w2 = {.skey = p2->skey.ts, .ekey = p2->ekey.ts};
- bool overlap = overlapWithTimeWindow(&w2, pQueryWindow, pScanInfo, order);
+ w = (STimeWindow){.skey = p2->skey.ts, .ekey = p2->ekey.ts};
+ bool overlap = overlapWithTimeWindow(&w, pQueryWindow, pScanInfo, order);
if (overlap) {
return false;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h
index 6ec1f99577..703c548aca 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h
+++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h
@@ -30,13 +30,13 @@ extern "C" {
do { \
(_w)->skey = INT64_MAX; \
(_w)->ekey = INT64_MIN; \
- } while (0);
+ } while (0)
#define INIT_KEYRANGE(_k) \
do { \
(_k)->skey.ts = INT64_MAX; \
(_k)->ekey.ts = INT64_MIN; \
- } while (0);
+ } while (0)
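Dropping the semicolon after while (0) in the two macros above makes each invocation expand to exactly one statement. A minimal sketch of why that matters, with hypothetical INIT_WIN_* names: keeping the semicolon inside the macro leaves a stray empty statement after the usual MACRO(x); call, and an if/else wrapped around it no longer compiles.

#include <stdint.h>

typedef struct { int64_t skey, ekey; } WinDemo;

#define INIT_WIN_OK(_w)     \
  do {                      \
    (_w)->skey = INT64_MAX; \
    (_w)->ekey = INT64_MIN; \
  } while (0)

/* #define INIT_WIN_BAD(_w) do { ... } while (0);   trailing ';' variant */

static void reset(WinDemo *w, int cond) {
  if (cond)
    INIT_WIN_OK(w);  /* expands to a single statement, so the else still binds */
  else
    w->skey = 0;     /* with the trailing-';' variant this else would not compile */
}

int main(void) {
  WinDemo w = {0};
  reset(&w, 1);
  return (w.ekey == INT64_MIN) ? 0 : 1;
}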
#define tRowGetKeyEx(_pRow, _pKey) \
{ \
@@ -72,7 +72,6 @@ typedef struct STsdbReaderInfo {
} STsdbReaderInfo;
typedef struct SBlockInfoBuf {
- int32_t currentIndex;
SArray* pData;
int32_t numPerBucket;
int32_t numOfTables;
@@ -241,7 +240,6 @@ typedef struct SDataBlockIter {
int32_t index;
SArray* blockList; // SArray
int32_t order;
- SDataBlk block; // current SDataBlk data
} SDataBlockIter;
typedef struct SFileBlockDumpInfo {
@@ -321,7 +319,7 @@ int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, c
int32_t initTableBlockScanInfo(STableBlockScanInfo* pScanInfo, uint64_t uid, SSHashObj* pTableMap,
STsdbReader* pReader);
void clearBlockScanInfo(STableBlockScanInfo* p);
-void destroyAllBlockScanInfo(SSHashObj* pTableMap);
+void destroyAllBlockScanInfo(SSHashObj** pTableMap);
void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step);
void cleanupInfoForNextFileset(SSHashObj* pTableMap);
int32_t ensureBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables);
@@ -335,7 +333,7 @@ void clearBrinBlockIter(SBrinRecordIter* pIter);
// initialize block iterator API
int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks, SArray* pTableList);
-bool blockIteratorNext(SDataBlockIter* pBlockIter, const char* idStr);
+bool blockIteratorNext(SDataBlockIter* pBlockIter);
// load tomb data API (stt/mem only for one table each, tomb data from data files are load for all tables at one time)
int32_t loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piMemTbData, int64_t ver);
diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c
index d688d1323d..0ac0ee1b8f 100644
--- a/source/dnode/vnode/src/vnd/vnodeInitApi.c
+++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c
@@ -147,6 +147,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStatePutParName = streamStatePutParName;
pStore->streamStateGetParName = streamStateGetParName;
+ pStore->streamStateDeleteParName = streamStateDeleteParName;
pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
pStore->streamStateReleaseBuf = streamStateReleaseBuf;
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index 53365303b0..2d2446415e 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -360,6 +360,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
info.config = vnodeCfgDefault;
// load vnode info
+ vInfo("vgId:%d, start to vnode load info %s", info.config.vgId, dir);
ret = vnodeLoadInfo(dir, &info);
if (ret < 0) {
vError("failed to open vnode from %s since %s", path, tstrerror(terrno));
@@ -429,22 +430,26 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
int8_t rollback = vnodeShouldRollback(pVnode);
// open buffer pool
+ vInfo("vgId:%d, start to open vnode buffer pool", TD_VID(pVnode));
if (vnodeOpenBufPool(pVnode) < 0) {
vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open meta
+ vInfo("vgId:%d, start to open vnode meta", TD_VID(pVnode));
if (metaOpen(pVnode, &pVnode->pMeta, rollback) < 0) {
vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
+ vInfo("vgId:%d, start to upgrade meta", TD_VID(pVnode));
if (metaUpgrade(pVnode, &pVnode->pMeta) < 0) {
vError("vgId:%d, failed to upgrade meta since %s", TD_VID(pVnode), tstrerror(terrno));
}
// open tsdb
+ vInfo("vgId:%d, start to open vnode tsdb", TD_VID(pVnode));
if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL, rollback, force) < 0) {
vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@@ -455,6 +460,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
ret = taosRealPath(tdir, NULL, sizeof(tdir));
TAOS_UNUSED(ret);
+ vInfo("vgId:%d, start to open vnode wal", TD_VID(pVnode));
pVnode->pWal = walOpen(tdir, &(pVnode->config.walCfg));
if (pVnode->pWal == NULL) {
vError("vgId:%d, failed to open vnode wal since %s. wal:%s", TD_VID(pVnode), tstrerror(terrno), tdir);
@@ -467,6 +473,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
TAOS_UNUSED(ret);
// open query
+ vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode));
if (vnodeQueryOpen(pVnode)) {
vError("vgId:%d, failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno));
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -474,18 +481,21 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
}
// sma required the tq is initialized before the vnode open
+ vInfo("vgId:%d, start to open vnode tq", TD_VID(pVnode));
if (tqOpen(tdir, pVnode)) {
vError("vgId:%d, failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open sma
+ vInfo("vgId:%d, start to open vnode sma", TD_VID(pVnode));
if (smaOpen(pVnode, rollback, force)) {
vError("vgId:%d, failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// vnode begin
+ vInfo("vgId:%d, start to begin vnode", TD_VID(pVnode));
if (vnodeBegin(pVnode) < 0) {
vError("vgId:%d, failed to begin since %s", TD_VID(pVnode), tstrerror(terrno));
terrno = TSDB_CODE_OUT_OF_MEMORY;
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index dd13c975cf..16c5e026d1 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -50,6 +50,8 @@ static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq,
static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessConfigChangeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessArbCheckSyncReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropTSmaCtbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp,
+ SRpcMsg *pOriginRpc);
static int32_t vnodePreCheckAssignedLogSyncd(SVnode *pVnode, char *member0Token, char *member1Token);
static int32_t vnodeCheckAssignedLogSyncd(SVnode *pVnode, char *member0Token, char *member1Token);
@@ -360,6 +362,10 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
int32_t lino = 0;
+ if (tsBypassFlag & TSDB_BYPASS_RA_RPC_RECV_SUBMIT) {
+ return TSDB_CODE_MSG_PREPROCESSED;
+ }
+
SDecoder *pCoder = &(SDecoder){0};
if (taosHton64(((SSubmitReq2Msg *)pMsg->pCont)->version) != 1) {
@@ -481,6 +487,61 @@ static int32_t vnodePreProcessArbCheckSyncMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return code;
}
+int32_t vnodePreProcessDropTbMsg(SVnode* pVnode, SRpcMsg* pMsg) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t size = 0;
+ SDecoder dc = {0};
+ SEncoder ec = {0};
+ SVDropTbBatchReq receivedBatchReqs = {0};
+ SVDropTbBatchReq sentBatchReqs = {0};
+
+ tDecoderInit(&dc, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead));
+
+ code = tDecodeSVDropTbBatchReq(&dc, &receivedBatchReqs);
+ if (code < 0) {
+ terrno = code;
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+ sentBatchReqs.pArray = taosArrayInit(receivedBatchReqs.nReqs, sizeof(SVDropTbReq));
+ if (!sentBatchReqs.pArray) {
+ code = terrno;
+ goto _exit;
+ }
+
+ for (int32_t i = 0; i < receivedBatchReqs.nReqs; ++i) {
+ SVDropTbReq* pReq = receivedBatchReqs.pReqs + i;
+ tb_uid_t uid = metaGetTableEntryUidByName(pVnode->pMeta, pReq->name);
+ if (uid == 0) {
+ vWarn("vgId:%d, preprocess drop ctb: %s not found", TD_VID(pVnode), pReq->name);
+ continue;
+ }
+ pReq->uid = uid;
+ vDebug("vgId:%d %s for: %s, uid: %"PRId64, TD_VID(pVnode), __func__, pReq->name, pReq->uid);
+ if (taosArrayPush(sentBatchReqs.pArray, pReq) == NULL) {
+ code = terrno;
+ goto _exit;
+ }
+ }
+ sentBatchReqs.nReqs = sentBatchReqs.pArray->size;
+
+ tEncodeSize(tEncodeSVDropTbBatchReq, &sentBatchReqs, size, code);
+ tEncoderInit(&ec, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), size);
+ code = tEncodeSVDropTbBatchReq(&ec, &sentBatchReqs);
+ tEncoderClear(&ec);
+ if (code != TSDB_CODE_SUCCESS) {
+ vError("vgId:%d %s failed to encode drop tb batch req: %s", TD_VID(pVnode), __func__, tstrerror(code));
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+
+_exit:
+ tDecoderClear(&dc);
+ if (sentBatchReqs.pArray) {
+ taosArrayDestroy(sentBatchReqs.pArray);
+ }
+ return code;
+}
+
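The new vnodePreProcessDropTbMsg above decodes the batch, resolves each table name to a uid, drops names that no longer exist, and re-encodes the smaller batch into the same message buffer; filtering can only shrink the payload, which is what makes the in-place re-encode safe. A standalone sketch of that filter step, with stand-in types and hypothetical names rather than the real SDecoder/SEncoder API:

#include <stdint.h>

typedef struct {
  uint64_t uid;
  char     name[32];
} ReqDemo;

/* Keep only requests whose name resolves to a known uid; compact the array
 * in place and return the new count (never larger than the input count). */
static int32_t filterInPlace(ReqDemo *reqs, int32_t n,
                             uint64_t (*resolveUid)(const char *name)) {
  int32_t kept = 0;
  for (int32_t i = 0; i < n; ++i) {
    uint64_t uid = resolveUid(reqs[i].name);
    if (uid == 0) continue;  /* unknown table: drop the request */
    reqs[i].uid = uid;
    if (kept != i) reqs[kept] = reqs[i];
    kept++;
  }
  return kept;
}

static uint64_t demoResolve(const char *name) { return (name[0] == 'x') ? 0 : 7; }

int main(void) {
  ReqDemo reqs[2] = {{0, "t1"}, {0, "xmissing"}};
  return (filterInPlace(reqs, 2, demoResolve) == 1) ? 0 : 1;
}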
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
@@ -507,6 +568,9 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
case TDMT_VND_ARB_CHECK_SYNC: {
code = vnodePreProcessArbCheckSyncMsg(pVnode, pMsg);
} break;
+ case TDMT_VND_DROP_TABLE: {
+ code = vnodePreProcessDropTbMsg(pVnode, pMsg);
+ } break;
default:
break;
}
@@ -1110,7 +1174,6 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
STbUidStore *pStore = NULL;
SArray *tbUids = NULL;
SArray *tbNames = NULL;
-
pRsp->msgType = TDMT_VND_CREATE_TABLE_RSP;
pRsp->code = TSDB_CODE_SUCCESS;
pRsp->pCont = NULL;
@@ -2512,3 +2575,4 @@ _OVER:
int32_t vnodeAsyncCompact(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) { return 0; }
int32_t tsdbAsyncCompact(STsdb *tsdb, const STimeWindow *tw, bool sync) { return 0; }
#endif
+
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index e757163ba8..b581e31919 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -271,6 +271,7 @@ typedef struct SCtgViewsCtx {
SArray* pNames;
SArray* pResList;
SArray* pFetchs;
+ bool forceFetch;
} SCtgViewsCtx;
typedef enum {
@@ -831,12 +832,12 @@ typedef struct SCtgCacheItemInfo {
#define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__)
-#define ctgTaskFatal(param, ...) qFatal("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
-#define ctgTaskError(param, ...) qError("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
-#define ctgTaskWarn(param, ...) qWarn("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
-#define ctgTaskInfo(param, ...) qInfo("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
-#define ctgTaskDebug(param, ...) qDebug("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
-#define ctgTaskTrace(param, ...) qTrace("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskError(param, ...) qError("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskWarn(param, ...) qWarn("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskInfo(param, ...) qInfo("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
+#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define CTG_LOCK_DEBUG(...) \
do { \
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index c1dcdf2741..9bfb4102aa 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -20,6 +20,11 @@
#include "tref.h"
#include "trpc.h"
+typedef struct SCtgViewTaskParam {
+ bool forceFetch;
+ SArray* pTableReqs;
+} SCtgViewTaskParam;
+
void ctgIsTaskDone(SCtgJob* pJob, CTG_TASK_TYPE type, bool* done) {
SCtgTask* pTask = NULL;
@@ -500,7 +505,7 @@ int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
-
+ SCtgViewTaskParam* p = param;
task.type = CTG_TASK_GET_VIEW;
task.taskId = taskIdx;
task.pJob = pJob;
@@ -511,7 +516,8 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
}
SCtgViewsCtx* ctx = task.taskCtx;
- ctx->pNames = param;
+ ctx->pNames = p->pTableReqs;
+ ctx->forceFetch = p->forceFetch;
ctx->pResList = taosArrayInit(pJob->viewNum, sizeof(SMetaRes));
if (NULL == ctx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum,
@@ -849,13 +855,12 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
int32_t tbTagNum = (int32_t)taosArrayGetSize(pReq->pTableTag);
int32_t viewNum = (int32_t)ctgGetTablesReqNum(pReq->pView);
- int32_t tbTsmaNum = (int32_t)taosArrayGetSize(pReq->pTableTSMAs);
+ int32_t tbTsmaNum = tsQuerySmaOptimize ? (int32_t)taosArrayGetSize(pReq->pTableTSMAs) : 0;
int32_t tsmaNum = (int32_t)taosArrayGetSize(pReq->pTSMAs);
int32_t tbNameNum = (int32_t)ctgGetTablesReqNum(pReq->pTableName);
int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum +
userNum + dbInfoNum + tbIndexNum + tbCfgNum + tbTagNum + viewNum + tbTsmaNum + tbNameNum;
-
*job = taosMemoryCalloc(1, sizeof(SCtgJob));
if (NULL == *job) {
ctgError("failed to calloc, size:%d,QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId);
@@ -1014,7 +1019,8 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
}
if (viewNum > 0) {
- CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, pReq->pView, NULL));
+ SCtgViewTaskParam param = {.forceFetch = pReq->forceFetchViewMeta, .pTableReqs = pReq->pView};
+ CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, &param, NULL));
}
if (tbTsmaNum > 0) {
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_TB_TSMA, pReq->pTableTSMAs, NULL));
@@ -3712,16 +3718,14 @@ int32_t ctgLaunchGetViewsTask(SCtgTask* pTask) {
bool tbMetaDone = false;
SName* pName = NULL;
- /*
- ctgIsTaskDone(pJob, CTG_TASK_GET_TB_META_BATCH, &tbMetaDone);
- if (tbMetaDone) {
- CTG_ERR_RET(ctgBuildViewNullRes(pTask, pCtx));
- TSWAP(pTask->res, pCtx->pResList);
+ ctgIsTaskDone(pJob, CTG_TASK_GET_TB_META_BATCH, &tbMetaDone);
+ if (tbMetaDone && !pCtx->forceFetch) {
+ CTG_ERR_RET(ctgBuildViewNullRes(pTask, pCtx));
+ TSWAP(pTask->res, pCtx->pResList);
- CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
- return TSDB_CODE_SUCCESS;
- }
- */
+ CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
+ return TSDB_CODE_SUCCESS;
+ }
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
diff --git a/source/libs/catalog/test/CMakeLists.txt b/source/libs/catalog/test/CMakeLists.txt
index de4d08835c..f23a6beaee 100644
--- a/source/libs/catalog/test/CMakeLists.txt
+++ b/source/libs/catalog/test/CMakeLists.txt
@@ -9,7 +9,7 @@ IF(NOT TD_DARWIN)
ADD_EXECUTABLE(catalogTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(
catalogTest
- PUBLIC os util common nodes catalog transport gtest qcom taos_static
+ PUBLIC os util common nodes catalog transport gtest qcom ${TAOS_LIB_STATIC}
)
TARGET_INCLUDE_DIRECTORIES(
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 6272ac7049..353bc1fa18 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -35,6 +35,9 @@
extern SConfig* tsCfg;
static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) {
+ if (NULL == pBlock || NULL == pRsp) {
+ return TSDB_CODE_INVALID_PARA;
+ }
size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN;
*pRsp = taosMemoryCalloc(1, rspSize);
@@ -53,6 +56,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe
int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols);
if(len < 0) {
taosMemoryFree(*pRsp);
+ *pRsp = NULL;
return terrno;
}
SET_PAYLOAD_LEN((*pRsp)->data, len, len);
@@ -216,6 +220,9 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp, int8_t biMode) {
SDescribeStmt* pDesc = (SDescribeStmt*)pStmt;
+ if (NULL == pDesc || NULL == pDesc->pMeta) {
+ return TSDB_CODE_INVALID_PARA;
+ }
int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta);
SSDataBlock* pBlock = NULL;
@@ -505,7 +512,7 @@ static int32_t buildCreateViewResultDataBlock(SSDataBlock** pOutput) {
return code;
}
-void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
+static void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfColumns; ++i) {
SSchema* pSchema = pCfg->pSchemas + i;
#define LTYPE_LEN (32 + 60) // 60 byte for compress info
@@ -539,7 +546,7 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
-void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
+static void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfTags; ++i) {
SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i;
char type[32];
@@ -558,7 +565,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
-void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
+static void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfTags; ++i) {
SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i;
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
@@ -566,7 +573,7 @@ void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) {
}
}
-int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
+static int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pTagVals = NULL;
STag* pTag = (STag*)pCfg->pTags;
@@ -643,7 +650,7 @@ _exit:
return code;
}
-void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) {
+static void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) {
if (pCfg->commentLen > 0) {
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
" COMMENT '%s'", pCfg->pComment);
@@ -947,12 +954,18 @@ static int32_t buildLocalVariablesResultDataBlock(SSDataBlock** pOutput) {
goto _exit;
}
+ infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
+ infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN;
+ if (taosArrayPush(pBlock->pDataBlock, &infoData) == NULL) {
+ goto _exit;
+ }
+
*pOutput = pBlock;
_exit:
if (terrno != TSDB_CODE_SUCCESS) {
- taosMemoryFree(pBlock);
taosArrayDestroy(pBlock->pDataBlock);
+ taosMemoryFree(pBlock);
}
return terrno;
}
@@ -997,7 +1010,7 @@ static int32_t createSelectResultDataBlock(SNodeList* pProjects, SSDataBlock** p
return code;
}
-int32_t buildSelectResultDataBlock(SNodeList* pProjects, SSDataBlock* pBlock) {
+static int32_t buildSelectResultDataBlock(SNodeList* pProjects, SSDataBlock* pBlock) {
QRY_ERR_RET(blockDataEnsureCapacity(pBlock, 1));
int32_t index = 0;
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 42c214fac7..24b43ac95b 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -30,8 +30,8 @@ char *gJoinTypeStr[JOIN_TYPE_MAX_VALUE][JOIN_STYPE_MAX_VALUE] = {
/*FULL*/ {"Full Join", "Full Join", NULL, NULL, NULL, NULL},
};
-int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
-int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel);
+static int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
+static int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel);
char *qExplainGetDynQryCtrlType(EDynQueryType type) {
switch (type) {
@@ -118,7 +118,7 @@ void qExplainFreeCtx(SExplainCtx *pCtx) {
taosMemoryFree(pCtx);
}
-int32_t qExplainInitCtx(SExplainCtx **pCtx, SHashObj *groupHash, bool verbose, double ratio, EExplainMode mode) {
+static int32_t qExplainInitCtx(SExplainCtx **pCtx, SHashObj *groupHash, bool verbose, double ratio, EExplainMode mode) {
int32_t code = 0;
SExplainCtx *ctx = taosMemoryCalloc(1, sizeof(SExplainCtx));
if (NULL == ctx) {
@@ -158,7 +158,7 @@ _return:
QRY_RET(code);
}
-int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNodeList **pChildren) {
+static int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNodeList **pChildren) {
int32_t tlen = 0;
SNodeList *pPhysiChildren = pNode->pChildren;
@@ -180,7 +180,7 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, SExplainGroup *group) {
+static int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, SExplainGroup *group) {
*pExecInfo = taosArrayInit(group->nodeNum, sizeof(SExplainExecInfo));
if (NULL == (*pExecInfo)) {
qError("taosArrayInit %d explainExecInfo failed", group->nodeNum);
@@ -217,7 +217,7 @@ int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, S
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pResNode) {
+static int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pResNode) {
if (NULL == pNode) {
*pResNode = NULL;
qError("physical node is NULL");
@@ -250,7 +250,7 @@ _return:
QRY_RET(code);
}
-int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
+static int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
int32_t tlen = *len;
int32_t nodeNum = taosArrayGetSize(pExecInfo);
SExplainExecInfo maxExecInfo = {0};
@@ -275,7 +275,7 @@ int32_t qExplainBufAppendExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
+static int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t *len) {
int32_t tlen = 0;
bool gotVerbose = false;
int32_t nodeNum = taosArrayGetSize(pExecInfo);
@@ -297,7 +297,7 @@ int32_t qExplainBufAppendVerboseExecInfo(SArray *pExecInfo, char *tbuf, int32_t
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainResAppendRow(SExplainCtx *ctx, char *tbuf, int32_t len, int32_t level) {
+static int32_t qExplainResAppendRow(SExplainCtx *ctx, char *tbuf, int32_t len, int32_t level) {
SQueryExplainRowInfo row = {0};
row.buf = taosMemoryMalloc(len);
if (NULL == row.buf) {
@@ -362,7 +362,7 @@ static char* qExplainGetScanDataLoad(STableScanPhysiNode* pScan) {
return "unknown";
}
-int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
+static int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
int32_t tlen = 0;
bool isVerboseLine = false;
char *tbuf = ctx->tbuf;
@@ -1900,7 +1900,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
+static int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
if (NULL == pResNode) {
qError("explain res node is NULL");
QRY_ERR_RET(TSDB_CODE_APP_ERROR);
@@ -1915,7 +1915,7 @@ int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel) {
+static int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel) {
SExplainResNode *node = NULL;
int32_t code = 0;
SExplainCtx *ctx = (SExplainCtx *)pCtx;
@@ -1940,7 +1940,7 @@ _return:
QRY_RET(code);
}
-int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
+static int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
int32_t code = 0;
SSDataBlock *pBlock = NULL;
SExplainCtx *pCtx = (SExplainCtx *)ctx;
@@ -1997,7 +1997,7 @@ _return:
QRY_RET(code);
}
-int32_t qExplainPrepareCtx(SQueryPlan *pDag, SExplainCtx **pCtx) {
+static int32_t qExplainPrepareCtx(SQueryPlan *pDag, SExplainCtx **pCtx) {
int32_t code = 0;
SNodeListNode *plans = NULL;
int32_t taskNum = 0;
@@ -2080,7 +2080,7 @@ _return:
QRY_RET(code);
}
-int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
+static int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
if (EXPLAIN_MODE_ANALYZE != pCtx->mode) {
return TSDB_CODE_SUCCESS;
}
@@ -2103,7 +2103,7 @@ int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
+static int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
QRY_ERR_RET(qExplainAppendGroupResRows(pCtx, pCtx->rootGroupId, 0, false));
QRY_ERR_RET(qExplainAppendPlanRows(pCtx));
QRY_ERR_RET(qExplainGetRspFromCtx(pCtx, pRsp));
@@ -2112,6 +2112,7 @@ int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
}
int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp) {
+ if (!pCtx || !pRspMsg || !pRsp) return TSDB_CODE_INVALID_PARA;
SExplainResNode *node = NULL;
int32_t code = 0;
bool groupDone = false;
@@ -2176,6 +2177,7 @@ _exit:
}
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp) {
+ if (!pDag || !pRsp) return TSDB_CODE_INVALID_PARA;
int32_t code = 0;
SExplainCtx *pCtx = NULL;
@@ -2188,6 +2190,7 @@ _return:
}
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs) {
+ if (!pDag || !pCtx) return TSDB_CODE_INVALID_PARA;
QRY_ERR_RET(qExplainPrepareCtx(pDag, pCtx));
(*pCtx)->reqStartTs = startTs;
@@ -2197,6 +2200,7 @@ int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs)
}
int32_t qExecExplainEnd(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
+ if (!pCtx || !pRsp) return TSDB_CODE_INVALID_PARA;
int32_t code = 0;
pCtx->jobDoneTs = taosGetTimestampUs();
diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c
index 94cc5d9129..3bc9c806b0 100644
--- a/source/libs/executor/src/anomalywindowoperator.c
+++ b/source/libs/executor/src/anomalywindowoperator.c
@@ -44,9 +44,9 @@ typedef struct {
SExprSupp scalarSup;
int32_t tsSlotId;
STimeWindowAggSupp twAggSup;
- char algoName[TSDB_ANAL_ALGO_NAME_LEN];
- char algoUrl[TSDB_ANAL_ALGO_URL_LEN];
- char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+ char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN];
+ char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN];
+ char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
SAnomalyWindowSupp anomalySup;
SWindowRowsSup anomalyWinRowSup;
SColumn anomalyCol;
@@ -75,13 +75,13 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p
if (!taosAnalGetOptStr(pAnomalyNode->anomalyOpt, "algo", pInfo->algoName, sizeof(pInfo->algoName))) {
qError("failed to get anomaly_window algorithm name from %s", pAnomalyNode->anomalyOpt);
- code = TSDB_CODE_ANAL_ALGO_NOT_FOUND;
+ code = TSDB_CODE_ANA_ALGO_NOT_FOUND;
goto _error;
}
if (taosAnalGetAlgoUrl(pInfo->algoName, ANAL_ALGO_TYPE_ANOMALY_DETECT, pInfo->algoUrl, sizeof(pInfo->algoUrl)) != 0) {
qError("failed to get anomaly_window algorithm url from %s", pInfo->algoName);
- code = TSDB_CODE_ANAL_ALGO_NOT_LOAD;
+ code = TSDB_CODE_ANA_ALGO_NOT_LOAD;
goto _error;
}
@@ -262,7 +262,7 @@ static void anomalyDestroyOperatorInfo(void* param) {
static int32_t anomalyCacheBlock(SAnomalyWindowOperatorInfo* pInfo, SSDataBlock* pSrc) {
if (pInfo->anomalySup.cachedRows > ANAL_ANOMALY_WINDOW_MAX_ROWS) {
- return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS;
+ return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
}
SSDataBlock* pDst = NULL;
@@ -287,7 +287,7 @@ static int32_t anomalyFindWindow(SAnomalyWindowSupp* pSupp, TSKEY key) {
return -1;
}
-static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
+static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows, const char* pId) {
int32_t code = 0;
int32_t rows = 0;
STimeWindow win = {0};
@@ -295,8 +295,23 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
taosArrayClear(pWindows);
tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
- if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT;
- if (rows <= 0) return 0;
+ if (code < 0) {
+ return TSDB_CODE_INVALID_JSON_FORMAT;
+ }
+
+ if (rows < 0) {
+ char pMsg[1024] = {0};
+ code = tjsonGetStringValue(pJson, "msg", pMsg);
+ if (code) {
+ qError("%s failed to get error msg from rsp, unknown error", pId);
+ } else {
+ qError("%s failed to exec forecast, msg:%s", pId, pMsg);
+ }
+
+ return TSDB_CODE_ANA_WN_DATA;
+ } else if (rows == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
SJson* res = tjsonGetObjectItem(pJson, "res");
if (res == NULL) return TSDB_CODE_INVALID_JSON_FORMAT;
@@ -313,7 +328,10 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
SJson* start = tjsonGetArrayItem(row, 0);
SJson* end = tjsonGetArrayItem(row, 1);
- if (start == NULL || end == NULL) return TSDB_CODE_INVALID_JSON_FORMAT;
+ if (start == NULL || end == NULL) {
+ qError("%s invalid res from analytic sys, code:%s", pId, tstrerror(TSDB_CODE_INVALID_JSON_FORMAT));
+ return TSDB_CODE_INVALID_JSON_FORMAT;
+ }
tjsonGetObjectValueBigInt(start, &win.skey);
tjsonGetObjectValueBigInt(end, &win.ekey);
@@ -322,52 +340,57 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
win.ekey = win.skey + 1;
}
- if (taosArrayPush(pWindows, &win) == NULL) return TSDB_CODE_OUT_OF_BUFFER;
+ if (taosArrayPush(pWindows, &win) == NULL) {
+ qError("%s out of memory in generating anomaly_window", pId);
+ return TSDB_CODE_OUT_OF_BUFFER;
+ }
}
int32_t numOfWins = taosArrayGetSize(pWindows);
- qDebug("anomaly window recevied, total:%d", numOfWins);
+ qDebug("%s anomaly window recevied, total:%d", pId, numOfWins);
for (int32_t i = 0; i < numOfWins; ++i) {
STimeWindow* pWindow = taosArrayGet(pWindows, i);
- qDebug("anomaly win:%d [%" PRId64 ", %" PRId64 ")", i, pWindow->skey, pWindow->ekey);
+ qDebug("%s anomaly win:%d [%" PRId64 ", %" PRId64 ")", pId, i, pWindow->skey, pWindow->ekey);
}
- return 0;
+ return code;
}
static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
SAnomalyWindowOperatorInfo* pInfo = pOperator->info;
SAnomalyWindowSupp* pSupp = &pInfo->anomalySup;
SJson* pJson = NULL;
- SAnalBuf analBuf = {.bufType = ANAL_BUF_TYPE_JSON};
+ SAnalyticBuf analBuf = {.bufType = ANALYTICS_BUF_TYPE_JSON};
char dataBuf[64] = {0};
int32_t code = 0;
int64_t ts = 0;
+ int32_t lino = 0;
+ const char* pId = GET_TASKID(pOperator->pTaskInfo);
- // int64_t ts = taosGetTimestampMs();
snprintf(analBuf.fileName, sizeof(analBuf.fileName), "%s/tdengine-anomaly-%" PRId64 "-%" PRId64, tsTempDir, ts,
pSupp->groupId);
code = tsosAnalBufOpen(&analBuf, 2);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
const char* prec = TSDB_TIME_PRECISION_MILLI_STR;
if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_MICRO) prec = TSDB_TIME_PRECISION_MICRO_STR;
if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_NANO) prec = TSDB_TIME_PRECISION_NANO_STR;
code = taosAnalBufWriteColMeta(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, "ts");
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteColMeta(&analBuf, 1, pInfo->anomalyCol.type, "val");
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteDataBegin(&analBuf);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
int32_t numOfBlocks = (int32_t)taosArrayGetSize(pSupp->blocks);
// timestamp
code = taosAnalBufWriteColBegin(&analBuf, 0);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
+
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i);
if (pBlock == NULL) break;
@@ -375,15 +398,17 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
if (pTsCol == NULL) break;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
code = taosAnalBufWriteColData(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, &((TSKEY*)pTsCol->pData)[j]);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
}
}
+
code = taosAnalBufWriteColEnd(&analBuf, 0);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
// data
code = taosAnalBufWriteColBegin(&analBuf, 1);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
+
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i);
if (pBlock == NULL) break;
@@ -392,48 +417,47 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
code = taosAnalBufWriteColData(&analBuf, 1, pValCol->info.type, colDataGetData(pValCol, j));
- if (code != 0) goto _OVER;
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
}
}
code = taosAnalBufWriteColEnd(&analBuf, 1);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteDataEnd(&analBuf);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteOptStr(&analBuf, "option", pInfo->anomalyOpt);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteOptStr(&analBuf, "algo", pInfo->algoName);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufWriteOptStr(&analBuf, "prec", prec);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
int64_t wncheck = ANAL_FORECAST_DEFAULT_WNCHECK;
bool hasWncheck = taosAnalGetOptInt(pInfo->anomalyOpt, "wncheck", &wncheck);
if (!hasWncheck) {
qDebug("anomaly_window wncheck not found from %s, use default:%" PRId64, pInfo->anomalyOpt, wncheck);
}
+
code = taosAnalBufWriteOptInt(&analBuf, "wncheck", wncheck);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
code = taosAnalBufClose(&analBuf);
- if (code != 0) goto _OVER;
+ QUERY_CHECK_CODE(code, lino, _OVER);
- pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANAL_HTTP_TYPE_POST, &analBuf);
+ pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANALYTICS_HTTP_TYPE_POST, &analBuf);
if (pJson == NULL) {
code = terrno;
goto _OVER;
}
- code = anomalyParseJson(pJson, pSupp->windows);
- if (code != 0) goto _OVER;
+ code = anomalyParseJson(pJson, pSupp->windows, pId);
_OVER:
if (code != 0) {
- qError("failed to analysis window since %s", tstrerror(code));
+ qError("%s failed to analysis window since %s, lino:%d", pId, tstrerror(code), lino);
}
taosAnalBufDestroy(&analBuf);
diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c
index 042fcf0120..7222f2d297 100644
--- a/source/libs/executor/src/exchangeoperator.c
+++ b/source/libs/executor/src/exchangeoperator.c
@@ -121,10 +121,10 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
}
} else {
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
- qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64
- ", totalRows:%" PRIu64 ", try next %d/%" PRIzu,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pDataInfo->totalRows,
- pExchangeInfo->loadInfo.totalRows, i + 1, totalSources);
+ qDebug("%s vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
+ " execId:%d index:%d completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", try next %d/%" PRIzu,
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId, i,
+ pDataInfo->totalRows, pExchangeInfo->loadInfo.totalRows, i + 1, totalSources);
taosMemoryFreeClear(pDataInfo->pRsp);
}
break;
@@ -141,17 +141,17 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
if (pRsp->completed == 1) {
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
- qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64
+ qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
" execId:%d index:%d completed, blocks:%d, numOfRows:%" PRId64 ", rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", total:%.2f Kb, try next %d/%" PRIzu,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks,
- pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0, i + 1,
- totalSources);
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId, i,
+ pRsp->numOfBlocks, pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows,
+ pLoadInfo->totalSize / 1024.0, i + 1, totalSources);
} else {
- qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%" PRId64
- ", totalRows:%" PRIu64 ", total:%.2f Kb",
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRsp->numOfBlocks,
- pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0);
+ qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
+ " execId:%d blocks:%d, numOfRows:%" PRId64 ", totalRows:%" PRIu64 ", total:%.2f Kb",
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
+ pRsp->numOfBlocks, pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0);
}
taosMemoryFreeClear(pDataInfo->pRsp);
@@ -640,9 +640,9 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
if (pSource->localExec) {
SDataBuf pBuf = {0};
- int32_t code =
- (*pTaskInfo->localFetch.fp)(pTaskInfo->localFetch.handle, pSource->schedId, pTaskInfo->id.queryId,
- pSource->taskId, 0, pSource->execId, &pBuf.pData, pTaskInfo->localFetch.explainRes);
+ int32_t code = (*pTaskInfo->localFetch.fp)(pTaskInfo->localFetch.handle, pSource->schedId, pTaskInfo->id.queryId,
+ pSource->clientId, pSource->taskId, 0, pSource->execId, &pBuf.pData,
+ pTaskInfo->localFetch.explainRes);
code = loadRemoteDataCallback(pWrapper, &pBuf, code);
QUERY_CHECK_CODE(code, lino, _end);
taosMemoryFree(pWrapper);
@@ -650,6 +650,7 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
SResFetchReq req = {0};
req.header.vgId = pSource->addr.nodeId;
req.sId = pSource->schedId;
+ req.clientId = pSource->clientId;
req.taskId = pSource->taskId;
req.queryId = pTaskInfo->id.queryId;
req.execId = pSource->execId;
@@ -691,9 +692,10 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
freeOperatorParam(req.pOpParam, OP_GET_PARAM);
- qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %p, %d/%" PRIzu,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId,
- pSource->execId, pExchangeInfo, sourceIndex, totalSources);
+ qDebug("%s build fetch msg and send to vgId:%d, ep:%s, clientId:0x%" PRIx64 " taskId:0x%" PRIx64
+ ", execId:%d, %p, %d/%" PRIzu,
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->clientId,
+ pSource->taskId, pSource->execId, pExchangeInfo, sourceIndex, totalSources);
// send the fetch remote task result reques
SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
@@ -974,8 +976,9 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
}
if (pDataInfo->code != TSDB_CODE_SUCCESS) {
- qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo),
- pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code));
+ qError("%s vgId:%d, clientId:0x%" PRIx64 " taskID:0x%" PRIx64 " execId:%d error happens, code:%s",
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
+ tstrerror(pDataInfo->code));
pOperator->pTaskInfo->code = pDataInfo->code;
return pOperator->pTaskInfo->code;
}
@@ -984,10 +987,10 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pRsp->numOfRows == 0) {
- qDebug("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d %d of total completed, rowsOfSource:%" PRIu64
- ", totalRows:%" PRIu64 " try next",
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pExchangeInfo->current + 1,
- pDataInfo->totalRows, pLoadInfo->totalRows);
+ qDebug("%s vgId:%d, clientId:0x%" PRIx64 " taskID:0x%" PRIx64
+ " execId:%d %d of total completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 " try next",
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
+ pExchangeInfo->current + 1, pDataInfo->totalRows, pLoadInfo->totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
@@ -1002,19 +1005,19 @@ int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
if (pRsp->completed == 1) {
- qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
+ qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
", rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
- pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1,
- totalSources);
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
+ pRetrieveRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize,
+ pExchangeInfo->current + 1, totalSources);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
} else {
- qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64 ", totalRows:%" PRIu64
- ", totalBytes:%" PRIu64,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
- pLoadInfo->totalRows, pLoadInfo->totalSize);
+ qDebug("%s fetch msg rsp from vgId:%d, clientId:0x%" PRIx64 " taskId:0x%" PRIx64 " execId:%d numOfRows:%" PRId64
+ ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64,
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->clientId, pSource->taskId, pSource->execId,
+ pRetrieveRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize);
}
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index 1b823bf69d..af8e01be5e 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -1083,18 +1083,13 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
bool groupbyTbname(SNodeList* pGroupList) {
bool bytbname = false;
- if (LIST_LENGTH(pGroupList) == 1) {
- SNode* p = nodesListGetNode(pGroupList, 0);
- if (!p) {
- qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
- return false;
- }
- if (p->type == QUERY_NODE_FUNCTION) {
- // partition by tbname/group by tbname
- bytbname = (strcmp(((struct SFunctionNode*)p)->functionName, "tbname") == 0);
+    SNode* pNode = NULL;
+ FOREACH(pNode, pGroupList) {
+ if (pNode->type == QUERY_NODE_FUNCTION) {
+ bytbname = (strcmp(((struct SFunctionNode*)pNode)->functionName, "tbname") == 0);
+ break;
}
}
-
return bytbname;
}
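
The rewritten groupbyTbname above drops the single-element assumption and walks the whole group list looking for a `tbname` function node. A minimal standalone sketch of the same scan, using simplified stand-in types rather than the real SNode/SNodeList/FOREACH definitions:

```c
#include <stdbool.h>
#include <string.h>

/* Simplified stand-in node types for illustration only; the real code walks
 * an SNodeList with FOREACH and inspects SFunctionNode.functionName. */
typedef enum { NODE_COLUMN, NODE_FUNCTION } ENodeKind;

typedef struct SimpleNode {
  ENodeKind          kind;
  const char        *funcName;   /* meaningful only for NODE_FUNCTION */
  struct SimpleNode *next;
} SimpleNode;

/* Mirrors the new groupbyTbname(): scan the whole group list and report
 * whether the first function node found is tbname. */
static bool isGroupByTbname(const SimpleNode *groupList) {
  for (const SimpleNode *p = groupList; p != NULL; p = p->next) {
    if (p->kind == NODE_FUNCTION) {
      return strcmp(p->funcName, "tbname") == 0;
    }
  }
  return false;
}
```
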
diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c
index 20dc9e28ba..bf1efc54ca 100644
--- a/source/libs/executor/src/forecastoperator.c
+++ b/source/libs/executor/src/forecastoperator.c
@@ -29,9 +29,9 @@
#ifdef USE_ANALYTICS
typedef struct {
- char algoName[TSDB_ANAL_ALGO_NAME_LEN];
- char algoUrl[TSDB_ANAL_ALGO_URL_LEN];
- char algoOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+ char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN];
+ char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN];
+ char algoOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
int64_t maxTs;
int64_t minTs;
int64_t numOfRows;
@@ -47,7 +47,7 @@ typedef struct {
int16_t inputValSlot;
int8_t inputValType;
int8_t inputPrecision;
- SAnalBuf analBuf;
+ SAnalyticBuf analBuf;
} SForecastSupp;
typedef struct SForecastOperatorInfo {
@@ -74,12 +74,12 @@ static FORCE_INLINE int32_t forecastEnsureBlockCapacity(SSDataBlock* pBlock, int
static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) {
if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) {
- return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS;
+ return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
}
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
- SAnalBuf* pBuf = &pSupp->analBuf;
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
qDebug("block:%d, %p rows:%" PRId64, pSupp->numOfBlocks, pBlock, pBlock->info.rows);
pSupp->numOfBlocks++;
@@ -108,7 +108,7 @@ static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) {
}
static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
- SAnalBuf* pBuf = &pSupp->analBuf;
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
int32_t code = 0;
for (int32_t i = 0; i < 2; ++i) {
@@ -180,8 +180,8 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
return code;
}
-static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
- SAnalBuf* pBuf = &pSupp->analBuf;
+static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const char* pId) {
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
int32_t resCurRow = pBlock->info.rows;
int8_t tmpI8;
int16_t tmpI16;
@@ -192,28 +192,45 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
int32_t code = 0;
SColumnInfoData* pResValCol = taosArrayGet(pBlock->pDataBlock, pSupp->resValSlot);
- if (NULL == pResValCol) return TSDB_CODE_OUT_OF_RANGE;
+ if (NULL == pResValCol) {
+ return terrno;
+ }
SColumnInfoData* pResTsCol = (pSupp->resTsSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL);
SColumnInfoData* pResLowCol = (pSupp->resLowSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL);
SColumnInfoData* pResHighCol =
(pSupp->resHighSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resHighSlot) : NULL);
- SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANAL_HTTP_TYPE_POST, pBuf);
- if (pJson == NULL) return terrno;
+ SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANALYTICS_HTTP_TYPE_POST, pBuf);
+ if (pJson == NULL) {
+ return terrno;
+ }
int32_t rows = 0;
tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
- if (code < 0) goto _OVER;
- if (rows <= 0) goto _OVER;
+ if (rows < 0 && code == 0) {
+ char pMsg[1024] = {0};
+ code = tjsonGetStringValue(pJson, "msg", pMsg);
+ if (code != 0) {
+ qError("%s failed to get msg from rsp, unknown error", pId);
+ } else {
+ qError("%s failed to exec forecast, msg:%s", pId, pMsg);
+ }
+
+ tjsonDelete(pJson);
+ return TSDB_CODE_ANA_WN_DATA;
+ }
+
+ if (code < 0) {
+ goto _OVER;
+ }
SJson* res = tjsonGetObjectItem(pJson, "res");
if (res == NULL) goto _OVER;
int32_t ressize = tjsonGetArraySize(res);
bool returnConf = (pSupp->resHighSlot != -1 || pSupp->resLowSlot != -1);
- if (returnConf) {
- if (ressize != 4) goto _OVER;
- } else if (ressize != 2) {
+
+ if ((returnConf && (ressize != 4)) || ((!returnConf) && (ressize != 2))) {
goto _OVER;
}
@@ -313,41 +330,25 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
resCurRow++;
}
- // for (int32_t i = rows; i < pSupp->optRows; ++i) {
- // colDataSetNNULL(pResValCol, rows, (pSupp->optRows - rows));
- // if (pResTsCol != NULL) {
- // colDataSetNNULL(pResTsCol, rows, (pSupp->optRows - rows));
- // }
- // if (pResLowCol != NULL) {
- // colDataSetNNULL(pResLowCol, rows, (pSupp->optRows - rows));
- // }
- // if (pResHighCol != NULL) {
- // colDataSetNNULL(pResHighCol, rows, (pSupp->optRows - rows));
- // }
- // }
-
- // if (rows == pSupp->optRows) {
- // pResValCol->hasNull = false;
- // }
-
pBlock->info.rows += rows;
if (pJson != NULL) tjsonDelete(pJson);
return 0;
_OVER:
- if (pJson != NULL) tjsonDelete(pJson);
+ tjsonDelete(pJson);
if (code == 0) {
code = TSDB_CODE_INVALID_JSON_FORMAT;
}
- qError("failed to perform forecast finalize since %s", tstrerror(code));
- return TSDB_CODE_INVALID_JSON_FORMAT;
+
+ qError("%s failed to perform forecast finalize since %s", pId, tstrerror(code));
+ return code;
}
-static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock) {
+static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock, const char* pId) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
- SAnalBuf* pBuf = &pSupp->analBuf;
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
code = forecastCloseBuf(pSupp);
QUERY_CHECK_CODE(code, lino, _end);
@@ -355,10 +356,10 @@ static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBl
code = forecastEnsureBlockCapacity(pResBlock, 1);
QUERY_CHECK_CODE(code, lino, _end);
- code = forecastAnalysis(pSupp, pResBlock);
+ code = forecastAnalysis(pSupp, pResBlock, pId);
QUERY_CHECK_CODE(code, lino, _end);
- uInfo("block:%d, forecast finalize", pSupp->numOfBlocks);
+ uInfo("%s block:%d, forecast finalize", pId, pSupp->numOfBlocks);
_end:
pSupp->numOfBlocks = 0;
@@ -373,9 +374,10 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
SForecastOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pResBlock = pInfo->pRes;
SForecastSupp* pSupp = &pInfo->forecastSupp;
- SAnalBuf* pBuf = &pSupp->analBuf;
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
int64_t st = taosGetTimestampUs();
int32_t numOfBlocks = pSupp->numOfBlocks;
+ const char* pId = GET_TASKID(pOperator->pTaskInfo);
blockDataCleanup(pResBlock);
@@ -389,45 +391,46 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
pSupp->groupId = pBlock->info.id.groupId;
numOfBlocks++;
pSupp->cachedRows += pBlock->info.rows;
- qDebug("group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, numOfBlocks,
+ qDebug("%s group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, numOfBlocks,
pBlock->info.rows, pSupp->cachedRows);
code = forecastCacheBlock(pSupp, pBlock);
QUERY_CHECK_CODE(code, lino, _end);
} else {
- qDebug("group:%" PRId64 ", read finish for new group coming, blocks:%d", pSupp->groupId, numOfBlocks);
- code = forecastAggregateBlocks(pSupp, pResBlock);
+ qDebug("%s group:%" PRId64 ", read finish for new group coming, blocks:%d", pId, pSupp->groupId, numOfBlocks);
+ code = forecastAggregateBlocks(pSupp, pResBlock, pId);
QUERY_CHECK_CODE(code, lino, _end);
pSupp->groupId = pBlock->info.id.groupId;
numOfBlocks = 1;
pSupp->cachedRows = pBlock->info.rows;
- qDebug("group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, pBlock->info.rows,
- pSupp->cachedRows);
+ qDebug("%s group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId,
+ pBlock->info.rows, pSupp->cachedRows);
code = forecastCacheBlock(pSupp, pBlock);
QUERY_CHECK_CODE(code, lino, _end);
}
if (pResBlock->info.rows > 0) {
(*ppRes) = pResBlock;
- qDebug("group:%" PRId64 ", return to upstream, blocks:%d", pResBlock->info.id.groupId, numOfBlocks);
+ qDebug("%s group:%" PRId64 ", return to upstream, blocks:%d", pId, pResBlock->info.id.groupId, numOfBlocks);
return code;
}
}
if (numOfBlocks > 0) {
- qDebug("group:%" PRId64 ", read finish, blocks:%d", pSupp->groupId, numOfBlocks);
- code = forecastAggregateBlocks(pSupp, pResBlock);
+ qDebug("%s group:%" PRId64 ", read finish, blocks:%d", pId, pSupp->groupId, numOfBlocks);
+ code = forecastAggregateBlocks(pSupp, pResBlock, pId);
QUERY_CHECK_CODE(code, lino, _end);
}
int64_t cost = taosGetTimestampUs() - st;
- qDebug("all groups finished, cost:%" PRId64 "us", cost);
+ qDebug("%s all groups finished, cost:%" PRId64 "us", pId, cost);
_end:
if (code != TSDB_CODE_SUCCESS) {
- qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ qError("%s %s failed at line %d since %s", pId, __func__, lino, tstrerror(code));
pTaskInfo->code = code;
T_LONG_JMP(pTaskInfo->env, code);
}
+
(*ppRes) = (pResBlock->info.rows == 0) ? NULL : pResBlock;
return code;
}
@@ -498,7 +501,7 @@ static int32_t forecastParseInput(SForecastSupp* pSupp, SNodeList* pFuncs) {
pSupp->inputPrecision = pTsNode->node.resType.precision;
pSupp->inputValSlot = pValNode->slotId;
pSupp->inputValType = pValNode->node.resType.type;
- tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANAL_ALGO_OPTION_LEN);
+ tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANALYTIC_ALGO_OPTION_LEN);
} else {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
@@ -516,22 +519,22 @@ static int32_t forecastParseAlgo(SForecastSupp* pSupp) {
if (!taosAnalGetOptStr(pSupp->algoOpt, "algo", pSupp->algoName, sizeof(pSupp->algoName))) {
qError("failed to get forecast algorithm name from %s", pSupp->algoOpt);
- return TSDB_CODE_ANAL_ALGO_NOT_FOUND;
+ return TSDB_CODE_ANA_ALGO_NOT_FOUND;
}
if (taosAnalGetAlgoUrl(pSupp->algoName, ANAL_ALGO_TYPE_FORECAST, pSupp->algoUrl, sizeof(pSupp->algoUrl)) != 0) {
qError("failed to get forecast algorithm url from %s", pSupp->algoName);
- return TSDB_CODE_ANAL_ALGO_NOT_LOAD;
+ return TSDB_CODE_ANA_ALGO_NOT_LOAD;
}
return 0;
}
static int32_t forecastCreateBuf(SForecastSupp* pSupp) {
- SAnalBuf* pBuf = &pSupp->analBuf;
+ SAnalyticBuf* pBuf = &pSupp->analBuf;
int64_t ts = 0; // taosGetTimestampMs();
- pBuf->bufType = ANAL_BUF_TYPE_JSON_COL;
+ pBuf->bufType = ANALYTICS_BUF_TYPE_JSON_COL;
snprintf(pBuf->fileName, sizeof(pBuf->fileName), "%s/tdengine-forecast-%" PRId64, tsTempDir, ts);
int32_t code = tsosAnalBufOpen(pBuf, 2);
if (code != 0) goto _OVER;
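
The new error path in forecastAnalysis treats a negative `rows` field with no parse error as a failure reported by the anode and surfaces its `msg` before returning TSDB_CODE_ANA_WN_DATA. A small sketch of that decision order, with hypothetical stand-in codes; only the branch logic is taken from the diff above:

```c
#include <stdio.h>

/* Hypothetical stand-in for TSDB_CODE_ANA_WN_DATA. */
#define CODE_WN_DATA (-2001)
#define CODE_OK      0

/* Mirrors the decision made in forecastAnalysis once "rows" has been read
 * from the anode response. */
static int decideForecastCode(int rows, int parseCode, const char *msg) {
  if (rows < 0 && parseCode == 0) {
    /* The anode answered but reported a failure: log its msg and give up. */
    fprintf(stderr, "forecast failed: %s\n", msg ? msg : "unknown error");
    return CODE_WN_DATA;
  }
  if (parseCode < 0) {
    return parseCode;   /* propagate the parse error through the _OVER path */
  }
  return CODE_OK;       /* continue with the "res" array */
}
```
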
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index fec35c3371..d6e3d26267 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -1326,7 +1326,6 @@ int32_t appendCreateTableRow(void* pState, SExprSupp* pTableSup, SExprSupp* pTag
int32_t winCode = TSDB_CODE_SUCCESS;
code = pAPI->streamStateGetParName(pState, groupId, &pValue, true, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
-
if (winCode != TSDB_CODE_SUCCESS) {
SSDataBlock* pTmpBlock = NULL;
code = blockCopyOneRow(pSrcBlock, rowId, &pTmpBlock);
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 5b5d5c5d11..b128fe41ed 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -289,6 +289,7 @@ static int32_t doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* p
pTaskInfo, &pTableScanInfo->metaCache);
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ if (pTaskInfo->streamInfo.pState) blockDataCleanup(pBlock);
code = 0;
}
}
@@ -3038,12 +3039,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache);
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
- if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
- code = 0;
- }
-
if (code) {
- blockDataFreeRes((SSDataBlock*)pBlock);
QUERY_CHECK_CODE(code, lino, _end);
}
@@ -3411,6 +3407,8 @@ int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff, int32_t*
QUERY_CHECK_CODE(code, lino, _end);
}
+  qDebug("%s line:%d, last scan range:%" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey);
+
*pLen = len;
_end:
@@ -3468,11 +3466,6 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
goto _end;
}
- void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
- if (!pUpInfo) {
- lino = __LINE__;
- goto _end;
- }
SDecoder decoder = {0};
pDeCoder = &decoder;
tDecoderInit(pDeCoder, buf, tlen);
@@ -3481,14 +3474,20 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
goto _end;
}
+ void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
+ if (!pUpInfo) {
+ lino = __LINE__;
+ goto _end;
+ }
code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
if (code == TSDB_CODE_SUCCESS) {
pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
pInfo->pUpdateInfo = pUpInfo;
+ qDebug("%s line:%d. stream scan updateinfo deserialize success", __func__, __LINE__);
} else {
taosMemoryFree(pUpInfo);
- lino = __LINE__;
- goto _end;
+ code = TSDB_CODE_SUCCESS;
+ qDebug("%s line:%d. stream scan did not have updateinfo", __func__, __LINE__);
}
if (tDecodeIsEnd(pDeCoder)) {
@@ -3508,6 +3507,7 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
lino = __LINE__;
goto _end;
}
+  qDebug("%s line:%d, last scan range:%" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey);
_end:
if (pDeCoder != NULL) {
@@ -3535,6 +3535,46 @@ static int32_t copyGetResultBlock(SSDataBlock* dest, TSKEY start, TSKEY end) {
return appendDataToSpecialBlock(dest, &start, &end, NULL, NULL, NULL);
}
+static int32_t deletePartName(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t *deleteNum) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ for (int32_t i = 0; i < pBlock->info.rows; i++) {
+ // uid is the same as gid
+ SColumnInfoData* pGpIdCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pTbnameCol = taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+ int64_t* gpIdCol = (int64_t*)pGpIdCol->pData;
+ void* pParName = NULL;
+ int32_t winCode = 0;
+ code = pInfo->stateStore.streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, gpIdCol[i],
+ &pParName, false, &winCode);
+ if (TSDB_CODE_SUCCESS == code && winCode != 0) {
+      qDebug("delete stream partition name for:%" PRId64 " not found", gpIdCol[i]);
+ colDataSetNULL(pTbnameCol, i);
+ continue;
+ }
+ (*deleteNum)++;
+ QUERY_CHECK_CODE(code, lino, _end);
+ char varTbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE + 1] = {0};
+ varDataSetLen(varTbName, strlen(pParName));
+ int64_t len = tsnprintf(varTbName + VARSTR_HEADER_SIZE, TSDB_TABLE_NAME_LEN + 1, "%s", pParName);
+ code = colDataSetVal(pTbnameCol, i, varTbName, false);
+ qDebug("delete stream part for:%"PRId64 " res tb: %s", gpIdCol[i], (char*)pParName);
+ pInfo->stateStore.streamStateFreeVal(pParName);
+ QUERY_CHECK_CODE(code, lino, _end);
+ code = pInfo->stateStore.streamStateDeleteParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, gpIdCol[i]);
+ QUERY_CHECK_CODE(code, lino, _end);
+ pBlock->info.id.groupId = gpIdCol[i];
+ // currently, only one valid row in pBlock
+ memcpy(pBlock->info.parTbName, varTbName + VARSTR_HEADER_SIZE, TSDB_TABLE_NAME_LEN + 1);
+ }
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
+}
+
static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
// NOTE: this operator does never check if current status is done or not
int32_t code = TSDB_CODE_SUCCESS;
@@ -3774,6 +3814,12 @@ FETCH_NEXT_BLOCK:
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
} break;
+ case STREAM_DROP_CHILD_TABLE: {
+ int32_t deleteNum = 0;
+ code = deletePartName(pInfo, pBlock, &deleteNum);
+ QUERY_CHECK_CODE(code, lino, _end);
+ if (deleteNum == 0) goto FETCH_NEXT_BLOCK;
+ } break;
case STREAM_CHECKPOINT: {
qError("stream check point error. msg type: STREAM_INPUT__DATA_BLOCK");
} break;
@@ -3915,7 +3961,13 @@ FETCH_NEXT_BLOCK:
}
code = setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false);
- QUERY_CHECK_CODE(code, lino, _end);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ pInfo->pRes->info.rows = 0;
+ code = TSDB_CODE_SUCCESS;
+ } else {
+ QUERY_CHECK_CODE(code, lino, _end);
+ }
+
if (pInfo->pRes->info.rows == 0) {
continue;
}
diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c
index 8fd00e9313..2e906d2ba6 100644
--- a/source/libs/executor/src/streamtimewindowoperator.c
+++ b/source/libs/executor/src/streamtimewindowoperator.c
@@ -5215,7 +5215,7 @@ static int32_t doStreamIntervalAggNext(SOperatorInfo* pOperator, SSDataBlock** p
code = getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap);
QUERY_CHECK_CODE(code, lino, _end);
continue;
- } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
+ } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE || pBlock->info.type == STREAM_DROP_CHILD_TABLE) {
printDataBlock(pBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
(*ppRes) = pBlock;
return code;
diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c
index 8aad415f70..af5313297e 100644
--- a/source/libs/executor/src/sysscanoperator.c
+++ b/source/libs/executor/src/sysscanoperator.c
@@ -685,10 +685,10 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
break;
}
- } else {
- code = sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, pDataBlock, tableName, schemaRow, typeName);
- QUERY_CHECK_CODE(code, lino, _end);
}
+ // if pInfo->pRes->info.rows == 0, also need to add the meta to pDataBlock
+ code = sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, pDataBlock, tableName, schemaRow, typeName);
+ QUERY_CHECK_CODE(code, lino, _end);
}
if (numOfRows > 0) {
@@ -761,7 +761,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
SMetaReader smrChildTable = {0};
pAPI->metaReaderFn.initReader(&smrChildTable, pInfo->readHandle.vnode, META_READER_LOCK, &pAPI->metaFn);
- int32_t code = pAPI->metaReaderFn.getTableEntryByName(&smrChildTable, condTableName);
+ code = pAPI->metaReaderFn.getTableEntryByName(&smrChildTable, condTableName);
if (code != TSDB_CODE_SUCCESS) {
// terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
pAPI->metaReaderFn.clearReader(&smrChildTable);
@@ -847,18 +847,18 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
pAPI->metaReaderFn.clearReader(&smrSuperTable);
break;
}
- } else {
- code = sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
- dataBlock);
- if (code != TSDB_CODE_SUCCESS) {
- qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
- pAPI->metaReaderFn.clearReader(&smrSuperTable);
- pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
- pInfo->pCur = NULL;
- blockDataDestroy(dataBlock);
- dataBlock = NULL;
- T_LONG_JMP(pTaskInfo->env, terrno);
- }
+ }
+ // if pInfo->pRes->info.rows == 0, also need to add this meta into datablock.
+ code = sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
+ dataBlock);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
+ pInfo->pCur = NULL;
+ blockDataDestroy(dataBlock);
+ dataBlock = NULL;
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pAPI->metaReaderFn.clearReader(&smrSuperTable);
}
@@ -2792,7 +2792,9 @@ static int32_t doBlockInfoScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes
code = pAPI->tsdReader.tsdReaderGetDataBlockDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
QUERY_CHECK_CODE(code, lino, _end);
- blockDistInfo.numOfInmemRows = (int32_t)pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle);
+ blockDistInfo.numOfInmemRows = 0;
+ code = pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle, &blockDistInfo.numOfInmemRows);
+ QUERY_CHECK_CODE(code, lino, _end);
SSDataBlock* pBlock = pBlockScanInfo->pResBlock;
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index f77aa8f34a..50deba932f 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -1131,6 +1131,47 @@ static int32_t extractPkColumnFromFuncs(SNodeList* pFuncs, bool* pHasPk, SColumn
return TSDB_CODE_SUCCESS;
}
+/**
+ * @brief Determine the actual time ranges for reading data based on the RANGE clause and the WHERE condition.
+ * @param[in] cond The time range specified by the WHERE condition.
+ * @param[in] range The time range specified by the RANGE clause.
+ * @param[out] twindow The main range to scan, i.e. the intersection of `cond` and `range`.
+ * @param[out] extTwindows The two ranges outside the intersection, each read for only one boundary record, which is used by the FILL clause.
+ * @note `cond` and `twindow` may point to the same address.
+ */
+static int32_t getQueryExtWindow(const STimeWindow* cond, const STimeWindow* range, STimeWindow* twindow,
+ STimeWindow* extTwindows) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ STimeWindow tempWindow;
+
+ if (cond->skey > cond->ekey || range->skey > range->ekey) {
+ *twindow = extTwindows[0] = extTwindows[1] = TSWINDOW_DESC_INITIALIZER;
+ return code;
+ }
+
+ if (range->ekey < cond->skey) {
+ extTwindows[1] = *cond;
+ *twindow = extTwindows[0] = TSWINDOW_DESC_INITIALIZER;
+ return code;
+ }
+
+ if (cond->ekey < range->skey) {
+ extTwindows[0] = *cond;
+ *twindow = extTwindows[1] = TSWINDOW_DESC_INITIALIZER;
+ return code;
+ }
+
+  // Only scan data in the time range intersection.
+ extTwindows[0] = extTwindows[1] = *cond;
+ twindow->skey = TMAX(cond->skey, range->skey);
+ twindow->ekey = TMIN(cond->ekey, range->ekey);
+ extTwindows[0].ekey = twindow->skey - 1;
+ extTwindows[1].skey = twindow->ekey + 1;
+
+ return code;
+}
+
int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) {
QRY_PARAM_CHECK(pOptrInfo);
@@ -1206,8 +1247,10 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
- pScanInfo->base.cond.twindows = pInfo->win;
- pScanInfo->base.cond.type = TIMEWINDOW_RANGE_EXTERNAL;
+ SQueryTableDataCond *cond = &pScanInfo->base.cond;
+ cond->type = TIMEWINDOW_RANGE_EXTERNAL;
+ code = getQueryExtWindow(&cond->twindows, &pInfo->win, &cond->twindows, cond->extTwindows);
+ QUERY_CHECK_CODE(code, lino, _error);
}
setOperatorInfo(pOperator, "TimeSliceOperator", QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, false, OP_NOT_OPENED, pInfo,
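
For the overlapping case handled by getQueryExtWindow above, a self-contained sketch (simplified Window type, not the real STimeWindow) shows how a WHERE window and a RANGE window split into one main scan window plus two external boundary windows:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } Window;   /* simplified STimeWindow */

#define WMAX(a, b) ((a) > (b) ? (a) : (b))
#define WMIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors the overlapping case of getQueryExtWindow(): the main scan window
 * is the intersection of the WHERE window and the RANGE window; the two
 * external windows cover what is left of the WHERE window on each side,
 * from which only one boundary record is needed for FILL. */
static void splitWindows(Window cond, Window range, Window *mainWin, Window ext[2]) {
  ext[0] = ext[1] = cond;
  mainWin->skey = WMAX(cond.skey, range.skey);
  mainWin->ekey = WMIN(cond.ekey, range.ekey);
  ext[0].ekey = mainWin->skey - 1;
  ext[1].skey = mainWin->ekey + 1;
}

int main(void) {
  Window cond  = {.skey = 100, .ekey = 900};    /* from the WHERE clause */
  Window range = {.skey = 400, .ekey = 1200};   /* from the RANGE clause */
  Window mainWin, ext[2];
  splitWindows(cond, range, &mainWin, ext);
  /* mainWin = [400, 900], ext[0] = [100, 399], ext[1] = [901, 900] (empty). */
  printf("main=[%lld,%lld] ext0=[%lld,%lld] ext1=[%lld,%lld]\n",
         (long long)mainWin.skey, (long long)mainWin.ekey,
         (long long)ext[0].skey, (long long)ext[0].ekey,
         (long long)ext[1].skey, (long long)ext[1].ekey);
  return 0;
}
```
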
diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt
index c75de23c32..cb1f951c94 100644
--- a/source/libs/executor/test/CMakeLists.txt
+++ b/source/libs/executor/test/CMakeLists.txt
@@ -9,7 +9,7 @@ MESSAGE(STATUS "build parser unit test")
# ADD_EXECUTABLE(executorTest ${SOURCE_LIST})
# TARGET_LINK_LIBRARIES(
# executorTest
-# PRIVATE os util common transport gtest taos_static qcom executor function planner scalar nodes vnode
+# PRIVATE os util common transport gtest ${TAOS_LIB_STATIC} qcom executor function planner scalar nodes vnode
# )
#
# TARGET_INCLUDE_DIRECTORIES(
diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h
index 35067fa3ea..4738301cd3 100644
--- a/source/libs/function/inc/tpercentile.h
+++ b/source/libs/function/inc/tpercentile.h
@@ -26,7 +26,7 @@ extern "C" {
struct tMemBucket;
int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval, bool hasWindowOrGroup,
- struct tMemBucket **pBucket);
+ struct tMemBucket **pBucket, int32_t numOfElements);
void tMemBucketDestroy(struct tMemBucket **pBucket);
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 2d68eb9d51..5ce15a32b2 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -188,7 +188,11 @@ static int32_t countTrailingSpaces(const SValueNode* pVal, bool isLtrim) {
static int32_t addTimezoneParam(SNodeList* pList) {
char buf[TD_TIME_STR_LEN] = {0};
- time_t t = taosTime(NULL);
+ time_t t;
+ int32_t code = taosTime(&t);
+ if (code != 0) {
+ return code;
+ }
struct tm tmInfo;
if (taosLocalTime(&t, &tmInfo, buf, sizeof(buf)) != NULL) {
(void)strftime(buf, sizeof(buf), "%z", &tmInfo);
@@ -196,7 +200,7 @@ static int32_t addTimezoneParam(SNodeList* pList) {
int32_t len = (int32_t)strlen(buf);
SValueNode* pVal = NULL;
- int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal);
+ code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal);
if (pVal == NULL) {
return code;
}
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index acdac7cbc3..83227dea9e 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -1805,7 +1805,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
pResInfo->complete = true;
return TSDB_CODE_SUCCESS;
} else {
- code = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval, pCtx->hasWindowOrGroup, &pInfo->pMemBucket);
+ code = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval, pCtx->hasWindowOrGroup, &pInfo->pMemBucket, pInfo->numOfElems);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
@@ -3037,61 +3037,60 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
TSKEY startKey = getRowPTs(pInput->pPTS, 0);
TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1);
-#if 0
- int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
-
- // the optimized version only valid if all tuples in one block are monotonious increasing or descreasing.
- // this assumption is NOT always works if project operator exists in downstream.
- if (blockDataOrder == TSDB_ORDER_ASC) {
+ if (pCtx->order == TSDB_ORDER_ASC && !pCtx->hasPrimaryKey) {
for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
- char* data = colDataGetData(pInputCol, i);
+ bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+ char* data = isNull ? NULL : colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
numOfElems++;
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
- doSaveLastrow(pCtx, data, i, cts, pInfo);
+ int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+ if (code != TSDB_CODE_SUCCESS) return code;
}
break;
}
- } else { // descending order
+ } else if (!pCtx->hasPrimaryKey && pCtx->order == TSDB_ORDER_DESC) {
+    // the optimized version is only valid if all tuples in one block are monotonically increasing or decreasing.
+    // this assumption does NOT always hold if a project operator exists downstream.
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
- char* data = colDataGetData(pInputCol, i);
+ bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+ char* data = isNull ? NULL : colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
numOfElems++;
if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
- doSaveLastrow(pCtx, data, i, cts, pInfo);
+ int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+ if (code != TSDB_CODE_SUCCESS) return code;
}
break;
}
- }
-#else
+ } else {
+ int64_t* pts = (int64_t*)pInput->pPTS->pData;
+ int from = -1;
+ int32_t i = -1;
+ while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) {
+ bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+ char* data = isNull ? NULL : colDataGetData(pInputCol, i);
+ TSKEY cts = pts[i];
- int64_t* pts = (int64_t*)pInput->pPTS->pData;
- int from = -1;
- int32_t i = -1;
- while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) {
- bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
- char* data = isNull ? NULL : colDataGetData(pInputCol, i);
- TSKEY cts = pts[i];
-
- numOfElems++;
- char* pkData = NULL;
- if (pCtx->hasPrimaryKey) {
- pkData = colDataGetData(pkCol, i);
- }
- if (pResInfo->numOfRes == 0 || pInfo->ts < cts ||
- (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) {
- int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ numOfElems++;
+ char* pkData = NULL;
+ if (pCtx->hasPrimaryKey) {
+ pkData = colDataGetData(pkCol, i);
+ }
+ if (pResInfo->numOfRes == 0 || pInfo->ts < cts ||
+ (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) {
+ int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ pResInfo->numOfRes = 1;
}
- pResInfo->numOfRes = 1;
}
- }
-#endif
+ }
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
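
The lastRowFunction rework above replaces the `#if 0` block with three explicit paths. A compact sketch of the branch selection, using stand-in flags rather than the real TSDB_ORDER_* constants:

```c
/* Sketch of the branch selection now used by lastRowFunction: the two fast
 * paths apply only when there is no primary key column, because the
 * primary-key tie-break requires the general row-by-row scan. */
enum { ORDER_ASC = 1, ORDER_DESC = 2 };

typedef struct {
  int order;          /* block order reported by the executor */
  int hasPrimaryKey;  /* non-zero if a composite primary key is present */
} Ctx;

static const char *pickLastRowPath(const Ctx *ctx) {
  if (!ctx->hasPrimaryKey && ctx->order == ORDER_ASC)
    return "fast path: take the last row of the block";
  if (!ctx->hasPrimaryKey && ctx->order == ORDER_DESC)
    return "fast path: take the first row of the block";
  return "general path: scan rows, compare timestamps and primary keys";
}
```
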
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index 78c16ec7cb..73f400c93e 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -269,18 +269,16 @@ static void resetSlotInfo(tMemBucket *pBucket) {
}
int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval, bool hasWindowOrGroup,
- tMemBucket **pBucket) {
+ tMemBucket **pBucket, int32_t numOfElements) {
*pBucket = (tMemBucket *)taosMemoryCalloc(1, sizeof(tMemBucket));
if (*pBucket == NULL) {
return terrno;
}
if (hasWindowOrGroup) {
- // With window or group by, we need to shrink page size and reduce page num to save memory.
- (*pBucket)->numOfSlots = DEFAULT_NUM_OF_SLOT / 8 ; // 128 bucket
+ // With window or group by, we need to shrink page size to save memory.
(*pBucket)->bufPageSize = 4096; // 4k per page
} else {
- (*pBucket)->numOfSlots = DEFAULT_NUM_OF_SLOT;
(*pBucket)->bufPageSize = 16384 * 4; // 16k per page
}
@@ -302,6 +300,8 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou
}
(*pBucket)->elemPerPage = ((*pBucket)->bufPageSize - sizeof(SFilePage)) / (*pBucket)->bytes;
+ (*pBucket)->numOfSlots = TMIN((int16_t)(numOfElements / ((*pBucket)->elemPerPage * 6)) + 1, DEFAULT_NUM_OF_SLOT);
+
(*pBucket)->comparFn = getKeyComparFunc((*pBucket)->type, TSDB_ORDER_ASC);
(*pBucket)->hashFunc = getHashFunc((*pBucket)->type);
@@ -587,7 +587,7 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
// try next round
tMemBucket *tmpBucket = NULL;
int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal,
- false, &tmpBucket);
+ false, &tmpBucket, pSlot->info.size);
if (TSDB_CODE_SUCCESS != code) {
tMemBucketDestroy(&tmpBucket);
return code;
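
The tMemBucket change sizes the slot count from the expected element count instead of always allocating DEFAULT_NUM_OF_SLOT buckets. A worked example of the formula; the page-header size, element size, and DEFAULT_NUM_OF_SLOT value below are illustrative assumptions, not values copied from the headers:

```c
#include <stdio.h>

#define DEFAULT_NUM_OF_SLOT 1024            /* assumed value for illustration */
#define SLOT_MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  int bufPageSize   = 16384 * 4;            /* page size without window/group by */
  int headerBytes   = 24;                   /* assumed sizeof(SFilePage) */
  int bytes         = 16;                   /* assumed per-element size */
  int elemPerPage   = (bufPageSize - headerBytes) / bytes;   /* 4094 */
  int numOfElements = 50000;                /* rows feeding the percentile */

  /* 50000 / (4094 * 6) + 1 = 2 + 1 = 3 slots, far fewer than the old
   * unconditional DEFAULT_NUM_OF_SLOT allocation. */
  int numOfSlots = SLOT_MIN(numOfElements / (elemPerPage * 6) + 1, DEFAULT_NUM_OF_SLOT);
  printf("elemPerPage=%d numOfSlots=%d\n", elemPerPage, numOfSlots);
  return 0;
}
```
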
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index a8198a804d..4efa8764e5 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -62,8 +62,13 @@ static void udfUdfdStopAsyncCb(uv_async_t *async);
static void udfWatchUdfd(void *args);
void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal) {
+ TAOS_UDF_CHECK_PTR_RVOID(process);
fnInfo("udfd process exited with status %" PRId64 ", signal %d", exitStatus, termSignal);
SUdfdData *pData = process->data;
+  if (pData == NULL) {
+ fnError("udfd process data is NULL");
+ return;
+ }
if (exitStatus == 0 && termSignal == 0 || atomic_load_32(&pData->stopCalled)) {
fnInfo("udfd process exit due to SIGINT or dnode-mgmt called stop");
} else {
@@ -77,6 +82,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal)
static int32_t udfSpawnUdfd(SUdfdData *pData) {
fnInfo("start to init udfd");
+ TAOS_UDF_CHECK_PTR_RCODE(pData);
int32_t err = 0;
uv_process_options_t options = {0};
@@ -267,17 +273,20 @@ _OVER:
}
static void udfUdfdCloseWalkCb(uv_handle_t *handle, void *arg) {
+ TAOS_UDF_CHECK_PTR_RVOID(handle);
if (!uv_is_closing(handle)) {
uv_close(handle, NULL);
}
}
static void udfUdfdStopAsyncCb(uv_async_t *async) {
+ TAOS_UDF_CHECK_PTR_RVOID(async);
SUdfdData *pData = async->data;
uv_stop(&pData->loop);
}
static void udfWatchUdfd(void *args) {
+ TAOS_UDF_CHECK_PTR_RVOID(args);
SUdfdData *pData = args;
TAOS_UV_CHECK_ERRNO(uv_loop_init(&pData->loop));
TAOS_UV_CHECK_ERRNO(uv_async_init(&pData->loop, &pData->stopAsync, udfUdfdStopAsyncCb));
@@ -873,6 +882,7 @@ void *decodeUdfResponse(const void *buf, SUdfResponse *rsp) {
}
void freeUdfColumnData(SUdfColumnData *data, SUdfColumnMeta *meta) {
+ TAOS_UDF_CHECK_PTR_RVOID(data, meta);
if (IS_VAR_DATA_TYPE(meta->type)) {
taosMemoryFree(data->varLenCol.varOffsets);
data->varLenCol.varOffsets = NULL;
@@ -886,9 +896,13 @@ void freeUdfColumnData(SUdfColumnData *data, SUdfColumnMeta *meta) {
}
}
-void freeUdfColumn(SUdfColumn *col) { freeUdfColumnData(&col->colData, &col->colMeta); }
+void freeUdfColumn(SUdfColumn *col) {
+ TAOS_UDF_CHECK_PTR_RVOID(col);
+ freeUdfColumnData(&col->colData, &col->colMeta);
+}
void freeUdfDataDataBlock(SUdfDataBlock *block) {
+ TAOS_UDF_CHECK_PTR_RVOID(block);
for (int32_t i = 0; i < block->numOfCols; ++i) {
freeUdfColumn(block->udfCols[i]);
taosMemoryFree(block->udfCols[i]);
@@ -899,11 +913,17 @@ void freeUdfDataDataBlock(SUdfDataBlock *block) {
}
void freeUdfInterBuf(SUdfInterBuf *buf) {
+ TAOS_UDF_CHECK_PTR_RVOID(buf);
taosMemoryFree(buf->buf);
buf->buf = NULL;
}
int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlock) {
+ TAOS_UDF_CHECK_PTR_RCODE(block, udfBlock);
+ int32_t code = blockDataCheck(block);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
udfBlock->numOfRows = block->info.rows;
udfBlock->numOfCols = taosArrayGetSize(block->pDataBlock);
udfBlock->udfCols = taosMemoryCalloc(taosArrayGetSize(block->pDataBlock), sizeof(SUdfColumn *));
@@ -973,6 +993,7 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
}
int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfCol, block);
int32_t code = 0, lino = 0;
SUdfColumnMeta *meta = &udfCol->colMeta;
@@ -998,6 +1019,8 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
}
block->info.rows = udfCol->colData.numOfRows;
+ code = blockDataCheck(block);
+ TAOS_CHECK_GOTO(code, &lino, _exit);
_exit:
if (code != 0) {
fnError("failed to convert udf column to data block, code:%d, line:%d", code, lino);
@@ -1006,6 +1029,7 @@ _exit:
}
int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SSDataBlock *output) {
+ TAOS_UDF_CHECK_PTR_RCODE(input, output);
int32_t code = 0, lino = 0;
int32_t numOfRows = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
@@ -1053,6 +1077,7 @@ _exit:
}
int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) {
+ TAOS_UDF_CHECK_PTR_RCODE(input, output);
if (taosArrayGetSize(input->pDataBlock) != 1) {
fnError("scalar function only support one column");
return 0;
@@ -1131,6 +1156,7 @@ int32_t compareUdfcFuncSub(const void *elem1, const void *elem2) {
}
int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfName, pHandle);
int32_t code = 0, line = 0;
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
SUdfcFuncStub key = {0};
@@ -1189,6 +1215,7 @@ _exit:
}
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) {
+ TAOS_UDF_CHECK_PTR_RVOID(udfName);
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
SUdfcFuncStub key = {0};
tstrncpy(key.udfName, udfName, TSDB_FUNC_NAME_LEN);
@@ -1291,6 +1318,7 @@ int32_t cleanUpUdfs() {
}
int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfName, input, output);
UdfcFuncHandle handle = NULL;
int32_t code = acquireUdfFuncHandle(udfName, &handle);
if (code != 0) {
@@ -1320,6 +1348,10 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
}
bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv) {
+ if (pFunc == NULL || pEnv == NULL) {
+    fnError("udfAggGetEnv: invalid input, line:%d", __LINE__);
+ return false;
+ }
if (fmIsScalarFunc(pFunc->funcId)) {
return false;
}
@@ -1328,6 +1360,7 @@ bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv) {
}
int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo) {
+ TAOS_UDF_CHECK_PTR_RCODE(pCtx, pResultCellInfo);
if (pResultCellInfo->initialized) {
return TSDB_CODE_SUCCESS;
}
@@ -1369,6 +1402,7 @@ int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes
}
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(pCtx);
int32_t udfCode = 0;
UdfcFuncHandle handle = 0;
if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) {
@@ -1440,6 +1474,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
}
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) {
+ TAOS_UDF_CHECK_PTR_RCODE(pCtx, pBlock);
int32_t udfCode = 0;
UdfcFuncHandle handle = 0;
if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) {
@@ -1534,7 +1569,7 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *
}
// TODO: the call buffer is setup and freed by udf invocation
- taosMemoryFree(uvTask->rspBuf.base);
+ taosMemoryFreeClear(uvTask->rspBuf.base);
} else {
code = uvTask->errCode;
if (code != 0) {
@@ -1759,7 +1794,7 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT
}
if (uv_sem_init(&uvTask->taskSem, 0) != 0) {
if (uvTaskType == UV_TASK_REQ_RSP) {
- taosMemoryFree(uvTask->reqBuf.base);
+ taosMemoryFreeClear(uvTask->reqBuf.base);
}
fnError("udfc create uv task, init semaphore failed.");
return TSDB_CODE_UDF_UV_EXEC_FAILURE;
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 6eef99e1f8..0ee14f7820 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -55,6 +55,7 @@ int32_t udfdCPluginOpen(SScriptUdfEnvItem *items, int numItems) { return 0; }
int32_t udfdCPluginClose() { return 0; }
int32_t udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfCtx, udfName);
char initFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *initSuffix = "_init";
snprintf(initFuncName, sizeof(initFuncName), "%s%s", udfName, initSuffix);
@@ -68,6 +69,7 @@ int32_t udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const cha
}
int32_t udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfCtx, udfName);
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
TAOS_CHECK_RETURN(uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->aggProcFunc)));
@@ -93,6 +95,7 @@ int32_t udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfNa
}
int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(udf, pUdfCtx);
int32_t err = 0;
SUdfCPluginCtx *udfCtx = taosMemoryCalloc(1, sizeof(SUdfCPluginCtx));
if (NULL == udfCtx) {
@@ -146,6 +149,7 @@ _exit:
}
int32_t udfdCPluginUdfDestroy(void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
int32_t code = 0;
if (ctx->destroyFunc) {
@@ -157,6 +161,7 @@ int32_t udfdCPluginUdfDestroy(void *udfCtx) {
}
int32_t udfdCPluginUdfScalarProc(SUdfDataBlock *block, SUdfColumn *resultCol, void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(block, resultCol, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->scalarProcFunc) {
return ctx->scalarProcFunc(block, resultCol);
@@ -167,6 +172,7 @@ int32_t udfdCPluginUdfScalarProc(SUdfDataBlock *block, SUdfColumn *resultCol, vo
}
int32_t udfdCPluginUdfAggStart(SUdfInterBuf *buf, void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(buf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggStartFunc) {
return ctx->aggStartFunc(buf);
@@ -178,6 +184,7 @@ int32_t udfdCPluginUdfAggStart(SUdfInterBuf *buf, void *udfCtx) {
}
int32_t udfdCPluginUdfAggProc(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf, void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(block, interBuf, newInterBuf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggProcFunc) {
return ctx->aggProcFunc(block, interBuf, newInterBuf);
@@ -189,6 +196,7 @@ int32_t udfdCPluginUdfAggProc(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdf
int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf,
void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(inputBuf1, inputBuf2, outputBuf, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggMergeFunc) {
return ctx->aggMergeFunc(inputBuf1, inputBuf2, outputBuf);
@@ -199,6 +207,7 @@ int32_t udfdCPluginUdfAggMerge(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2,
}
int32_t udfdCPluginUdfAggFinish(SUdfInterBuf *buf, SUdfInterBuf *resultData, void *udfCtx) {
+ TAOS_UDF_CHECK_PTR_RCODE(buf, resultData, udfCtx);
SUdfCPluginCtx *ctx = udfCtx;
if (ctx->aggFinishFunc) {
return ctx->aggFinishFunc(buf, resultData);
@@ -360,6 +369,7 @@ int32_t udfdNewUdf(SUdf **pUdf, const char *udfName);
void udfdGetFuncBodyPath(const SUdf *udf, char *path);
int32_t udfdInitializeCPlugin(SUdfScriptPlugin *plugin) {
+ TAOS_UDF_CHECK_PTR_RCODE(plugin);
plugin->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
plugin->openFunc = udfdCPluginOpen;
plugin->closeFunc = udfdCPluginClose;
@@ -378,6 +388,7 @@ int32_t udfdInitializeCPlugin(SUdfScriptPlugin *plugin) {
}
int32_t udfdLoadSharedLib(char *libPath, uv_lib_t *pLib, const char *funcName[], void **func[], int numOfFuncs) {
+ TAOS_UDF_CHECK_PTR_RCODE(libPath, pLib, funcName, func);
int err = uv_dlopen(libPath, pLib);
if (err != 0) {
fnError("can not load library %s. error: %s", libPath, uv_strerror(err));
@@ -394,6 +405,7 @@ int32_t udfdLoadSharedLib(char *libPath, uv_lib_t *pLib, const char *funcName[],
}
int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) {
+ TAOS_UDF_CHECK_PTR_RCODE(plugin);
plugin->scriptType = TSDB_FUNC_SCRIPT_PYTHON;
// todo: windows support
snprintf(plugin->libPath, PATH_MAX, "%s", "libtaospyudf.so");
@@ -439,6 +451,7 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) {
}
void udfdDeinitCPlugin(SUdfScriptPlugin *plugin) {
+ TAOS_UDF_CHECK_PTR_RVOID(plugin);
if (plugin->closeFunc) {
if (plugin->closeFunc() != 0) {
fnError("udf script c plugin close func failed.line:%d", __LINE__);
@@ -457,8 +470,9 @@ void udfdDeinitCPlugin(SUdfScriptPlugin *plugin) {
}
void udfdDeinitPythonPlugin(SUdfScriptPlugin *plugin) {
+ TAOS_UDF_CHECK_PTR_RVOID(plugin);
if (plugin->closeFunc) {
- if(plugin->closeFunc() != 0) {
+ if (plugin->closeFunc() != 0) {
fnError("udf script python plugin close func failed.line:%d", __LINE__);
}
}
@@ -517,22 +531,29 @@ void udfdDeinitScriptPlugins() {
if (plugin != NULL) {
udfdDeinitPythonPlugin(plugin);
taosMemoryFree(plugin);
+ global.scriptPlugins[TSDB_FUNC_SCRIPT_PYTHON] = NULL;
}
plugin = global.scriptPlugins[TSDB_FUNC_SCRIPT_BIN_LIB];
if (plugin != NULL) {
udfdDeinitCPlugin(plugin);
taosMemoryFree(plugin);
+ global.scriptPlugins[TSDB_FUNC_SCRIPT_BIN_LIB] = NULL;
}
return;
}
void udfdProcessRequest(uv_work_t *req) {
+ TAOS_UDF_CHECK_PTR_RVOID(req);
SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data);
+ if (uvUdf == NULL) {
+ fnError("udf work is NULL");
+ return;
+ }
SUdfRequest request = {0};
if(decodeUdfRequest(uvUdf->input.base, &request) == NULL)
{
- taosMemoryFree(uvUdf->input.base);
+ taosMemoryFreeClear(uvUdf->input.base);
fnError("udf request decode failed");
return;
}
@@ -557,7 +578,7 @@ void udfdProcessRequest(uv_work_t *req) {
}
}
-void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
+static void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
udfInfo->bufSize = udf->bufSize;
if (udf->funcType == TSDB_FUNC_TYPE_AGGREGATE) {
udfInfo->funcType = UDF_FUNC_TYPE_AGG;
@@ -573,7 +594,8 @@ void convertUdf2UdfInfo(SUdf *udf, SScriptUdfInfo *udfInfo) {
udfInfo->scriptType = udf->scriptType;
}
-int32_t udfdInitUdf(char *udfName, SUdf *udf) {
+static int32_t udfdInitUdf(char *udfName, SUdf *udf) {
+ TAOS_UDF_CHECK_PTR_RCODE(udfName, udf);
int32_t err = 0;
err = udfdFillUdfInfoFromMNode(global.clientRpc, udfName, udf);
if (err != 0) {
@@ -611,6 +633,7 @@ int32_t udfdInitUdf(char *udfName, SUdf *udf) {
}
int32_t udfdNewUdf(SUdf **pUdf, const char *udfName) {
+ TAOS_UDF_CHECK_PTR_RCODE(pUdf, udfName);
SUdf *udfNew = taosMemoryCalloc(1, sizeof(SUdf));
if (NULL == udfNew) {
return terrno;
@@ -654,6 +677,7 @@ void udfdFreeUdf(void *pData) {
}
int32_t udfdGetOrCreateUdf(SUdf **ppUdf, const char *udfName) {
+ TAOS_UDF_CHECK_PTR_RCODE(ppUdf, udfName);
uv_mutex_lock(&global.udfsMutex);
SUdf **pUdfHash = taosHashGet(global.udfsHash, udfName, strlen(udfName));
int64_t currTime = taosGetTimestampMs();
@@ -693,6 +717,7 @@ int32_t udfdGetOrCreateUdf(SUdf **ppUdf, const char *udfName) {
}
void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
+ TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
// TODO: tracable id from client. connect, setup, call, teardown
fnInfo("setup request. seq num: %" PRId64 ", udf name: %s", request->seqNum, request->setup.udfName);
@@ -760,11 +785,55 @@ _send:
uvUdf->output = uv_buf_init(bufBegin, len);
- taosMemoryFree(uvUdf->input.base);
+ taosMemoryFreeClear(uvUdf->input.base);
return;
}
+static int32_t checkUDFScalaResult(SSDataBlock *block, SUdfColumn *output) {
+ if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
+ return TSDB_CODE_SUCCESS;
+ }
+ if (output->colData.numOfRows != block->info.rows) {
+    fnError("udf scalar result num of rows %d not equal to input rows %" PRId64, output->colData.numOfRows, block->info.rows);
+ return TSDB_CODE_UDF_FUNC_EXEC_FAILURE;
+ }
+
+ if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_BYROW) {
+ for (int32_t i = 0; i < output->colData.numOfRows; ++i) {
+ if (!udfColDataIsNull(output, i)) {
+ if (IS_VAR_DATA_TYPE(output->colMeta.type)) {
+ TAOS_UDF_CHECK_CONDITION(output->colData.varLenCol.payload != NULL, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
+ TAOS_UDF_CHECK_CONDITION(output->colData.varLenCol.varOffsets[i] >= 0 &&
+ output->colData.varLenCol.varOffsets[i] < output->colData.varLenCol.payloadLen,
+ TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
+ } else {
+ TAOS_UDF_CHECK_CONDITION(
+ output->colMeta.bytes * output->colData.numOfRows <= output->colData.fixLenCol.dataLen,
+ TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
+ break;
+ }
+ }
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t checkUDFAggResult(SSDataBlock *block, SUdfInterBuf *output) {
+ if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) {
+ return TSDB_CODE_SUCCESS;
+ }
+ if (output->numOfResult != 1 && output->numOfResult != 0) {
+    fnError("udf agg result num of rows %d not equal to 0 or 1", output->numOfResult);
+ return TSDB_CODE_UDF_FUNC_EXEC_FAILURE;
+ }
+ TAOS_UDF_CHECK_CONDITION(output->buf != NULL, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
+ TAOS_UDF_CHECK_CONDITION(output->bufLen > 0, TSDB_CODE_UDF_FUNC_EXEC_FAILURE);
+ return TSDB_CODE_SUCCESS;
+}
+
void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
+ TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
SUdfCallRequest *call = &request->call;
fnDebug("call request. call type %d, handle: %" PRIx64 ", seq num %" PRId64, call->callType, call->udfHandle,
request->seqNum);
@@ -787,6 +856,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
code = convertDataBlockToUdfDataBlock(&call->block, &input);
if (code == TSDB_CODE_SUCCESS) code = udf->scriptPlugin->udfScalarProcFunc(&input, &output, udf->scriptUdfCtx);
freeUdfDataDataBlock(&input);
+ if (code == TSDB_CODE_SUCCESS) code = checkUDFScalaResult(&call->block, &output);
if (code == TSDB_CODE_SUCCESS) code = convertUdfColumnToDataBlock(&output, &response.callRsp.resultData);
}
freeUdfColumn(&output);
@@ -809,6 +879,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
if (outBuf.buf != NULL) {
code = udf->scriptPlugin->udfAggProcFunc(&input, &call->interBuf, &outBuf, udf->scriptUdfCtx);
freeUdfInterBuf(&call->interBuf);
+ if (code == TSDB_CODE_SUCCESS) code = checkUDFAggResult(&call->block, &outBuf);
subRsp->resultBuf = outBuf;
} else {
code = terrno;
@@ -900,11 +971,12 @@ _exit:
break;
}
- taosMemoryFree(uvUdf->input.base);
+ taosMemoryFreeClear(uvUdf->input.base);
return;
}
void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
+ TAOS_UDF_CHECK_PTR_RVOID(uvUdf, request);
SUdfTeardownRequest *teardown = &request->teardown;
fnInfo("teardown. seq number: %" PRId64 ", handle:%" PRIx64, request->seqNum, teardown->udfHandle);
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle);
@@ -964,6 +1036,7 @@ _send:
}
void udfdGetFuncBodyPath(const SUdf *udf, char *path) {
+ TAOS_UDF_CHECK_PTR_RVOID(udf, path);
if (udf->scriptType == TSDB_FUNC_SCRIPT_BIN_LIB) {
#ifdef WINDOWS
snprintf(path, PATH_MAX, "%s%s_%d_%" PRIx64 ".dll", global.udfDataDir, udf->name, udf->version, udf->createdTime);
@@ -987,6 +1060,7 @@ void udfdGetFuncBodyPath(const SUdf *udf, char *path) {
}
int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) {
+ TAOS_UDF_CHECK_PTR_RCODE(pFuncInfo, udf);
if (!osDataSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;
fnError("udfd create shared library failed since %s", terrstr());
@@ -1022,6 +1096,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) {
}
void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
+ TAOS_UDF_CHECK_PTR_RVOID(parent, pMsg);
SUdfdRpcSendRecvInfo *msgInfo = (SUdfdRpcSendRecvInfo *)pMsg->info.ahandle;
if (pEpSet) {
@@ -1093,6 +1168,7 @@ _return:
}
int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
+ TAOS_UDF_CHECK_PTR_RCODE(clientRpc, udfName, udf);
SRetrieveFuncReq retrieveReq = {0};
retrieveReq.numOfFuncs = 1;
retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN);
@@ -1233,6 +1309,7 @@ void udfdCloseClientRpc() {
}
void udfdOnWrite(uv_write_t *req, int status) {
+ TAOS_UDF_CHECK_PTR_RVOID(req);
SUvUdfWork *work = (SUvUdfWork *)req->data;
if (status < 0) {
fnError("udfd send response error, length: %zu code: %s", work->output.len, uv_err_name(status));
@@ -1254,6 +1331,7 @@ void udfdOnWrite(uv_write_t *req, int status) {
}
void udfdSendResponse(uv_work_t *work, int status) {
+ TAOS_UDF_CHECK_PTR_RVOID(work);
SUvUdfWork *udfWork = (SUvUdfWork *)(work->data);
if (udfWork->conn != NULL) {
@@ -1274,6 +1352,7 @@ void udfdSendResponse(uv_work_t *work, int status) {
}
void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
+ TAOS_UDF_CHECK_PTR_RVOID(handle, buf);
SUdfdUvConn *ctx = handle->data;
int32_t msgHeadSize = sizeof(int32_t) + sizeof(int64_t);
if (ctx->inputCap == 0) {
@@ -1307,6 +1386,10 @@ void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) {
}
bool isUdfdUvMsgComplete(SUdfdUvConn *pipe) {
+ if (pipe == NULL) {
+ fnError("udfd pipe is NULL, LINE:%d", __LINE__);
+ return false;
+ }
if (pipe->inputTotal == -1 && pipe->inputLen >= sizeof(int32_t)) {
pipe->inputTotal = *(int32_t *)(pipe->inputBuf);
}
@@ -1318,6 +1401,7 @@ bool isUdfdUvMsgComplete(SUdfdUvConn *pipe) {
}
void udfdHandleRequest(SUdfdUvConn *conn) {
+ TAOS_UDF_CHECK_PTR_RVOID(conn);
char *inputBuf = conn->inputBuf;
int32_t inputLen = conn->inputLen;
@@ -1350,6 +1434,7 @@ void udfdHandleRequest(SUdfdUvConn *conn) {
}
void udfdPipeCloseCb(uv_handle_t *pipe) {
+ TAOS_UDF_CHECK_PTR_RVOID(pipe);
SUdfdUvConn *conn = pipe->data;
SUvUdfWork *pWork = conn->pWorkList;
while (pWork != NULL) {
@@ -1363,6 +1448,7 @@ void udfdPipeCloseCb(uv_handle_t *pipe) {
}
void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
+ TAOS_UDF_CHECK_PTR_RVOID(client, buf);
fnDebug("udfd read %zd bytes from client", nread);
if (nread == 0) return;
@@ -1389,6 +1475,7 @@ void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
}
void udfdOnNewConnection(uv_stream_t *server, int status) {
+ TAOS_UDF_CHECK_PTR_RVOID(server);
if (status < 0) {
fnError("udfd new connection error. code: %s", uv_strerror(status));
return;
@@ -1434,6 +1521,7 @@ _exit:
}
void udfdIntrSignalHandler(uv_signal_t *handle, int signum) {
+ TAOS_UDF_CHECK_PTR_RVOID(handle);
fnInfo("udfd signal received: %d\n", signum);
uv_fs_t req;
int32_t code = uv_fs_unlink(global.loop, &req, global.listenPipeName, NULL);
@@ -1482,6 +1570,7 @@ static int32_t udfdInitLog() {
}
void udfdCtrlAllocBufCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
+ TAOS_UDF_CHECK_PTR_RVOID(buf);
buf->base = taosMemoryMalloc(suggested_size);
if (buf->base == NULL) {
fnError("udfd ctrl pipe alloc buffer failed");
@@ -1491,6 +1580,7 @@ void udfdCtrlAllocBufCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *bu
}
void udfdCtrlReadCb(uv_stream_t *q, ssize_t nread, const uv_buf_t *buf) {
+ TAOS_UDF_CHECK_PTR_RVOID(q, buf);
if (nread < 0) {
fnError("udfd ctrl pipe read error. %s", uv_err_name(nread));
taosMemoryFree(buf->base);
@@ -1507,7 +1597,7 @@ static void removeListeningPipe() {
int err = uv_fs_unlink(global.loop, &req, global.listenPipeName, NULL);
uv_fs_req_cleanup(&req);
if(err) {
- fnError("remove listening pipe %s failed, reason:%s, lino:%d", global.listenPipeName, uv_strerror(err), __LINE__);
+ fnInfo("remove listening pipe %s : %s, lino:%d", global.listenPipeName, uv_strerror(err), __LINE__);
}
}
@@ -1580,7 +1670,7 @@ static void udfdGlobalDataDeinit() {
taosHashCleanup(global.udfsHash);
uv_mutex_destroy(&global.udfsMutex);
uv_mutex_destroy(&global.scriptPluginsMutex);
- taosMemoryFree(global.loop);
+ taosMemoryFreeClear(global.loop);
fnInfo("udfd global data deinit");
}
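
Many functions in tudf.c and udfd.c now start with TAOS_UDF_CHECK_PTR_RCODE/_RVOID guards. The real macros are variadic and tied to the project's logging and error codes; the single-argument sketch below only illustrates the guard pattern they add:

```c
#include <stdio.h>

/* Hypothetical stand-in error code for illustration. */
#define UDF_INVALID_INPUT (-1)

/* Return an error code when the pointer is NULL (for int32_t-returning functions). */
#define CHECK_PTR_RCODE(p)                                                  \
  do {                                                                      \
    if ((p) == NULL) {                                                      \
      fprintf(stderr, "%s: NULL input at line %d\n", __func__, __LINE__);   \
      return UDF_INVALID_INPUT;                                             \
    }                                                                       \
  } while (0)

/* Return immediately when the pointer is NULL (for void functions). */
#define CHECK_PTR_RVOID(p)                                                  \
  do {                                                                      \
    if ((p) == NULL) {                                                      \
      fprintf(stderr, "%s: NULL input at line %d\n", __func__, __LINE__);   \
      return;                                                               \
    }                                                                       \
  } while (0)

/* Example use mirroring the pattern in udfdCPluginUdfDestroy. */
static int destroyCtx(void *udfCtx) {
  CHECK_PTR_RCODE(udfCtx);
  /* ... tear down the context ... */
  return 0;
}
```
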
diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c
index 13c5f7208e..8789762a85 100644
--- a/source/libs/geometry/src/geosWrapper.c
+++ b/source/libs/geometry/src/geosWrapper.c
@@ -63,7 +63,7 @@ int32_t initCtxMakePoint() {
int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size) {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
GEOSGeometry *geom = NULL;
@@ -170,7 +170,7 @@ static int32_t initWktRegex(pcre2_code **ppRegex, pcre2_match_data **ppMatchData
int32_t initCtxGeomFromText() {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
if (geosCtx->handle == NULL) {
@@ -208,7 +208,7 @@ int32_t initCtxGeomFromText() {
int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t *size) {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
GEOSGeometry *geom = NULL;
@@ -245,7 +245,7 @@ _exit:
int32_t initCtxAsText() {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
if (geosCtx->handle == NULL) {
@@ -283,11 +283,11 @@ int32_t initCtxAsText() {
int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT) {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
- GEOSGeometry *geom = NULL;
- char *wkt = NULL;
+ GEOSGeometry *geom = NULL;
+ char *wkt = NULL;
geom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, inputGeom, size);
if (geom == NULL) {
@@ -313,10 +313,35 @@ _exit:
return code;
}
+int32_t checkWKB(const unsigned char *wkb, size_t size) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ GEOSGeometry *geom = NULL;
+ SGeosContext *geosCtx = NULL;
+
+ TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
+
+ geom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, wkb, size);
+ if (geom == NULL) {
+ return TSDB_CODE_FUNC_FUNTION_PARA_VALUE;
+ }
+
+ if (!GEOSisValid_r(geosCtx->handle, geom)) {
+ code = TSDB_CODE_FUNC_FUNTION_PARA_VALUE;
+ goto _exit;
+ }
+
+_exit:
+ if (geom) {
+ GEOSGeom_destroy_r(geosCtx->handle, geom);
+ geom = NULL;
+ }
+ return code;
+}
+
int32_t initCtxRelationFunc() {
int32_t code = TSDB_CODE_FAILED;
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
if (geosCtx->handle == NULL) {
@@ -343,7 +368,7 @@ int32_t doGeosRelation(const GEOSGeometry *geom1, const GEOSPreparedGeometry *pr
_geosPreparedRelationFunc_t preparedRelationFn,
_geosPreparedRelationFunc_t swappedPreparedRelationFn) {
SGeosContext *geosCtx = NULL;
-
+
TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx));
if (!preparedGeom1) {
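
For reference, a minimal standalone sketch of the WKB validation flow that the new checkWKB() wraps, written against the public libgeos_c API of recent GEOS releases rather than TDengine's thread-local SGeosContext helpers; the context setup and message handlers below are illustrative assumptions, not part of this patch.

/* Returns 0 if the WKB buffer parses and is a topologically valid geometry.
 * Build (assumption): cc wkb_check.c -lgeos_c */
#include <geos_c.h>
#include <stdarg.h>
#include <stdio.h>

static void notice(const char *fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
  fputc('\n', stderr);
}

static int check_wkb(const unsigned char *wkb, size_t size) {
  int rc = -1;
  GEOSContextHandle_t h = GEOS_init_r();          /* per-call context for the sketch */
  GEOSContext_setNoticeHandler_r(h, notice);
  GEOSContext_setErrorHandler_r(h, notice);

  GEOSWKBReader *reader = GEOSWKBReader_create_r(h);
  GEOSGeometry  *geom   = GEOSWKBReader_read_r(h, reader, wkb, size);
  if (geom != NULL && GEOSisValid_r(h, geom) == 1) {
    rc = 0;                                        /* parsed and valid */
  }

  if (geom) GEOSGeom_destroy_r(h, geom);
  GEOSWKBReader_destroy_r(h, reader);
  GEOS_finish_r(h);
  return rc;
}

The patched checkWKB() instead reuses the per-thread handle and WKBReader obtained from getThreadLocalGeosCtx() and maps both parse and validity failures to TSDB_CODE_FUNC_FUNTION_PARA_VALUE.
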
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 1a5785190b..ba87912670 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -851,6 +851,7 @@ static int32_t slotDescCopy(const SSlotDescNode* pSrc, SSlotDescNode* pDst) {
static int32_t downstreamSourceCopy(const SDownstreamSourceNode* pSrc, SDownstreamSourceNode* pDst) {
COPY_OBJECT_FIELD(addr, sizeof(SQueryNodeAddr));
+ COPY_SCALAR_FIELD(clientId);
COPY_SCALAR_FIELD(taskId);
COPY_SCALAR_FIELD(schedId);
COPY_SCALAR_FIELD(execId);
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 3275cfd838..f7f858db78 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -5259,6 +5259,7 @@ static int32_t jsonToColumnDefNode(const SJson* pJson, void* pObj) {
}
static const char* jkDownstreamSourceAddr = "Addr";
+static const char* jkDownstreamSourceClientId = "ClientId";
static const char* jkDownstreamSourceTaskId = "TaskId";
static const char* jkDownstreamSourceSchedId = "SchedId";
static const char* jkDownstreamSourceExecId = "ExecId";
@@ -5268,6 +5269,9 @@ static int32_t downstreamSourceNodeToJson(const void* pObj, SJson* pJson) {
const SDownstreamSourceNode* pNode = (const SDownstreamSourceNode*)pObj;
int32_t code = tjsonAddObject(pJson, jkDownstreamSourceAddr, queryNodeAddrToJson, &pNode->addr);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDownstreamSourceClientId, pNode->clientId);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDownstreamSourceTaskId, pNode->taskId);
}
@@ -5288,6 +5292,9 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj;
int32_t code = tjsonToObject(pJson, jkDownstreamSourceAddr, jsonToQueryNodeAddr, &pNode->addr);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetUBigIntValue(pJson, jkDownstreamSourceClientId, &pNode->clientId);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetUBigIntValue(pJson, jkDownstreamSourceTaskId, &pNode->taskId);
}
diff --git a/source/libs/nodes/src/nodesEqualFuncs.c b/source/libs/nodes/src/nodesEqualFuncs.c
index 241da85267..891843761a 100644
--- a/source/libs/nodes/src/nodesEqualFuncs.c
+++ b/source/libs/nodes/src/nodesEqualFuncs.c
@@ -153,6 +153,12 @@ static bool caseWhenNodeEqual(const SCaseWhenNode* a, const SCaseWhenNode* b) {
return true;
}
+static bool groupingSetNodeEqual(const SGroupingSetNode* a, const SGroupingSetNode* b) {
+ COMPARE_SCALAR_FIELD(groupingSetType);
+ COMPARE_NODE_LIST_FIELD(pParameterList);
+ return true;
+}
+
bool nodesEqualNode(const SNode* a, const SNode* b) {
if (a == b) {
return true;
@@ -181,10 +187,11 @@ bool nodesEqualNode(const SNode* a, const SNode* b) {
return whenThenNodeEqual((const SWhenThenNode*)a, (const SWhenThenNode*)b);
case QUERY_NODE_CASE_WHEN:
return caseWhenNodeEqual((const SCaseWhenNode*)a, (const SCaseWhenNode*)b);
+ case QUERY_NODE_GROUPING_SET:
+ return groupingSetNodeEqual((const SGroupingSetNode*)a, (const SGroupingSetNode*)b);
case QUERY_NODE_REAL_TABLE:
case QUERY_NODE_TEMP_TABLE:
case QUERY_NODE_JOIN_TABLE:
- case QUERY_NODE_GROUPING_SET:
case QUERY_NODE_ORDER_BY_EXPR:
case QUERY_NODE_LIMIT:
return false;
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
index 28d0b9fbd4..bf3ea66e47 100644
--- a/source/libs/nodes/src/nodesMsgFuncs.c
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -1769,6 +1769,9 @@ static int32_t downstreamSourceNodeInlineToMsg(const void* pObj, STlvEncoder* pE
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeValueI32(pEncoder, pNode->fetchMsgType);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU64(pEncoder, pNode->clientId);
+ }
return code;
}
@@ -1793,6 +1796,9 @@ static int32_t msgToDownstreamSourceNodeInlineToMsg(STlvDecoder* pDecoder, void*
if (TSDB_CODE_SUCCESS == code) {
code = tlvDecodeValueI32(pDecoder, &pNode->fetchMsgType);
}
+ if (TSDB_CODE_SUCCESS == code && !tlvDecodeEnd(pDecoder)) {
+ code = tlvDecodeValueU64(pDecoder, &pNode->clientId);
+ }
return code;
}
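
A plain-buffer sketch of why the decoder guards the new trailing clientId with !tlvDecodeEnd(pDecoder): messages produced before this change carry one fewer field, so the extra u64 must be treated as optional and read only when bytes remain. The Buf type and helpers below are illustrative stand-ins, not TDengine's tlv* API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t buf[64]; size_t len, pos; } Buf;

static void     put_u64(Buf *b, uint64_t v) { memcpy(b->buf + b->len, &v, 8); b->len += 8; }
static int      at_end(const Buf *b)        { return b->pos >= b->len; }
static uint64_t get_u64(Buf *b)             { uint64_t v; memcpy(&v, b->buf + b->pos, 8); b->pos += 8; return v; }

int main(void) {
  Buf oldMsg = {0}, newMsg = {0};
  put_u64(&oldMsg, 42);                           /* old format: taskId only            */
  put_u64(&newMsg, 42); put_u64(&newMsg, 7);      /* new format: taskId + clientId      */

  Buf *msgs[2] = { &oldMsg, &newMsg };
  for (int i = 0; i < 2; ++i) {
    uint64_t taskId   = get_u64(msgs[i]);
    uint64_t clientId = 0;                        /* default when the field is absent   */
    if (!at_end(msgs[i])) clientId = get_u64(msgs[i]);
    printf("taskId=%llu clientId=%llu\n",
           (unsigned long long)taskId, (unsigned long long)clientId);
  }
  return 0;
}
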
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index a9d0aa2924..30cc552761 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -2948,3 +2948,46 @@ void nodesSortList(SNodeList** pList, int32_t (*comp)(SNode* pNode1, SNode* pNod
inSize *= 2;
}
}
+
+static SNode* nodesListFindNode(SNodeList* pList, SNode* pNode) {
+ SNode* pFound = NULL;
+ FOREACH(pFound, pList) {
+ if (nodesEqualNode(pFound, pNode)) {
+ break;
+ }
+ }
+ return pFound;
+}
+
+int32_t nodesListDeduplicate(SNodeList** ppList) {
+ if (!ppList || LIST_LENGTH(*ppList) <= 1) return TSDB_CODE_SUCCESS;
+ if (LIST_LENGTH(*ppList) == 2) {
+ SNode* pNode1 = nodesListGetNode(*ppList, 0);
+ SNode* pNode2 = nodesListGetNode(*ppList, 1);
+ if (nodesEqualNode(pNode1, pNode2)) {
+ SListCell* pCell = nodesListGetCell(*ppList, 1);
+ (void)nodesListErase(*ppList, pCell);
+ }
+ return TSDB_CODE_SUCCESS;
+ }
+ SNodeList* pTmp = NULL;
+ int32_t code = nodesMakeList(&pTmp);
+ if (TSDB_CODE_SUCCESS == code) {
+ SNode* pNode = NULL;
+ FOREACH(pNode, *ppList) {
+ SNode* pFound = nodesListFindNode(pTmp, pNode);
+ if (NULL == pFound) {
+ code = nodesCloneNode(pNode, &pFound);
+ if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTmp, pFound);
+ if (TSDB_CODE_SUCCESS != code) break;
+ }
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ nodesDestroyList(*ppList);
+ *ppList = pTmp;
+ } else {
+ nodesDestroyList(pTmp);
+ }
+ return code;
+}
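
The clone-and-swap pattern behind nodesListDeduplicate(), restated as a self-contained sketch with plain ints standing in for SNode and a hand-rolled singly linked list; every name below is illustrative and none of it is TDengine API.

#include <stdio.h>
#include <stdlib.h>

typedef struct Cell { int v; struct Cell *next; } Cell;

static Cell *append(Cell *head, int v) {
  Cell *c = malloc(sizeof(Cell));
  if (!c) exit(1);
  c->v = v; c->next = NULL;
  if (!head) return c;
  Cell *p = head; while (p->next) p = p->next; p->next = c;
  return head;
}
static int  contains(const Cell *head, int v) {
  for (; head; head = head->next) if (head->v == v) return 1;
  return 0;
}
static void freeList(Cell *head) { while (head) { Cell *n = head->next; free(head); head = n; } }

/* Walk the source list, copy each element into a new list only if an equal
 * element is not already there, then replace the original with the new list. */
static Cell *dedup(Cell *src) {
  Cell *out = NULL;
  for (const Cell *p = src; p; p = p->next)
    if (!contains(out, p->v)) out = append(out, p->v);
  freeList(src);
  return out;
}

int main(void) {
  Cell *l = NULL;
  int in[] = {1, 2, 2, 3, 1};
  for (int i = 0; i < 5; ++i) l = append(l, in[i]);
  l = dedup(l);
  for (Cell *p = l; p; p = p->next) printf("%d ", p->v);   /* prints: 1 2 3 */
  printf("\n");
  freeList(l);
  return 0;
}
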
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 857c7604a9..7298b04eb0 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -115,6 +115,7 @@ typedef struct SParseMetaCache {
SHashObj* pTableName; // key is tbFUid, elements is STableMeta*(append with tbName)
SArray* pDnodes; // element is SEpSet
bool dnodeRequired;
+ bool forceFetchViewMeta;
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 245346273f..1a5e3444c0 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -1377,7 +1377,7 @@ SNode* createAnomalyWindowNode(SAstCreateContext* pCxt, SNode* pExpr, const STok
CHECK_MAKE_NODE(pAnomaly->pCol);
pAnomaly->pExpr = pExpr;
if (pFuncOpt == NULL) {
- tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANAL_ALGO_OPTION_LEN);
+ tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANALYTIC_ALGO_OPTION_LEN);
} else {
(void)trimString(pFuncOpt->z, pFuncOpt->n, pAnomaly->anomalyOpt, sizeof(pAnomaly->anomalyOpt));
}
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index eecc04658b..b78e10768f 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -810,7 +810,7 @@ static int32_t collectMetaKeyFromShowCreateView(SCollectMetaKeyCxt* pCxt, SShowC
if (TSDB_CODE_SUCCESS == code) {
code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->viewName, pCxt->pMetaCache);
}
-
+ pCxt->pMetaCache->forceFetchViewMeta = true;
return code;
}
@@ -888,6 +888,7 @@ static int32_t collectMetaKeyFromCreateViewStmt(SCollectMetaKeyCxt* pCxt, SCreat
static int32_t collectMetaKeyFromDropViewStmt(SCollectMetaKeyCxt* pCxt, SDropViewStmt* pStmt) {
int32_t code = reserveViewUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName,
pStmt->viewName, AUTH_TYPE_ALTER, pCxt->pMetaCache);
+ pCxt->pMetaCache->forceFetchViewMeta = true;
return code;
}
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index e757ec8b24..a2e98bece7 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -329,14 +329,23 @@ static int32_t calcConstGroupBy(SCalcConstContext* pCxt, SSelectStmt* pSelect) {
if (TSDB_CODE_SUCCESS == code) {
SNode* pNode = NULL;
FOREACH(pNode, pSelect->pGroupByList) {
+ bool hasNotValue = false;
SNode* pGroupPara = NULL;
FOREACH(pGroupPara, ((SGroupingSetNode*)pNode)->pParameterList) {
if (QUERY_NODE_VALUE != nodeType(pGroupPara)) {
- return code;
+ hasNotValue = true;
+ break;
+ }
+ }
+ if (!hasNotValue) {
+ if (pSelect->hasAggFuncs) {
+ ERASE_NODE(pSelect->pGroupByList);
+ } else {
+ if (!cell->pPrev && !cell->pNext) continue;
+ ERASE_NODE(pSelect->pGroupByList);
}
}
}
- NODES_DESTORY_LIST(pSelect->pGroupByList);
}
return code;
}
diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c
index 4b91f01a8c..750621bf66 100644
--- a/source/libs/parser/src/parInsertSql.c
+++ b/source/libs/parser/src/parInsertSql.c
@@ -246,7 +246,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, E
return code;
}
-static int parseTimestampOrInterval(const char** end, SToken* pToken, int16_t timePrec, int64_t* ts, int64_t* interval,
+static int32_t parseTimestampOrInterval(const char** end, SToken* pToken, int16_t timePrec, int64_t* ts, int64_t* interval,
SMsgBuf* pMsgBuf, bool* isTs) {
if (pToken->type == TK_NOW) {
*isTs = true;
diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index 0979028e6d..c6951d229d 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -13,6 +13,7 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "geosWrapper.h"
#include "os.h"
#include "parInsertUtil.h"
#include "parInt.h"
@@ -192,6 +193,12 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
// strcpy(val.colName, pTagSchema->name);
if (pTagSchema->type == TSDB_DATA_TYPE_BINARY || pTagSchema->type == TSDB_DATA_TYPE_VARBINARY ||
pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) {
+ if (pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) {
+ if (initCtxAsText() || checkWKB(bind[c].buffer, colLen)) {
+ code = buildSyntaxErrMsg(&pBuf, "invalid geometry tag", bind[c].buffer);
+ goto end;
+ }
+ }
val.pData = (uint8_t*)bind[c].buffer;
val.nData = colLen;
} else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) {
@@ -409,7 +416,8 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
}
code = tColDataAddValueByBind(pCol, pBind,
- IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1);
+ IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
+ initCtxAsText, checkWKB);
if (code) {
goto _return;
}
@@ -461,7 +469,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bi
}
code = tColDataAddValueByBind(pCol, pBind,
- IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1);
+ IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
+ initCtxAsText, checkWKB);
qDebug("stmt col %d bind %d rows data", colIdx, rowNum);
@@ -544,6 +553,12 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c
// strcpy(val.colName, pTagSchema->name);
if (pTagSchema->type == TSDB_DATA_TYPE_BINARY || pTagSchema->type == TSDB_DATA_TYPE_VARBINARY ||
pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) {
+ if (pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) {
+ if (initCtxAsText() || checkWKB(bind[c].buffer, colLen)) {
+ code = buildSyntaxErrMsg(&pBuf, "invalid geometry tag", bind[c].buffer);
+ goto end;
+ }
+ }
val.pData = (uint8_t*)bind[c].buffer;
val.nData = colLen;
} else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) {
@@ -666,11 +681,25 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
int32_t code = 0;
int16_t lastColId = -1;
bool colInOrder = true;
+ int ncharColNums = 0;
if (NULL == *pTSchema) {
*pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion);
}
+ for (int c = 0; c < boundInfo->numOfBound; ++c) {
+ if (TSDB_DATA_TYPE_NCHAR == pSchema[boundInfo->pColIndex[c]].type) {
+ ncharColNums++;
+ }
+ }
+ if (ncharColNums > 0) {
+ ncharBinds = taosArrayInit(ncharColNums, sizeof(ncharBind));
+ if (!ncharBinds) {
+ code = terrno;
+ goto _return;
+ }
+ }
+
for (int c = 0; c < boundInfo->numOfBound; ++c) {
SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]];
if (pColSchema->colId <= lastColId) {
@@ -695,13 +724,6 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
if (code) {
goto _return;
}
- if (!ncharBinds) {
- ncharBinds = taosArrayInit(1, sizeof(ncharBind));
- if (!ncharBinds) {
- code = terrno;
- goto _return;
- }
- }
if (!taosArrayPush(ncharBinds, &ncharBind)) {
code = terrno;
goto _return;
@@ -824,7 +846,8 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind,
}
code = tColDataAddValueByBind2(pCol, pBind,
- IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1);
+ IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
+ initCtxAsText, checkWKB);
if (code) {
goto _return;
}
@@ -876,7 +899,8 @@ int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* b
}
code = tColDataAddValueByBind2(pCol, pBind,
- IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1);
+ IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
+ initCtxAsText, checkWKB);
qDebug("stmt col %d bind %d rows data", colIdx, rowNum);
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 99c03c412c..fcb6361a6b 100755
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -3311,11 +3311,11 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType)
} else {
resultType = gDisplyTypes[type2][type1];
}
-
+
if (resultType == -1) {
return TSDB_CODE_SCALAR_CONVERT_ERROR;
}
-
+
if (commonType->type == newType->type) {
commonType->bytes = TMAX(commonType->bytes, newType->bytes);
return TSDB_CODE_SUCCESS;
@@ -3328,9 +3328,9 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType)
} else {
commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]);
}
-
+
commonType->type = resultType;
-
+
return TSDB_CODE_SUCCESS;
}
@@ -9652,7 +9652,7 @@ static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt)
static int32_t translateCreateAnode(STranslateContext* pCxt, SCreateAnodeStmt* pStmt) {
SMCreateAnodeReq createReq = {0};
createReq.urlLen = strlen(pStmt->url) + 1;
- if (createReq.urlLen > TSDB_ANAL_ANODE_URL_LEN) {
+ if (createReq.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) {
return TSDB_CODE_MND_ANODE_TOO_LONG_URL;
}
@@ -13127,7 +13127,7 @@ static int32_t extractShowCreateViewResultSchema(int32_t* numOfCols, SSchema** p
}
static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) {
- *numOfCols = 3;
+ *numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; // SHOW_VARIABLES_RESULT_COLS
*pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema));
if (NULL == (*pSchema)) {
return terrno;
@@ -13138,13 +13138,17 @@ static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pS
strcpy((*pSchema)[0].name, "name");
(*pSchema)[1].type = TSDB_DATA_TYPE_BINARY;
- (*pSchema)[1].bytes = TSDB_CONFIG_VALUE_LEN;
+ (*pSchema)[1].bytes = TSDB_CONFIG_PATH_LEN;
strcpy((*pSchema)[1].name, "value");
(*pSchema)[2].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[2].bytes = TSDB_CONFIG_SCOPE_LEN;
strcpy((*pSchema)[2].name, "scope");
+ (*pSchema)[3].type = TSDB_DATA_TYPE_BINARY;
+ (*pSchema)[3].bytes = TSDB_CONFIG_INFO_LEN;
+ strcpy((*pSchema)[3].name, "info");
+
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index e35eea9e72..44e44982a3 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -817,6 +817,7 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
}
#endif
pCatalogReq->dNodeRequired = pMetaCache->dnodeRequired;
+ pCatalogReq->forceFetchViewMeta = pMetaCache->forceFetchViewMeta;
return code;
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 8ac1acb1a2..e2135bfd63 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -154,6 +154,9 @@ static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCa
}
static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
+ if (!pParam || IS_NULL_TYPE(pParam->buffer_type)) {
+ return TSDB_CODE_APP_ERROR;
+ }
if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
taosMemoryFreeClear(pVal->datum.p);
}
@@ -433,9 +436,6 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = NULL;
code = nodesCloneNode(pQuery->pPrepareRoot, &pQuery->pRoot);
- if (NULL == pQuery->pRoot) {
- code = code;
- }
}
if (TSDB_CODE_SUCCESS == code) {
rewriteExprAlias(pQuery->pRoot);
@@ -444,6 +444,9 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx
}
static int32_t setValueByBindParam2(SValueNode* pVal, TAOS_STMT2_BIND* pParam) {
+ if (!pParam || IS_NULL_TYPE(pParam->buffer_type)) {
+ return TSDB_CODE_APP_ERROR;
+ }
if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
taosMemoryFreeClear(pVal->datum.p);
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 34c83acee8..fd4e42f256 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -838,8 +838,11 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
}
if (NULL != pSelect->pGroupByList) {
- pAgg->pGroupKeys = NULL;
- code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys);
+ code = nodesListDeduplicate(&pSelect->pGroupByList);
+ if (TSDB_CODE_SUCCESS == code) {
+ pAgg->pGroupKeys = NULL;
+ code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys);
+ }
}
// rewrite the expression in subsequent clauses
@@ -1534,21 +1537,20 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
pSort->pSortKeys = NULL;
code = nodesCloneList(pSelect->pOrderByList, &pSort->pSortKeys);
- if (NULL == pSort->pSortKeys) {
- code = code;
- }
- SNode* pNode = NULL;
- SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
- if (isPrimaryKeySort(pSelect->pOrderByList)) pSort->node.outputTsOrder = firstSortKey->order;
- if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) {
- SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr;
- int16_t projIdx = 1;
- FOREACH(pNode, pSelect->pProjectionList) {
- SExprNode* pExpr = (SExprNode*)pNode;
- if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) {
- pCol->projIdx = projIdx; break;
+ if (NULL != pSort->pSortKeys) {
+ SNode* pNode = NULL;
+ SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
+ if (isPrimaryKeySort(pSelect->pOrderByList)) pSort->node.outputTsOrder = firstSortKey->order;
+ if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) {
+ SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr;
+ int16_t projIdx = 1;
+ FOREACH(pNode, pSelect->pProjectionList) {
+ SExprNode* pExpr = (SExprNode*)pNode;
+ if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) {
+ pCol->projIdx = projIdx; break;
+ }
+ projIdx++;
}
- projIdx++;
}
}
}
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 885faa5461..a1809ff137 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -204,6 +204,7 @@ static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNode
// case QUERY_NODE_LOGIC_PLAN_WINDOW:
case QUERY_NODE_LOGIC_PLAN_AGG:
case QUERY_NODE_LOGIC_PLAN_SORT:
+ case QUERY_NODE_LOGIC_PLAN_FILL:
if (pNode == pNodeForcePropagate) {
pNode->outputTsOrder = order;
break;
@@ -3491,37 +3492,77 @@ static void eliminateProjPushdownProjIdx(SNodeList* pParentProjects, SNodeList*
}
}
+static int32_t eliminateProjOptFindProjPrefixWithOrderCheck(SProjectLogicNode* pProj, SProjectLogicNode* pChild, SNodeList** pNewChildTargets, bool *orderMatch) {
+ int32_t code = 0;
+ SNode* pProjection = NULL, *pChildTarget = NULL;
+ *orderMatch = true;
+ FORBOTH(pProjection, pProj->pProjections, pChildTarget, pChild->node.pTargets) {
+ if (!pProjection) break;
+ if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) {
+ *orderMatch = false;
+ break;
+ }
+ if (pNewChildTargets) {
+ SNode* pNew = NULL;
+ code = nodesCloneNode(pChildTarget, &pNew);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeStrictAppend(pNewChildTargets, pNew);
+ }
+ if (TSDB_CODE_SUCCESS != code && pNewChildTargets) {
+ nodesDestroyList(*pNewChildTargets);
+ *pNewChildTargets = NULL;
+ break;
+ }
+ }
+ }
+ return code;
+}
+
+static int32_t eliminateProjOptPushTargetsToSetOpChildren(SProjectLogicNode* pSetOp) {
+ SNode* pChildProj = NULL;
+ int32_t code = 0;
+ bool orderMatch = false;
+ FOREACH(pChildProj, pSetOp->node.pChildren) {
+ if (QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChildProj)) {
+ SProjectLogicNode* pChildLogic = (SProjectLogicNode*)pChildProj;
+ SNodeList* pNewChildTargetsForChild = NULL;
+ code = eliminateProjOptFindProjPrefixWithOrderCheck(pSetOp, pChildLogic, &pNewChildTargetsForChild, &orderMatch);
+ if (TSDB_CODE_SUCCESS != code) break;
+ nodesDestroyList(pChildLogic->node.pTargets);
+ pChildLogic->node.pTargets = pNewChildTargetsForChild;
+ alignProjectionWithTarget((SLogicNode*)pChildLogic);
+ if (pChildLogic->isSetOpProj) {
+ code = eliminateProjOptPushTargetsToSetOpChildren(pChildLogic);
+ if (TSDB_CODE_SUCCESS != code) break;
+ }
+ }
+ }
+ return code;
+}
+
static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan,
SProjectLogicNode* pProjectNode) {
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProjectNode->node.pChildren, 0);
int32_t code = 0;
+ bool isSetOpProj = false;
+ bool orderMatch = false;
+ bool sizeMatch = LIST_LENGTH(pProjectNode->pProjections) == LIST_LENGTH(pChild->pTargets);
+ bool needReplaceTargets = true;
if (NULL == pProjectNode->node.pParent) {
SNodeList* pNewChildTargets = NULL;
- code = nodesMakeList(&pNewChildTargets);
- if (TSDB_CODE_SUCCESS != code) {
- return code;
- }
SNode * pProjection = NULL, *pChildTarget = NULL;
- bool orderMatch = true;
- bool needOrderMatch =
+ isSetOpProj =
QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChild) && ((SProjectLogicNode*)pChild)->isSetOpProj;
- if (needOrderMatch) {
+ if (isSetOpProj) {
// For sql: select ... from (select ... union all select ...);
// When eliminating the outer proj (the outer select), we have to make sure that the outer proj projections and
// union all project targets have same columns in the same order. See detail in TD-30188
- FORBOTH(pProjection, pProjectNode->pProjections, pChildTarget, pChild->pTargets) {
- if (!pProjection) break;
- if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) {
- orderMatch = false;
- break;
- }
- SNode* pNew = NULL;
- code = nodesCloneNode(pChildTarget, &pNew);
- if (TSDB_CODE_SUCCESS == code) {
- code = nodesListStrictAppend(pNewChildTargets, pNew);
- }
- if (TSDB_CODE_SUCCESS != code) break;
+ code = eliminateProjOptFindProjPrefixWithOrderCheck(pProjectNode, (SProjectLogicNode*)pChild,
+ sizeMatch ? NULL : &pNewChildTargets, &orderMatch);
+ if (TSDB_CODE_SUCCESS == code && sizeMatch && orderMatch) {
+ pNewChildTargets = pChild->pTargets;
+ needReplaceTargets = false;
}
} else {
FOREACH(pProjection, pProjectNode->pProjections) {
@@ -3530,7 +3571,7 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
SNode* pNew = NULL;
code = nodesCloneNode(pChildTarget, &pNew);
if (TSDB_CODE_SUCCESS == code) {
- code = nodesListStrictAppend(pNewChildTargets, pNew);
+ code = nodesListMakeStrictAppend(&pNewChildTargets, pNew);
}
break;
}
@@ -3545,12 +3586,13 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
return code;
}
- if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) &&
- (!needOrderMatch || (needOrderMatch && orderMatch))) {
- nodesDestroyList(pChild->pTargets);
- pChild->pTargets = pNewChildTargets;
+ if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) && (!isSetOpProj || orderMatch)) {
+ if (needReplaceTargets) {
+ nodesDestroyList(pChild->pTargets);
+ pChild->pTargets = pNewChildTargets;
+ }
} else {
- nodesDestroyList(pNewChildTargets);
+ if (needReplaceTargets) nodesDestroyList(pNewChildTargets);
OPTIMIZE_FLAG_SET_MASK(pProjectNode->node.optimizedFlag, OPTIMIZE_FLAG_ELIMINATE_PROJ);
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
@@ -3573,7 +3615,11 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan*
NODES_CLEAR_LIST(pProjectNode->node.pChildren);
nodesDestroyNode((SNode*)pProjectNode);
// if pChild is a project logic node, remove its projection which is not reference by its target.
- alignProjectionWithTarget(pChild);
+ if (needReplaceTargets) {
+ alignProjectionWithTarget(pChild);
+ // Since we have eliminated the outer proj, we need to push down the new targets to the children of the set operation.
+ if (isSetOpProj && orderMatch && !sizeMatch) code = eliminateProjOptPushTargetsToSetOpChildren((SProjectLogicNode*)pChild);
+ }
}
pCxt->optimized = true;
return code;
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index e0e42087f3..e960c0ff5d 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -836,11 +836,9 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo
nodesDestroyNode(pMergeWin->pTsEnd);
pMergeWin->pTsEnd = NULL;
code = nodesCloneNode(nodesListGetNode(pPartWin->node.pTargets, index), &pMergeWin->pTsEnd);
- if (NULL == pMergeWin->pTsEnd) {
- code = code;
- }
}
- code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow);
+ if (TSDB_CODE_SUCCESS == code)
+ code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
diff --git a/source/libs/qcom/inc/queryInt.h b/source/libs/qcom/inc/queryInt.h
index ee7d4499d2..7820b1390c 100644
--- a/source/libs/qcom/inc/queryInt.h
+++ b/source/libs/qcom/inc/queryInt.h
@@ -23,7 +23,13 @@ extern "C" {
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
-
+#define QUERY_PARAM_CHECK(_p) \
+ do { \
+ if ((_p) == NULL) { \
+ qError("function:%s, param invalid, line:%d", __FUNCTION__, __LINE__); \
+ return TSDB_CODE_TSC_INVALID_INPUT; \
+ } \
+ } while (0)
#ifdef __cplusplus
}
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index 847fc9b8e9..54e92c6a1b 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -59,6 +59,9 @@ const SSchema* tGetTbnameColumnSchema() {
}
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
+ if (!pSchema) {
+ return false;
+ }
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
@@ -100,7 +103,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
}
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
- if (!VALIDNUMOFCOLS(numOfCols)) {
+ if (!pSchema || !VALIDNUMOFCOLS(numOfCols)) {
return false;
}
@@ -127,6 +130,7 @@ bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTag
static STaskQueue taskQueue = {0};
static void processTaskQueue(SQueueInfo *pInfo, SSchedMsg *pSchedMsg) {
+ if(!pSchedMsg || !pSchedMsg->ahandle) return;
__async_exec_fn_t execFn = (__async_exec_fn_t)pSchedMsg->ahandle;
(void)execFn(pSchedMsg->thandle);
taosFreeQitem(pSchedMsg);
@@ -205,7 +209,11 @@ void destroyAhandle(void *ahandle) {
}
int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
- bool persistHandle, void* rpcCtx) {
+ bool persistHandle, void* rpcCtx) {
+ QUERY_PARAM_CHECK(pTransporter);
+ QUERY_PARAM_CHECK(epSet);
+ QUERY_PARAM_CHECK(pInfo);
+
char* pMsg = rpcMallocCont(pInfo->msgInfo.len);
if (NULL == pMsg) {
qError("0x%" PRIx64 " msg:%s malloc failed", pInfo->requestId, TMSG_INFO(pInfo->msgType));
@@ -236,6 +244,7 @@ int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransp
return asyncSendMsgToServerExt(pTransporter, epSet, pTransporterId, pInfo, false, NULL);
}
int32_t asyncFreeConnById(void* pTransporter, int64_t pid) {
+ QUERY_PARAM_CHECK(pTransporter);
return rpcFreeConnById(pTransporter, pid);
}
@@ -314,6 +323,8 @@ void destroyQueryExecRes(SExecResult* pRes) {
}
// clang-format on
int32_t dataConverToStr(char* str, int64_t capacity, int type, void* buf, int32_t bufSize, int32_t* len) {
+ QUERY_PARAM_CHECK(str);
+ QUERY_PARAM_CHECK(buf);
int32_t n = 0;
switch (type) {
@@ -420,6 +431,10 @@ int32_t dataConverToStr(char* str, int64_t capacity, int type, void* buf, int32_
}
void parseTagDatatoJson(void* p, char** jsonStr) {
+ if (!p || !jsonStr) {
+ qError("parseTagDatatoJson invalid input, line:%d", __LINE__);
+ return;
+ }
char* string = NULL;
SArray* pTagVals = NULL;
cJSON* json = NULL;
@@ -520,6 +535,7 @@ end:
}
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
+ QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@@ -553,6 +569,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
}
void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType) {
+ if(!pMeta || !pName || !pType) return;
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
if (0 == strcmp(pName, pMeta->schema[i].name)) {
@@ -576,6 +593,7 @@ void freeVgInfo(SDBVgInfo* vgInfo) {
}
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {
+ QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@@ -617,6 +635,7 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {
}
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst) {
+ QUERY_PARAM_CHECK(pDst);
if (NULL == pSrc) {
*pDst = NULL;
return TSDB_CODE_SUCCESS;
@@ -674,6 +693,7 @@ int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst) {
_exit:
tdDestroySVCreateTbReq(*pDst);
taosMemoryFree(*pDst);
+ *pDst = NULL;
return terrno;
}
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index 60c760a60e..9d9c169c05 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -29,6 +29,8 @@ int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int
int32_t (*queryProcessMsgRsp[TDMT_MAX])(void *output, char *msg, int32_t msgSize) = {0};
int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
+ QUERY_PARAM_CHECK(pOut);
+ QUERY_PARAM_CHECK(usedbRsp);
memcpy(pOut->db, usedbRsp->db, TSDB_DB_FNAME_LEN);
pOut->dbId = usedbRsp->uid;
@@ -71,10 +73,10 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SBuildTableInput *pInput = input;
- if (NULL == input || NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
STableInfoReq infoReq = {0};
infoReq.option = pInput->option;
@@ -101,10 +103,10 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3
}
int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SBuildUseDBInput *pInput = input;
- if (NULL == pInput || NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
SUseDbReq usedbReq = {0};
tstrncpy(usedbReq.db, pInput->db, TSDB_DB_FNAME_LEN);
@@ -131,9 +133,8 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms
}
int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SQnodeListReq qnodeListReq = {0};
qnodeListReq.rowNum = -1;
@@ -155,9 +156,8 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SDnodeListReq dnodeListReq = {0};
dnodeListReq.rowNum = -1;
@@ -179,9 +179,8 @@ int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetSerVerMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SServerVerReq req = {0};
@@ -202,9 +201,9 @@ int32_t queryBuildGetSerVerMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SDbCfgReq dbCfgReq = {0};
tstrncpy(dbCfgReq.db, input, TSDB_DB_FNAME_LEN);
@@ -226,9 +225,9 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SUserIndexReq indexReq = {0};
tstrncpy(indexReq.indexFName, input, TSDB_INDEX_FNAME_LEN);
@@ -251,9 +250,9 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t
int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SRetrieveFuncReq funcReq = {0};
funcReq.numOfFuncs = 1;
@@ -288,9 +287,9 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3
}
int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SGetUserAuthReq req = {0};
tstrncpy(req.user, input, TSDB_USER_LEN);
@@ -311,9 +310,9 @@ int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32
}
int32_t queryBuildGetTbIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
STableIndexReq indexReq = {0};
tstrncpy(indexReq.tbFName, input, TSDB_TABLE_FNAME_LEN);
@@ -335,9 +334,9 @@ int32_t queryBuildGetTbIndexMsg(void *input, char **msg, int32_t msgSize, int32_
}
int32_t queryBuildGetTbCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SBuildTableInput *pInput = input;
STableCfgReq cfgReq = {0};
@@ -362,9 +361,9 @@ int32_t queryBuildGetTbCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
}
int32_t queryBuildGetViewMetaMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
SViewMetaReq req = {0};
tstrncpy(req.fullname, input, TSDB_VIEW_FNAME_LEN);
@@ -387,9 +386,9 @@ int32_t queryBuildGetViewMetaMsg(void *input, char **msg, int32_t msgSize, int32
int32_t queryBuildGetTableTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
STableTSMAInfoReq req = {0};
tstrncpy(req.name, input, TSDB_TABLE_FNAME_LEN);
@@ -411,9 +410,9 @@ int32_t queryBuildGetTableTSMAMsg(void *input, char **msg, int32_t msgSize, int3
int32_t queryBuildGetTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen,
void *(*mallcFp)(int64_t)) {
- if (NULL == msg || NULL == msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
STableTSMAInfoReq req = {0};
req.fetchingWithTsmaName = true;
@@ -436,9 +435,9 @@ int32_t queryBuildGetTSMAMsg(void *input, char **msg, int32_t msgSize, int32_t *
}
int32_t queryBuildGetStreamProgressMsg(void* input, char** msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int64_t)) {
- if (!msg || !msgLen) {
- return TSDB_CODE_TSC_INVALID_INPUT;
- }
+ QUERY_PARAM_CHECK(input);
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(msgLen);
int32_t len = tSerializeStreamProgressReq(NULL, 0, input);
void* pBuf = (*mallcFp)(len);
@@ -504,6 +503,7 @@ PROCESS_USEDB_OVER:
}
static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
+ QUERY_PARAM_CHECK(pMetaMsg);
if (pMetaMsg->numOfTags < 0 || pMetaMsg->numOfTags > TSDB_MAX_TAGS) {
qError("invalid numOfTags[%d] in table meta rsp msg", pMetaMsg->numOfTags);
return TSDB_CODE_TSC_INVALID_VALUE;
@@ -539,6 +539,8 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
}
int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(pMeta);
pMeta->vgId = msg->vgId;
pMeta->tableType = msg->tableType;
pMeta->uid = msg->tuid;
@@ -551,6 +553,8 @@ int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
}
int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(pMeta);
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
int32_t schemaExtSize = (useCompress(msg->tableType) && msg->pSchemaExt) ? sizeof(SSchemaExt) * msg->numOfColumns : 0;
@@ -605,6 +609,8 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta *
}
int32_t queryCreateTableMetaExFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
+ QUERY_PARAM_CHECK(msg);
+ QUERY_PARAM_CHECK(pMeta);
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
int32_t schemaExtSize = (useCompress(msg->tableType) && msg->pSchemaExt) ? sizeof(SSchemaExt) * msg->numOfColumns : 0;
diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h
index 7a902bdd66..6d81baf91a 100644
--- a/source/libs/qworker/inc/qwInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -215,8 +215,8 @@ typedef struct SQWorkerMgmt {
#define QW_CTX_NOT_EXISTS_ERR_CODE(mgmt) \
(atomic_load_8(&(mgmt)->nodeStopped) ? TSDB_CODE_VND_STOPPED : TSDB_CODE_QRY_TASK_CTX_NOT_EXIST)
-#define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId
-#define QW_IDS() sId, qId, tId, rId, eId
+#define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId, int32_t eId
+#define QW_IDS() sId, qId, cId, tId, rId, eId
#define QW_FPARAMS() mgmt, QW_IDS()
#define QW_STAT_INC(_item, _n) (void)atomic_add_fetch_64(&(_item), _n)
@@ -257,18 +257,20 @@ typedef struct SQWorkerMgmt {
#define QW_FETCH_RUNNING(ctx) ((ctx)->inFetch)
#define QW_QUERY_NOT_STARTED(ctx) (QW_GET_PHASE(ctx) == -1)
-#define QW_SET_QTID(id, qId, tId, eId) \
- do { \
- *(uint64_t *)(id) = (qId); \
- *(uint64_t *)((char *)(id) + sizeof(qId)) = (tId); \
- *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(tId)) = (eId); \
+#define QW_SET_QTID(id, qId, cId, tId, eId) \
+ do { \
+ *(uint64_t *)(id) = (qId); \
+ *(uint64_t *)((char *)(id) + sizeof(qId)) = (cId); \
+ *(uint64_t *)((char *)(id) + sizeof(qId) + sizeof(cId)) = (tId); \
+ *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(cId) + sizeof(tId)) = (eId); \
} while (0)
-#define QW_GET_QTID(id, qId, tId, eId) \
- do { \
- (qId) = *(uint64_t *)(id); \
- (tId) = *(uint64_t *)((char *)(id) + sizeof(qId)); \
- (eId) = *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(tId)); \
+#define QW_GET_QTID(id, qId, cId, tId, eId) \
+ do { \
+ (qId) = *(uint64_t *)(id); \
+ (cId) = *(uint64_t *)((char *)(id) + sizeof(qId)); \
+ (tId) = *(uint64_t *)((char *)(id) + sizeof(qId) + sizeof(cId)); \
+ (eId) = *(int32_t *)((char *)(id) + sizeof(qId) + sizeof(cId) + sizeof(tId)); \
} while (0)
#define QW_ERR_RET(c) \
@@ -310,25 +312,31 @@ typedef struct SQWorkerMgmt {
#define QW_SCH_ELOG(param, ...) qError("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__)
#define QW_SCH_DLOG(param, ...) qDebug("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__)
-#define QW_TASK_ELOG(param, ...) qError("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
-#define QW_TASK_WLOG(param, ...) qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
-#define QW_TASK_DLOG(param, ...) qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
+#define QW_TASK_ELOG(param, ...) \
+ qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+#define QW_TASK_WLOG(param, ...) \
+ qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+#define QW_TASK_DLOG(param, ...) \
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
#define QW_TASK_DLOGL(param, ...) \
- qDebugL("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId, __VA_ARGS__)
+ qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
-#define QW_TASK_ELOG_E(param) qError("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
-#define QW_TASK_WLOG_E(param) qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
-#define QW_TASK_DLOG_E(param) qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, tId, eId)
+#define QW_TASK_ELOG_E(param) \
+ qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
+#define QW_TASK_WLOG_E(param) \
+ qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
+#define QW_TASK_DLOG_E(param) \
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
-#define QW_SCH_TASK_ELOG(param, ...) \
- qError("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
- __VA_ARGS__)
-#define QW_SCH_TASK_WLOG(param, ...) \
- qWarn("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
- __VA_ARGS__)
-#define QW_SCH_TASK_DLOG(param, ...) \
- qDebug("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, tId, eId, \
- __VA_ARGS__)
+#define QW_SCH_TASK_ELOG(param, ...) \
+ qError("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+ qId, cId, tId, eId, __VA_ARGS__)
+#define QW_SCH_TASK_WLOG(param, ...) \
+ qWarn("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \
+ cId, tId, eId, __VA_ARGS__)
+#define QW_SCH_TASK_DLOG(param, ...) \
+ qDebug("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+ qId, cId, tId, eId, __VA_ARGS__)
#define QW_LOCK_DEBUG(...) \
do { \
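
A standalone illustration of the widened task key now packed by QW_SET_QTID/QW_GET_QTID: qId, the new cId, tId, and eId laid out back to back, which grows the key from 20 to 28 bytes, so every hash lookup keyed on it (as in qwUtil.c below) must use the new length. This sketch uses memcpy for alignment safety, whereas the macros store through casted pointers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  uint64_t qId = 0x1111, cId = 0x2222, tId = 0x3333;
  int32_t  eId = 4;

  char key[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
  memcpy(key, &qId, sizeof(qId));
  memcpy(key + sizeof(qId), &cId, sizeof(cId));
  memcpy(key + sizeof(qId) + sizeof(cId), &tId, sizeof(tId));
  memcpy(key + sizeof(qId) + sizeof(cId) + sizeof(tId), &eId, sizeof(eId));

  uint64_t q, c, t; int32_t e;
  memcpy(&q, key, sizeof(q));
  memcpy(&c, key + sizeof(q), sizeof(c));
  memcpy(&t, key + sizeof(q) + sizeof(c), sizeof(t));
  memcpy(&e, key + sizeof(q) + sizeof(c) + sizeof(t), sizeof(e));

  printf("key size=%zu QID=0x%llx CID=0x%llx TID=0x%llx EID=%d\n",
         sizeof(key), (unsigned long long)q, (unsigned long long)c,
         (unsigned long long)t, e);
  return 0;
}
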
diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c
index d3b8d36b25..897080df3e 100644
--- a/source/libs/qworker/src/qwDbg.c
+++ b/source/libs/qworker/src/qwDbg.c
@@ -96,14 +96,14 @@ void qwDbgDumpSchInfo(SQWorker *mgmt, SQWSchStatus *sch, int32_t i) {
int32_t taskNum = taosHashGetSize(sch->tasksHash);
QW_DLOG("***The %dth scheduler status, hbBrokenTs:%" PRId64 ",taskNum:%d", i, sch->hbBrokenTs, taskNum);
- uint64_t qId, tId;
+ uint64_t qId, cId, tId;
int32_t eId;
SQWTaskStatus *pTask = NULL;
void *pIter = taosHashIterate(sch->tasksHash, NULL);
while (pIter) {
pTask = (SQWTaskStatus *)pIter;
void *key = taosHashGetKey(pIter, NULL);
- QW_GET_QTID(key, qId, tId, eId);
+ QW_GET_QTID(key, qId, cId, tId, eId);
QW_TASK_DLOG("job refId:%" PRIx64 ", code:%x, task status:%d", pTask->refId, pTask->code, pTask->status);
@@ -118,13 +118,13 @@ void qwDbgDumpTasksInfo(SQWorker *mgmt) {
int32_t i = 0;
SQWTaskCtx *ctx = NULL;
- uint64_t qId, tId;
+ uint64_t qId, cId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
- QW_GET_QTID(key, qId, tId, eId);
+ QW_GET_QTID(key, qId, cId, tId, eId);
QW_TASK_DLOG("%p lock:%x, phase:%d, type:%d, explain:%d, needFetch:%d, localExec:%d, queryMsgType:%d, "
"sId:%" PRId64 ", level:%d, queryGotData:%d, queryRsped:%d, queryEnd:%d, queryContinue:%d, queryInQueue:%d, "
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 20b81bfc14..7dbad90cc0 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -233,6 +233,7 @@ int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
qMsg.header.contLen = 0;
qMsg.sId = sId;
qMsg.queryId = qId;
+ qMsg.clientId = cId;
qMsg.taskId = tId;
qMsg.refId = rId;
qMsg.execId = eId;
@@ -284,6 +285,7 @@ int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
req->header.vgId = mgmt->nodeId;
req->sId = sId;
req->queryId = qId;
+ req->clientId = cId;
req->taskId = tId;
req->execId = eId;
@@ -312,6 +314,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
qMsg.header.contLen = 0;
qMsg.sId = sId;
qMsg.queryId = qId;
+ qMsg.clientId = cId;
qMsg.taskId = tId;
qMsg.refId = rId;
qMsg.execId = eId;
@@ -416,6 +419,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
+ uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@@ -447,6 +451,7 @@ int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) {
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
+ uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@@ -479,6 +484,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
+ uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@@ -524,6 +530,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
uint64_t sId = msg->sId;
uint64_t qId = msg->queryId;
+ uint64_t cId = msg->clientId;
uint64_t tId = msg->taskId;
int64_t rId = 0;
int32_t eId = msg->execId;
@@ -557,6 +564,7 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
uint64_t sId = req.sId;
uint64_t qId = req.queryId;
+ uint64_t cId = req.clientId;
uint64_t tId = req.taskId;
int64_t rId = 0;
int32_t eId = req.execId;
@@ -604,12 +612,14 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
msg->sId = be64toh(msg->sId);
msg->queryId = be64toh(msg->queryId);
+ msg->clientId = be64toh(msg->clientId);
msg->taskId = be64toh(msg->taskId);
msg->refId = be64toh(msg->refId);
msg->execId = ntohl(msg->execId);
uint64_t sId = msg->sId;
uint64_t qId = msg->queryId;
+ uint64_t cId = msg->clientId;
uint64_t tId = msg->taskId;
int64_t rId = msg->refId;
int32_t eId = msg->execId;
@@ -646,6 +656,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int6
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
+ uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@@ -684,6 +695,7 @@ int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
uint64_t sId = msg.sId;
uint64_t qId = msg.queryId;
+ uint64_t cId = msg.clientId;
uint64_t tId = msg.taskId;
int64_t rId = msg.refId;
int32_t eId = msg.execId;
@@ -753,6 +765,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SD
uint64_t sId = req.sId;
uint64_t qId = req.queryId;
+ uint64_t cId = req.clientId;
uint64_t tId = req.taskId;
int64_t rId = 0;
int32_t eId = -1;
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index ef07a42629..917579deb0 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -137,8 +137,8 @@ int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchS
void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }
int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
QW_LOCK(rwType, &sch->tasksLock);
*task = taosHashGet(sch->tasksHash, id, sizeof(id));
@@ -153,8 +153,8 @@ int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, S
int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
int32_t code = 0;
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskStatus ntask = {0};
ntask.status = status;
@@ -209,8 +209,8 @@ int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch
void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }
int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
*ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
if (NULL == (*ctx)) {
@@ -222,8 +222,8 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
}
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
*ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
if (NULL == (*ctx)) {
@@ -235,8 +235,8 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
}
int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx nctx = {0};
@@ -347,6 +347,7 @@ int32_t qwSendExplainResponse(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
(void)memcpy(pExec, taosArrayGet(execInfoList, 0), localRsp.rsp.numOfPlans * sizeof(SExplainExecInfo));
localRsp.rsp.subplanInfo = pExec;
localRsp.qId = qId;
+ localRsp.cId = cId;
localRsp.tId = tId;
localRsp.rId = rId;
localRsp.eId = eId;
@@ -376,8 +377,8 @@ _return:
int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx octx;
SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
@@ -411,8 +412,8 @@ int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
SQWTaskStatus *task = NULL;
int32_t code = 0;
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
QW_TASK_WLOG_E("scheduler does not exist");
@@ -465,8 +466,8 @@ _return:
int32_t qwHandleDynamicTaskEnd(QW_FPARAMS_DEF) {
- char id[sizeof(qId) + sizeof(tId) + sizeof(eId)] = {0};
- QW_SET_QTID(id, qId, tId, eId);
+ char id[sizeof(qId) + sizeof(cId) + sizeof(tId) + sizeof(eId)] = {0};
+ QW_SET_QTID(id, qId, cId, tId, eId);
SQWTaskCtx octx;
SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
@@ -588,14 +589,14 @@ void qwDestroyImpl(void *pMgmt) {
mgmt->hbTimer = NULL;
taosTmrCleanUp(mgmt->timer);
- uint64_t qId, tId;
+ uint64_t qId, cId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
- QW_GET_QTID(key, qId, tId, eId);
+ QW_GET_QTID(key, qId, cId, tId, eId);
qwFreeTaskCtx(ctx);
QW_TASK_DLOG_E("task ctx freed");
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 9b96c1e519..13e1d0e231 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -19,7 +19,7 @@ SQWorkerMgmt gQwMgmt = {
};
void qwStopAllTasks(SQWorker *mgmt) {
- uint64_t qId, tId, sId;
+ uint64_t qId, cId, tId, sId;
int32_t eId;
int64_t rId = 0;
int32_t code = TSDB_CODE_SUCCESS;
@@ -28,7 +28,7 @@ void qwStopAllTasks(SQWorker *mgmt) {
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
- QW_GET_QTID(key, qId, tId, eId);
+ QW_GET_QTID(key, qId, cId, tId, eId);
QW_LOCK(QW_WRITE, &ctx->lock);
@@ -288,7 +288,7 @@ int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo *hbInfo)
// TODO GET EXECUTOR API TO GET MORE INFO
- QW_GET_QTID(key, status.queryId, status.taskId, status.execId);
+ QW_GET_QTID(key, status.queryId, status.clientId, status.taskId, status.execId);
status.status = taskStatus->status;
status.refId = taskStatus->refId;
@@ -1473,8 +1473,8 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
- SQWMsg *qwMsg, SArray *explainRes) {
+int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
+ int32_t eId, SQWMsg *qwMsg, SArray *explainRes) {
SQWorker *mgmt = (SQWorker *)pMgmt;
int32_t code = 0;
SQWTaskCtx *ctx = NULL;
@@ -1538,8 +1538,8 @@ _return:
QW_RET(code);
}
-int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId,
- void **pRsp, SArray *explainRes) {
+int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t cId, uint64_t tId, int64_t rId,
+ int32_t eId, void **pRsp, SArray *explainRes) {
SQWorker *mgmt = (SQWorker *)pMgmt;
int32_t code = 0;
int32_t dataLen = 0;
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index 96b9d2da8d..ef643852ea 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -62,7 +62,7 @@ typedef enum {
#define SCH_DEFAULT_MAX_RETRY_NUM 6
#define SCH_MIN_AYSNC_EXEC_NUM 3
#define SCH_DEFAULT_RETRY_TOTAL_ROUND 3
-#define SCH_DEFAULT_TASK_CAPACITY_NUM 1000
+#define SCH_DEFAULT_TASK_CAPACITY_NUM 1000
typedef struct SSchDebug {
bool lockEnable;
@@ -142,8 +142,9 @@ typedef struct SSchedulerCfg {
} SSchedulerCfg;
typedef struct SSchedulerMgmt {
- uint64_t taskId; // sequential taksId
- uint64_t sId; // schedulerId
+ uint64_t clientId; // unique clientId
+ uint64_t taskId; // sequential taskId
+ uint64_t sId; // schedulerId
SSchedulerCfg cfg;
bool exit;
int32_t jobRef;
@@ -163,6 +164,7 @@ typedef struct SSchTaskCallbackParam {
SSchCallbackParamHeader head;
uint64_t queryId;
int64_t refId;
+ uint64_t clientId;
uint64_t taskId;
int32_t execId;
void *pTrans;
@@ -222,6 +224,7 @@ typedef struct SSchTimerParam {
} SSchTimerParam;
typedef struct SSchTask {
+ uint64_t clientId; // current client id
uint64_t taskId; // task id
SRWLatch lock; // task reentrant lock
int32_t maxExecTimes; // task max exec times
@@ -329,12 +332,14 @@ extern SSchedulerMgmt schMgmt;
#define SCH_LOCK_TASK(_task) SCH_LOCK(SCH_WRITE, &(_task)->lock)
#define SCH_UNLOCK_TASK(_task) SCH_UNLOCK(SCH_WRITE, &(_task)->lock)
-#define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1)
-#define SCH_TASK_EID(_task) ((_task) ? (_task)->execId : -1)
+#define SCH_CLIENT_ID(_task) ((_task) ? (_task)->clientId : -1)
+#define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1)
+#define SCH_TASK_EID(_task) ((_task) ? (_task)->execId : -1)
#define SCH_IS_DATA_BIND_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN)
-#define SCH_IS_DATA_BIND_PLAN(_plan) (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY))
-#define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan)
+#define SCH_IS_DATA_BIND_PLAN(_plan) \
+ (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY))
+#define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan)
#define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum)
#define SCH_IS_DATA_MERGE_TASK(task) (!SCH_IS_DATA_BIND_TASK(task))
#define SCH_IS_LOCAL_EXEC_TASK(_job, _task) \
@@ -415,15 +420,15 @@ extern SSchedulerMgmt schMgmt;
#define SCH_SWITCH_EPSET(_addr) ((_addr)->epSet.inUse = ((_addr)->epSet.inUse + 1) % (_addr)->epSet.numOfEps)
#define SCH_TASK_NUM_OF_EPS(_addr) ((_addr)->epSet.numOfEps)
-#define SCH_LOG_TASK_START_TS(_task) \
- do { \
- int64_t us = taosGetTimestampUs(); \
- if (NULL == taosArrayPush((_task)->profile.execTime, &us)) { \
- qError("taosArrayPush task execTime failed, error:%s", tstrerror(terrno)); \
- } \
- if (0 == (_task)->execId) { \
- (_task)->profile.startTs = us; \
- } \
+#define SCH_LOG_TASK_START_TS(_task) \
+ do { \
+ int64_t us = taosGetTimestampUs(); \
+ if (NULL == taosArrayPush((_task)->profile.execTime, &us)) { \
+ qError("taosArrayPush task execTime failed, error:%s", tstrerror(terrno)); \
+ } \
+ if (0 == (_task)->execId) { \
+ (_task)->profile.startTs = us; \
+ } \
} while (0)
#define SCH_LOG_TASK_WAIT_TS(_task) \
@@ -446,24 +451,24 @@ extern SSchedulerMgmt schMgmt;
(_task)->profile.endTs = us; \
} while (0)
-#define SCH_JOB_ELOG(param, ...) qError("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
-#define SCH_JOB_DLOG(param, ...) qDebug("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
+#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
+#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
-#define SCH_TASK_ELOG(param, ...) \
- qError("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), \
- __VA_ARGS__)
-#define SCH_TASK_DLOG(param, ...) \
- qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), \
- __VA_ARGS__)
-#define SCH_TASK_TLOG(param, ...) \
- qTrace("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), \
- __VA_ARGS__)
-#define SCH_TASK_DLOGL(param, ...) \
- qDebugL("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), \
- __VA_ARGS__)
-#define SCH_TASK_WLOG(param, ...) \
- qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), \
- __VA_ARGS__)
+#define SCH_TASK_ELOG(param, ...) \
+ qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
+#define SCH_TASK_DLOG(param, ...) \
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
+#define SCH_TASK_TLOG(param, ...) \
+ qTrace("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
+#define SCH_TASK_DLOGL(param, ...) \
+ qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
+#define SCH_TASK_WLOG(param, ...) \
+ qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
#define SCH_SET_ERRNO(_err) \
do { \
@@ -576,7 +581,7 @@ int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask);
int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void *param);
int32_t schAcquireJob(int64_t refId, SSchJob **ppJob);
int32_t schReleaseJob(int64_t refId);
-int32_t schReleaseJobEx(int64_t refId, int32_t* released);
+int32_t schReleaseJobEx(int64_t refId, int32_t *released);
void schFreeFlowCtrl(SSchJob *pJob);
int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel);
int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask);
@@ -644,7 +649,7 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list);
int32_t schNotifyTaskInHashList(SSchJob *pJob, SHashObj *list, ETaskNotifyType type, SSchTask *pTask);
int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level);
void schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask);
-int32_t schValidateSubplan(SSchJob *pJob, SSubplan* pSubplan, int32_t level, int32_t idx, int32_t taskNum);
+int32_t schValidateSubplan(SSchJob *pJob, SSubplan *pSubplan, int32_t level, int32_t idx, int32_t taskNum);
int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel);
int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask);
void schDirectPostJobRes(SSchedulerReq *pReq, int32_t errCode);
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index b15a6a09d3..3321fdb4b5 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -500,8 +500,8 @@ _return:
int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) {
SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
- qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId,
- code);
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId,
+ pParam->clientId, pParam->taskId, code);
// called if drop task rsp received code
(void)rpcReleaseHandle(pMsg->handle, TAOS_CONN_CLIENT); // ignore error
@@ -517,8 +517,8 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) {
int32_t schHandleNotifyCallback(void *param, SDataBuf *pMsg, int32_t code) {
SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
- qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x", pParam->queryId, pParam->taskId,
- code);
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x", pParam->queryId,
+ pParam->clientId, pParam->taskId, code);
if (pMsg) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
@@ -595,6 +595,7 @@ int32_t schMakeCallbackParam(SSchJob *pJob, SSchTask *pTask, int32_t msgType, bo
param->queryId = pJob->queryId;
param->refId = pJob->refId;
+ param->clientId = SCH_CLIENT_ID(pTask);
param->taskId = SCH_TASK_ID(pTask);
param->pTrans = pJob->conn.pTrans;
param->execId = pTask->execId;
@@ -1138,6 +1139,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
req.header.vgId = addr->nodeId;
req.sId = schMgmt.sId;
req.queryId = pJob->queryId;
+ req.clientId = pTask->clientId;
req.taskId = pTask->taskId;
req.phyLen = pTask->msgLen;
req.sqlLen = strlen(pJob->sql);
@@ -1171,6 +1173,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
qMsg.header.contLen = 0;
qMsg.sId = schMgmt.sId;
qMsg.queryId = pJob->queryId;
+ qMsg.clientId = pTask->clientId;
qMsg.taskId = pTask->taskId;
qMsg.refId = pJob->refId;
qMsg.execId = pTask->execId;
@@ -1226,6 +1229,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
req.header.vgId = addr->nodeId;
req.sId = schMgmt.sId;
req.queryId = pJob->queryId;
+ req.clientId = pTask->clientId;
req.taskId = pTask->taskId;
req.execId = pTask->execId;
@@ -1253,6 +1257,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
qMsg.header.contLen = 0;
qMsg.sId = schMgmt.sId;
qMsg.queryId = pJob->queryId;
+ qMsg.clientId = pTask->clientId;
qMsg.taskId = pTask->taskId;
qMsg.refId = pJob->refId;
qMsg.execId = *(int32_t*)param;
@@ -1310,6 +1315,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
qMsg.header.contLen = 0;
qMsg.sId = schMgmt.sId;
qMsg.queryId = pJob->queryId;
+ qMsg.clientId = pTask->clientId;
qMsg.taskId = pTask->taskId;
qMsg.refId = pJob->refId;
qMsg.execId = pTask->execId;
@@ -1339,30 +1345,19 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
}
-#if 1
- SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
- code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, (uint32_t)msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
- msg = NULL;
- SCH_ERR_JRET(code);
-
- if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
- SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
- }
-#else
- if (TDMT_VND_SUBMIT != msgType) {
+ if ((tsBypassFlag & TSDB_BYPASS_RB_RPC_SEND_SUBMIT) && (TDMT_VND_SUBMIT == msgType)) {
+ taosMemoryFree(msg);
+ SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
+ } else {
SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
- code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+ code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, (uint32_t)msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
msg = NULL;
SCH_ERR_JRET(code);
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
}
- } else {
- taosMemoryFree(msg);
- SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
}
-#endif
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c
index fe24633c12..9be0e3fc40 100644
--- a/source/libs/scheduler/src/schTask.c
+++ b/source/libs/scheduler/src/schTask.c
@@ -66,6 +66,7 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
pTask->execId = -1;
pTask->failedExecId = -2;
pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC;
+ pTask->clientId = getClientId();
pTask->taskId = schGenTaskId();
schInitTaskRetryTimes(pJob, pTask, pLevel);
@@ -305,6 +306,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
SCH_LOCK(SCH_WRITE, &parent->planLock);
SDownstreamSourceNode source = {
.type = QUERY_NODE_DOWNSTREAM_SOURCE,
+ .clientId = pTask->clientId,
.taskId = pTask->taskId,
.schedId = schMgmt.sId,
.execId = pTask->execId,
@@ -996,8 +998,8 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {
int32_t code = 0;
- qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId, pStatus->taskId,
- pStatus->execId, jobTaskStatusStr(pStatus->status));
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId,
+ pStatus->clientId, pStatus->taskId, pStatus->execId, jobTaskStatusStr(pStatus->status));
if (schProcessOnCbBegin(&pJob, &pTask, pStatus->queryId, pStatus->refId, pStatus->taskId)) {
continue;
@@ -1043,13 +1045,14 @@ int32_t schHandleExplainRes(SArray *pExplainRes) {
continue;
}
- qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg", localRsp->qId, localRsp->tId);
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg",
+ localRsp->qId, localRsp->cId, localRsp->tId);
pJob = NULL;
(void)schAcquireJob(localRsp->rId, &pJob);
if (NULL == pJob) {
- qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, localRsp->qId,
- localRsp->tId, localRsp->rId);
+ qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64,
+ localRsp->qId, localRsp->cId, localRsp->tId, localRsp->rId);
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_NOT_EXIST);
}
@@ -1068,8 +1071,8 @@ int32_t schHandleExplainRes(SArray *pExplainRes) {
(void)schReleaseJob(pJob->refId);
- qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x", localRsp->qId,
- localRsp->tId, code);
+ qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x",
+ localRsp->qId, localRsp->cId, localRsp->tId, code);
SCH_ERR_JRET(code);
@@ -1147,8 +1150,8 @@ int32_t schLaunchLocalTask(SSchJob *pJob, SSchTask *pTask) {
}
}
- SCH_ERR_JRET(qWorkerProcessLocalQuery(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->taskId, pJob->refId,
- pTask->execId, &qwMsg, explainRes));
+ SCH_ERR_JRET(qWorkerProcessLocalQuery(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->clientId, pTask->taskId,
+ pJob->refId, pTask->execId, &qwMsg, explainRes));
if (SCH_IS_EXPLAIN_JOB(pJob)) {
SCH_ERR_RET(schHandleExplainRes(explainRes));
@@ -1407,8 +1410,8 @@ int32_t schExecLocalFetch(SSchJob *pJob, SSchTask *pTask) {
}
}
- SCH_ERR_JRET(qWorkerProcessLocalFetch(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->taskId, pJob->refId,
- pTask->execId, &pRsp, explainRes));
+ SCH_ERR_JRET(qWorkerProcessLocalFetch(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->clientId, pTask->taskId,
+ pJob->refId, pTask->execId, &pRsp, explainRes));
if (SCH_IS_EXPLAIN_JOB(pJob)) {
SCH_ERR_RET(schHandleExplainRes(explainRes));
diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c
index 4697de6f28..ac34099417 100644
--- a/source/libs/scheduler/src/schUtil.c
+++ b/source/libs/scheduler/src/schUtil.c
@@ -293,6 +293,18 @@ void schCloseJobRef(void) {
}
}
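+// Initialize the scheduler-level client id from the system UUID; tasks pick it up via getClientId() in schInitTask.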
+int32_t initClientId(void) {
+ int32_t code = taosGetSystemUUIDU64(&schMgmt.clientId);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to generate clientId since %s", tstrerror(code));
+ SCH_ERR_RET(code);
+ }
+ qInfo("initialize");
+ return TSDB_CODE_SUCCESS;
+}
+
+uint64_t getClientId(void) { return schMgmt.clientId; }
+
uint64_t schGenTaskId(void) { return atomic_add_fetch_64(&schMgmt.taskId, 1); }
#ifdef BUILD_NO_CALL
diff --git a/source/libs/scheduler/test/CMakeLists.txt b/source/libs/scheduler/test/CMakeLists.txt
index 9605cc7a1c..d9572e8dec 100644
--- a/source/libs/scheduler/test/CMakeLists.txt
+++ b/source/libs/scheduler/test/CMakeLists.txt
@@ -11,12 +11,12 @@ IF(NOT TD_DARWIN)
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(
schedulerTest
- PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler grant
+ PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler grant
)
ELSE ()
TARGET_LINK_LIBRARIES(
schedulerTest
- PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler
+ PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler
)
ENDIF()
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index d313acc61d..6a10b21c53 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -223,6 +223,7 @@ int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGro
// parname cf
int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]);
int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, void** pVal);
+int32_t streamStateDeleteParName_rocksdb(SStreamState* pState, int64_t groupId);
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove);
diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h
index 863bc76c79..427733e9ec 100644
--- a/source/libs/stream/inc/streamInt.h
+++ b/source/libs/stream/inc/streamInt.h
@@ -192,7 +192,6 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
int32_t streamTaskSendCheckpointReq(SStreamTask* pTask);
-void streamTaskSetFailedCheckpointId(SStreamTask* pTask);
int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask);
int32_t streamTaskGetNumOfUpstream(const SStreamTask* pTask);
int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const char*);
@@ -245,6 +244,9 @@ int32_t streamCreateSinkResTrigger(SStreamTrigger** pTrigger);
int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t trigger, SInterval* pInterval,
STimeWindow* pLatestWindow, const char* id);
+// inject stream errors
+void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 09f4e95376..65746b3100 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -4432,6 +4432,12 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi
return code;
}
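+// Remove the cached table name for the given group id from the "parname" column family.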
+int32_t streamStateDeleteParName_rocksdb(SStreamState* pState, int64_t groupId) {
+ int code = 0;
+ STREAM_STATE_DEL_ROCKSDB(pState, "parname", &groupId);
+ return code;
+}
+
int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) {
int code = 0;
STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen);
diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c
index 7724d1c5ff..d8ddd0fd02 100644
--- a/source/libs/stream/src/streamCheckpoint.c
+++ b/source/libs/stream/src/streamCheckpoint.c
@@ -161,33 +161,52 @@ int32_t streamTaskProcessCheckpointTriggerRsp(SStreamTask* pTask, SCheckpointTri
int32_t streamTaskSendCheckpointTriggerMsg(SStreamTask* pTask, int32_t dstTaskId, int32_t downstreamNodeId,
SRpcHandleInfo* pRpcInfo, int32_t code) {
- int32_t size = sizeof(SMsgHead) + sizeof(SCheckpointTriggerRsp);
- void* pBuf = rpcMallocCont(size);
- if (pBuf == NULL) {
+ int32_t ret = 0;
+ int32_t tlen = 0;
+ void* buf = NULL;
+ SEncoder encoder;
+
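+ // Fill the trigger rsp with the active checkpoint id/transId on success, or -1 markers on
+ // failure, then encode it behind an SMsgHead instead of sending the raw struct.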
+ SCheckpointTriggerRsp req = {.streamId = pTask->id.streamId,
+ .upstreamTaskId = pTask->id.taskId,
+ .taskId = dstTaskId,
+ .rspCode = code};
+
+ if (code == TSDB_CODE_SUCCESS) {
+ req.checkpointId = pTask->chkInfo.pActiveInfo->activeId;
+ req.transId = pTask->chkInfo.pActiveInfo->transId;
+ } else {
+ req.checkpointId = -1;
+ req.transId = -1;
+ }
+
+ tEncodeSize(tEncodeCheckpointTriggerRsp, &req, tlen, ret);
+ if (ret < 0) {
+ stError("s-task:%s encode checkpoint-trigger rsp msg failed, code:%s", pTask->id.idStr, tstrerror(code));
+ return ret;
+ }
+
+ buf = rpcMallocCont(tlen + sizeof(SMsgHead));
+ if (buf == NULL) {
+ stError("s-task:%s malloc chkpt-trigger rsp failed for task:0x%x, since out of memory", pTask->id.idStr, dstTaskId);
return terrno;
}
- SCheckpointTriggerRsp* pRsp = POINTER_SHIFT(pBuf, sizeof(SMsgHead));
+ ((SMsgHead*)buf)->vgId = htonl(downstreamNodeId);
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
- ((SMsgHead*)pBuf)->vgId = htonl(downstreamNodeId);
-
- pRsp->streamId = pTask->id.streamId;
- pRsp->upstreamTaskId = pTask->id.taskId;
- pRsp->taskId = dstTaskId;
- pRsp->rspCode = code;
-
- if (code == TSDB_CODE_SUCCESS) {
- pRsp->checkpointId = pTask->chkInfo.pActiveInfo->activeId;
- pRsp->transId = pTask->chkInfo.pActiveInfo->transId;
- } else {
- pRsp->checkpointId = -1;
- pRsp->transId = -1;
+ tEncoderInit(&encoder, abuf, tlen);
+ if ((ret = tEncodeCheckpointTriggerRsp(&encoder, &req)) < 0) {
+ rpcFreeCont(buf);
+ tEncoderClear(&encoder);
+ stError("encode checkpoint-trigger rsp failed, code:%s", tstrerror(code));
+ return ret;
}
+ tEncoderClear(&encoder);
- SRpcMsg rspMsg = {.code = 0, .pCont = pBuf, .contLen = size, .info = *pRpcInfo};
+ SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = tlen + sizeof(SMsgHead), .info = *pRpcInfo};
tmsgSendRsp(&rspMsg);
- return 0;
+ return ret;
}
int32_t continueDispatchCheckpointTriggerBlock(SStreamDataBlock* pBlock, SStreamTask* pTask) {
@@ -222,14 +241,14 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check
stError("s-task:%s vgId:%d current checkpointId:%" PRId64
" recv expired checkpoint-trigger block, checkpointId:%" PRId64 " transId:%d, discard",
id, vgId, pTask->chkInfo.checkpointId, checkpointId, transId);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
}
if (pActiveInfo->failedId >= checkpointId) {
stError("s-task:%s vgId:%d checkpointId:%" PRId64 " transId:%d, has been marked failed, failedId:%" PRId64
" discard the checkpoint-trigger block",
id, vgId, checkpointId, transId, pActiveInfo->failedId);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
}
if (pTask->chkInfo.checkpointId == checkpointId) {
@@ -255,8 +274,7 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check
"the interrupted checkpoint",
id, vgId, pBlock->srcTaskId);
- streamTaskOpenUpstreamInput(pTask, pBlock->srcTaskId);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
}
if (streamTaskGetStatus(pTask).state == TASK_STATUS__CK) {
@@ -264,14 +282,14 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check
stError("s-task:%s vgId:%d active checkpointId:%" PRId64 ", recv invalid checkpoint-trigger checkpointId:%" PRId64
" discard",
id, vgId, pActiveInfo->activeId, checkpointId);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
} else { // checkpointId == pActiveInfo->activeId
if (pActiveInfo->allUpstreamTriggerRecv == 1) {
stDebug(
"s-task:%s vgId:%d all upstream checkpoint-trigger recv, discard this checkpoint-trigger, "
"checkpointId:%" PRId64 " transId:%d",
id, vgId, checkpointId, transId);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
}
if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) {
@@ -283,17 +301,17 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check
}
if (p->upstreamTaskId == pBlock->srcTaskId) {
- stWarn("s-task:%s repeatly recv checkpoint-source msg from task:0x%x vgId:%d, checkpointId:%" PRId64
+ stWarn("s-task:%s repeatly recv checkpoint-trigger msg from task:0x%x vgId:%d, checkpointId:%" PRId64
", prev recvTs:%" PRId64 " discard",
pTask->id.idStr, p->upstreamTaskId, p->upstreamNodeId, p->checkpointId, p->recvTs);
- return code;
+ return TSDB_CODE_STREAM_INVLD_CHKPT;
}
}
}
}
}
- return 0;
+ return TSDB_CODE_SUCCESS;
}
int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
@@ -317,6 +335,9 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
code = doCheckBeforeHandleChkptTrigger(pTask, checkpointId, pBlock, transId);
streamMutexUnlock(&pTask->lock);
if (code) {
+ if (taskLevel != TASK_LEVEL__SOURCE) { // the checkpoint-trigger is discard, open the inputQ for upstream tasks
+ streamTaskOpenUpstreamInput(pTask, pBlock->srcTaskId);
+ }
streamFreeQitem((SStreamQueueItem*)pBlock);
return code;
}
@@ -330,6 +351,11 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
pActiveInfo->activeId = checkpointId;
pActiveInfo->transId = transId;
+ if (pTask->chkInfo.startTs == 0) {
+ pTask->chkInfo.startTs = taosGetTimestampMs();
+ pTask->execInfo.checkpoint += 1;
+ }
+
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
if (code != TSDB_CODE_SUCCESS) {
stError("s-task:%s handle checkpoint-trigger block failed, code:%s", id, tstrerror(code));
@@ -359,6 +385,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
}
}
+#if 0
+ taosMsleep(20*1000);
+#endif
+
if (taskLevel == TASK_LEVEL__SOURCE) {
int8_t type = pTask->outputInfo.type;
pActiveInfo->allUpstreamTriggerRecv = 1;
@@ -373,6 +403,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
return code;
}
+#if 0
+ chkptFailedByRetrieveReqToSource(pTask, checkpointId);
+#endif
+
if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId);
code = continueDispatchCheckpointTriggerBlock(pBlock, pTask); // todo handle this failure
@@ -382,11 +416,6 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
streamFreeQitem((SStreamQueueItem*)pBlock);
}
} else if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) {
- if (pTask->chkInfo.startTs == 0) {
- pTask->chkInfo.startTs = taosGetTimestampMs();
- pTask->execInfo.checkpoint += 1;
- }
-
// todo: handle this
// update the child Id for downstream tasks
code = streamAddCheckpointReadyMsg(pTask, pBlock->srcTaskId, pTask->info.selfChildId, checkpointId);
@@ -562,7 +591,7 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
}
streamMutexUnlock(&pInfo->lock);
- stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", current checkpointId:%" PRId64,
+ stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", latest checkpointId:%" PRId64,
pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId);
}
@@ -682,15 +711,22 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
return TSDB_CODE_SUCCESS;
}
-void streamTaskSetFailedCheckpointId(SStreamTask* pTask) {
+void streamTaskSetFailedCheckpointId(SStreamTask* pTask, int64_t failedId) {
struct SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo;
- if (pInfo->activeId <= 0) {
- stWarn("s-task:%s checkpoint-info is cleared now, not set the failed checkpoint info", pTask->id.idStr);
+ if (failedId <= 0) {
+ stWarn("s-task:%s failedId is 0, not update the failed checkpoint info, current failedId:%" PRId64
+ " activeId:%" PRId64,
+ pTask->id.idStr, pInfo->failedId, pInfo->activeId);
} else {
- pInfo->failedId = pInfo->activeId;
- stDebug("s-task:%s mark and set the failed checkpointId:%" PRId64 " (transId:%d)", pTask->id.idStr, pInfo->activeId,
- pInfo->transId);
+ if (failedId <= pInfo->failedId) {
+ stDebug("s-task:%s failedId:%" PRId64 " not update to:%" PRId64, pTask->id.idStr, pInfo->failedId, failedId);
+ } else {
+ stDebug("s-task:%s mark and set the failed checkpointId:%" PRId64 " (transId:%d) activeId:%" PRId64
+ " prev failedId:%" PRId64,
+ pTask->id.idStr, failedId, pInfo->transId, pInfo->activeId, pInfo->failedId);
+ pInfo->failedId = failedId;
+ }
}
}
@@ -698,7 +734,7 @@ void streamTaskSetCheckpointFailed(SStreamTask* pTask) {
streamMutexLock(&pTask->lock);
ETaskStatus status = streamTaskGetStatus(pTask).state;
if (status == TASK_STATUS__CK) {
- streamTaskSetFailedCheckpointId(pTask);
+ streamTaskSetFailedCheckpointId(pTask, pTask->chkInfo.pActiveInfo->activeId);
}
streamMutexUnlock(&pTask->lock);
}
@@ -876,8 +912,9 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
code = streamSendChkptReportMsg(pTask, &pTask->chkInfo, dropRelHTask);
}
} else { // clear the checkpoint info if failed
+ // set failed checkpoint id before clear the checkpoint info
streamMutexLock(&pTask->lock);
- streamTaskSetFailedCheckpointId(pTask); // set failed checkpoint id before clear the checkpoint info
+ streamTaskSetFailedCheckpointId(pTask, ckId);
streamMutexUnlock(&pTask->lock);
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
@@ -1101,23 +1138,43 @@ int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) {
return TSDB_CODE_INVALID_PARA;
}
- SRetrieveChkptTriggerReq* pReq = rpcMallocCont(sizeof(SRetrieveChkptTriggerReq));
- if (pReq == NULL) {
- code = terrno;
- stError("vgId:%d failed to create msg to retrieve trigger msg for task:%s exec, code:out of memory", vgId, pId);
+ int32_t ret = 0;
+ int32_t tlen = 0;
+ void* buf = NULL;
+ SRpcMsg rpcMsg = {0};
+ SEncoder encoder;
+
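+ // Encode a retrieve request for the missing checkpoint-trigger and address it to the upstream node via SMsgHead.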
+ SRetrieveChkptTriggerReq req = {.streamId = pTask->id.streamId,
+ .downstreamTaskId = pTask->id.taskId,
+ .downstreamNodeId = vgId,
+ .upstreamTaskId = pUpstreamTask->taskId,
+ .upstreamNodeId = pUpstreamTask->nodeId,
+ .checkpointId = checkpointId};
+
+ tEncodeSize(tEncodeRetrieveChkptTriggerReq, &req, tlen, ret);
+ if (ret < 0) {
+ stError("encode retrieve checkpoint-trigger msg failed, code:%s", tstrerror(code));
+ }
+
+ buf = rpcMallocCont(tlen + sizeof(SMsgHead));
+ if (buf == NULL) {
+ stError("vgId:%d failed to create retrieve checkpoint-trigger msg for task:%s exec, code:out of memory", vgId, pId);
continue;
}
- pReq->head.vgId = htonl(pUpstreamTask->nodeId);
- pReq->streamId = pTask->id.streamId;
- pReq->downstreamTaskId = pTask->id.taskId;
- pReq->downstreamNodeId = vgId;
- pReq->upstreamTaskId = pUpstreamTask->taskId;
- pReq->upstreamNodeId = pUpstreamTask->nodeId;
- pReq->checkpointId = checkpointId;
+ ((SRetrieveChkptTriggerReq*)buf)->head.vgId = htonl(pUpstreamTask->nodeId);
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
- SRpcMsg rpcMsg = {0};
- initRpcMsg(&rpcMsg, TDMT_STREAM_RETRIEVE_TRIGGER, pReq, sizeof(SRetrieveChkptTriggerReq));
+ tEncoderInit(&encoder, abuf, tlen);
+ if ((code = tEncodeRetrieveChkptTriggerReq(&encoder, &req)) < 0) {
+ rpcFreeCont(buf);
+ tEncoderClear(&encoder);
+ stError("encode retrieve checkpoint-trigger req failed, code:%s", tstrerror(code));
+ continue;
+ }
+ tEncoderClear(&encoder);
+
+ initRpcMsg(&rpcMsg, TDMT_STREAM_RETRIEVE_TRIGGER, buf, tlen + sizeof(SMsgHead));
code = tmsgSendReq(&pUpstreamTask->epSet, &rpcMsg);
if (code == TSDB_CODE_SUCCESS) {
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index e0fa199199..5807240f5e 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -1170,6 +1170,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) {
stError("s-task:%s invalid number of sent readyMsg:%d to upstream:%d", id, num,
(int32_t)taosArrayGetSize(pTask->upstreamInfo.pList));
+ streamMutexUnlock(&pActiveInfo->lock);
return TSDB_CODE_STREAM_INTERNAL_ERROR;
}
@@ -1412,6 +1413,7 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
if (size > 0) {
STaskCheckpointReadyInfo* pReady = taosArrayGet(pActiveInfo->pReadyMsgList, 0);
if (pReady == NULL) {
+ streamMutexUnlock(&pActiveInfo->lock);
return terrno;
}
diff --git a/source/libs/stream/src/streamErrorInjection.c b/source/libs/stream/src/streamErrorInjection.c
new file mode 100644
index 0000000000..515845ba2b
--- /dev/null
+++ b/source/libs/stream/src/streamErrorInjection.c
@@ -0,0 +1,17 @@
+#include "streamInt.h"
+
+/**
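+ * Error-injection helper: mark the given checkpointId as failed on this task, then sleep
+ * past the checkpoint interval so that the next checkpoint request is issued by mnode.
+ *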
+ * prerequisite: the checkpoint interval should be 60s
+ * @param pTask
+ * @param checkpointId
+ */
+void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId) {
+ streamMutexLock(&pTask->lock);
+
+ // set current checkpoint failed immediately, set failed checkpoint id before clear the checkpoint info
+ streamTaskSetFailedCheckpointId(pTask, checkpointId);
+ streamMutexUnlock(&pTask->lock);
+
+ // the checkpoint interval should be 60s, and the next checkpoint req should be issued by mnode
+ taosMsleep(65*1000);
+}
\ No newline at end of file
diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c
index 20c3e5a6b9..401aa7530d 100644
--- a/source/libs/stream/src/streamQueue.c
+++ b/source/libs/stream/src/streamQueue.c
@@ -166,6 +166,8 @@ const char* streamQueueItemGetTypeStr(int32_t type) {
return "checkpoint-trigger";
case STREAM_INPUT__TRANS_STATE:
return "trans-state";
+ case STREAM_INPUT__REF_DATA_BLOCK:
+ return "ref-block";
default:
return "datablock";
}
@@ -211,7 +213,7 @@ EExtractDataCode streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueIte
// do not merge blocks for sink node and check point data block
int8_t type = qItem->type;
if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
- type == STREAM_INPUT__TRANS_STATE) {
+ type == STREAM_INPUT__TRANS_STATE || type == STREAM_INPUT__REF_DATA_BLOCK) {
const char* p = streamQueueItemGetTypeStr(type);
if (*pInput == NULL) {
@@ -504,4 +506,4 @@ void streamTaskPutbackToken(STokenBucket* pBucket) {
// size in KB
void streamTaskConsumeQuota(STokenBucket* pBucket, int32_t bytes) { pBucket->quotaRemain -= SIZE_IN_MiB(bytes); }
-void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputq.status, TASK_INPUT_STATUS__FAILED); }
\ No newline at end of file
+void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputq.status, TASK_INPUT_STATUS__FAILED); }
diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c
index 8c79abfd02..9e131fd526 100644
--- a/source/libs/stream/src/streamSched.c
+++ b/source/libs/stream/src/streamSched.c
@@ -83,13 +83,37 @@ int32_t streamTrySchedExec(SStreamTask* pTask) {
}
int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId, int32_t execType) {
- SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
- if (pRunReq == NULL) {
+ int32_t code = 0;
+ int32_t tlen = 0;
+
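+ // Encode the run request and prepend an SMsgHead before putting it into the stream queue.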
+ SStreamTaskRunReq req = {.streamId = streamId, .taskId = taskId, .reqType = execType};
+
+ tEncodeSize(tEncodeStreamTaskRunReq, &req, tlen, code);
+ if (code < 0) {
+ stError("s-task:0x%" PRIx64 " vgId:%d encode stream task run req failed, code:%s", streamId, vgId, tstrerror(code));
+ return code;
+ }
+
+ void* buf = rpcMallocCont(tlen + sizeof(SMsgHead));
+ if (buf == NULL) {
stError("vgId:%d failed to create msg to start stream task:0x%x exec, type:%d, code:%s", vgId, taskId, execType,
tstrerror(terrno));
return terrno;
}
+ ((SMsgHead*)buf)->vgId = vgId;
+ char* bufx = POINTER_SHIFT(buf, sizeof(SMsgHead));
+
+ SEncoder encoder;
+ tEncoderInit(&encoder, (uint8_t*)bufx, tlen);
+ if ((code = tEncodeStreamTaskRunReq(&encoder, &req)) < 0) {
+ rpcFreeCont(buf);
+ tEncoderClear(&encoder);
+ stError("s-task:0x%x vgId:%d encode run task msg failed, code:%s", taskId, vgId, tstrerror(code));
+ return code;
+ }
+ tEncoderClear(&encoder);
+
if (streamId != 0) {
stDebug("vgId:%d create msg to for task:0x%x, exec type:%d, %s", vgId, taskId, execType,
streamTaskGetExecType(execType));
@@ -97,13 +121,8 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3
stDebug("vgId:%d create msg to exec, type:%d, %s", vgId, execType, streamTaskGetExecType(execType));
}
- pRunReq->head.vgId = vgId;
- pRunReq->streamId = streamId;
- pRunReq->taskId = taskId;
- pRunReq->reqType = execType;
-
- SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
- int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
+ SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = buf, .contLen = tlen + sizeof(SMsgHead)};
+ code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
if (code) {
stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId);
}
diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c
index ed12687e41..9c16ff036e 100644
--- a/source/libs/stream/src/streamStartTask.c
+++ b/source/libs/stream/src/streamStartTask.c
@@ -433,6 +433,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
// send hb msg to mnode before closing all tasks.
int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList);
if (code != TSDB_CODE_SUCCESS) {
+ streamMetaRUnLock(pMeta);
return code;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index 794fc346bf..5461b5899b 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -525,6 +525,18 @@ _end:
return code;
}
+int32_t streamStateDeleteParName(SStreamState* pState, int64_t groupId) {
+ int32_t code = tSimpleHashRemove(pState->parNameMap, &groupId, sizeof(int64_t));
+ if (TSDB_CODE_SUCCESS != code) {
+ qWarn("failed to remove parname from cache, code:%d", code);
+ }
+ code = streamStateDeleteParName_rocksdb(pState, groupId);
+ if (TSDB_CODE_SUCCESS != code) {
+ qWarn("failed to remove parname from rocksdb, code:%d", code);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
void streamStateDestroy(SStreamState* pState, bool remove) {
streamFileStateDestroy(pState->pFileState);
// streamStateDestroy_rocksdb(pState, remove);
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index a044859b80..f46228fd47 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -22,6 +22,7 @@
#include "tstream.h"
#include "ttimer.h"
#include "wal.h"
+#include "streamMsg.h"
static void streamTaskDestroyUpstreamInfo(SUpstreamInfo* pUpstreamInfo);
static int32_t streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated);
@@ -1246,13 +1247,13 @@ void streamTaskDestroyActiveChkptInfo(SActiveCheckpointInfo* pInfo) {
taosMemoryFree(pInfo);
}
-//NOTE: clear the checkpoint id, and keep the failed id
+// NOTE: clear the checkpoint id, and keep the failed id
+// failedId for a task will increase as the checkpoint id increases.
void streamTaskClearActiveInfo(SActiveCheckpointInfo* pInfo) {
pInfo->activeId = 0;
pInfo->transId = 0;
pInfo->allUpstreamTriggerRecv = 0;
pInfo->dispatchTrigger = false;
-// pInfo->failedId = 0;
taosArrayClear(pInfo->pDispatchTriggerList);
taosArrayClear(pInfo->pCheckpointReadyRecvList);
@@ -1303,4 +1304,178 @@ void streamTaskFreeRefId(int64_t* pRefId) {
}
metaRefMgtRemove(pRefId);
+}
+
+
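+// Serialize a stream task; the field order here must stay in sync with tDecodeStreamTask below.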
+int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
+ int32_t code = 0;
+ int32_t lino;
+
+ TAOS_CHECK_EXIT(tStartEncode(pEncoder));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->ver));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->id.streamId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->id.taskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.trigger));
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.taskLevel));
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.type));
+ TAOS_CHECK_EXIT(tEncodeI16(pEncoder, pTask->msgInfo.msgType));
+
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.taskStatus));
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.schedStatus));
+
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.selfChildId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.nodeId));
+ TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.epSet));
+ TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.mnodeEpset));
+
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointId));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointVer));
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.fillHistory));
+
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->hTaskInfo.id.streamId));
+ int32_t taskId = pTask->hTaskInfo.id.taskId;
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId));
+
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->streamTaskId.streamId));
+ taskId = pTask->streamTaskId.taskId;
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId));
+
+ TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.minVer));
+ TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.maxVer));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.skey));
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.ekey));
+
+ int32_t epSz = taosArrayGetSize(pTask->upstreamInfo.pList);
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, epSz));
+ for (int32_t i = 0; i < epSz; i++) {
+ SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i);
+ TAOS_CHECK_EXIT(tEncodeStreamEpInfo(pEncoder, pInfo));
+ }
+
+ if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
+ TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->exec.qmsg));
+ }
+
+ if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.tbSink.stbUid));
+ TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.tbSink.stbFullName));
+ TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, pTask->outputInfo.tbSink.pSchemaWrapper));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) {
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.smaSink.smaId));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.fetchSink.reserved));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.taskId));
+ TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.nodeId));
+ TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->outputInfo.fixedDispatcher.epSet));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ TAOS_CHECK_EXIT(tSerializeSUseDbRspImp(pEncoder, &pTask->outputInfo.shuffleDispatcher.dbInfo));
+ TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.shuffleDispatcher.stbFullName));
+ }
+ TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->info.delaySchedParam));
+ TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5));
+ TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1));
+
+ tEndEncode(pEncoder);
+_exit:
+ return code;
+}
+
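+// Deserialize a stream task; versions outside (SSTREAM_TASK_INCOMPATIBLE_VER, SSTREAM_TASK_VER] are rejected.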
+int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
+ int32_t taskId = 0;
+ int32_t code = 0;
+ int32_t lino;
+
+ TAOS_CHECK_EXIT(tStartDecode(pDecoder));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->ver));
+ if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER || pTask->ver > SSTREAM_TASK_VER) {
+ TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG);
+ }
+
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->id.streamId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->id.taskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.trigger));
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.taskLevel));
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.type));
+ TAOS_CHECK_EXIT(tDecodeI16(pDecoder, &pTask->msgInfo.msgType));
+
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.taskStatus));
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.schedStatus));
+
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.selfChildId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.nodeId));
+ TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.epSet));
+ TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.mnodeEpset));
+
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointId));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointVer));
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.fillHistory));
+
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->hTaskInfo.id.streamId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId));
+ pTask->hTaskInfo.id.taskId = taskId;
+
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->streamTaskId.streamId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId));
+ pTask->streamTaskId.taskId = taskId;
+
+ TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.minVer));
+ TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.maxVer));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.skey));
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.ekey));
+
+ int32_t epSz = -1;
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &epSz) < 0);
+
+ if ((pTask->upstreamInfo.pList = taosArrayInit(epSz, POINTER_BYTES)) == NULL) {
+ TAOS_CHECK_EXIT(terrno);
+ }
+ for (int32_t i = 0; i < epSz; i++) {
+ SStreamUpstreamEpInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamUpstreamEpInfo));
+ if (pInfo == NULL) {
+ TAOS_CHECK_EXIT(terrno);
+ }
+ if ((code = tDecodeStreamEpInfo(pDecoder, pInfo)) < 0) {
+ taosMemoryFreeClear(pInfo);
+ goto _exit;
+ }
+ if (taosArrayPush(pTask->upstreamInfo.pList, &pInfo) == NULL) {
+ TAOS_CHECK_EXIT(terrno);
+ }
+ }
+
+ if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
+ TAOS_CHECK_EXIT(tDecodeCStrAlloc(pDecoder, &pTask->exec.qmsg));
+ }
+
+ if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.tbSink.stbUid));
+ TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.tbSink.stbFullName));
+ pTask->outputInfo.tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pTask->outputInfo.tbSink.pSchemaWrapper == NULL) {
+ TAOS_CHECK_EXIT(terrno);
+ }
+ TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, pTask->outputInfo.tbSink.pSchemaWrapper));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) {
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.smaSink.smaId));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.fetchSink.reserved));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.taskId));
+ TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.nodeId));
+ TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->outputInfo.fixedDispatcher.epSet));
+ } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ TAOS_CHECK_EXIT(tDeserializeSUseDbRspImp(pDecoder, &pTask->outputInfo.shuffleDispatcher.dbInfo));
+ TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.shuffleDispatcher.stbFullName));
+ }
+ TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->info.delaySchedParam));
+ if (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
+ TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->subtableWithoutMd5));
+ }
+ TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve));
+
+ tEndDecode(pDecoder);
+
+_exit:
+ return code;
}
\ No newline at end of file
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index a3cfa00127..49d5041369 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -445,6 +445,11 @@ int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (!pInfo) {
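+ // Encode -1 as the leading array size so the decoder can tell that no update info was serialized.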
+ if (tEncodeI32(pEncoder, -1) < 0) {
+ code = TSDB_CODE_FAILED;
+ QUERY_CHECK_CODE(code, lino, _end);
+ }
+ uDebug("%s line:%d. it did not have updateinfo", __func__, __LINE__);
return TSDB_CODE_SUCCESS;
}
@@ -550,6 +555,10 @@ int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo) {
int32_t size = 0;
if (tDecodeI32(pDeCoder, &size) < 0) return -1;
+
+ if (size < 0) {
+ return -1;
+ }
pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
QUERY_CHECK_NULL(pInfo->pTsBuckets, code, lino, _error, terrno);
diff --git a/source/libs/sync/inc/syncIndexMgr.h b/source/libs/sync/inc/syncIndexMgr.h
index 3c372a3b12..ed7a17b4c7 100644
--- a/source/libs/sync/inc/syncIndexMgr.h
+++ b/source/libs/sync/inc/syncIndexMgr.h
@@ -29,6 +29,7 @@ typedef struct SSyncIndexMgr {
SyncTerm privateTerm[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; // for advanced function
int64_t startTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
int64_t recvTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
+ int64_t sentTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
int32_t replicaNum;
int32_t totalReplicaNum;
SSyncNode *pNode;
@@ -45,7 +46,9 @@ void syncIndexMgrCopyIfExist(SSyncIndexMgr * pNewIndex, SSyncIndexMgr
void syncIndexMgrSetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t startTime);
int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t recvTime);
+void syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime);
int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
+int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
void syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term);
SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 0b653ddbe9..b19d1184a7 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -234,6 +234,10 @@ struct SSyncNode {
bool isStart;
+ // statistics
+ int32_t sendCount;
+ int32_t recvCount;
+ int32_t slowCount;
};
// open/close --------------
diff --git a/source/libs/sync/inc/syncPipeline.h b/source/libs/sync/inc/syncPipeline.h
index 427a3690f2..eeb24d2f16 100644
--- a/source/libs/sync/inc/syncPipeline.h
+++ b/source/libs/sync/inc/syncPipeline.h
@@ -39,6 +39,7 @@ typedef struct SSyncLogReplMgr {
int64_t peerStartTime;
int32_t retryBackoff;
int32_t peerId;
+ int32_t sendCount;
} SSyncLogReplMgr;
typedef struct SSyncLogBufEntry {
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index 540255c200..839b87d500 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -91,6 +91,7 @@ typedef struct SSyncSnapshotReceiver {
// update when begin
void *pWriter;
+ TdThreadMutex writerMutex;
SSnapshotParam snapshotParam;
SSnapshot snapshot;
diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h
index 1606f47592..7b71491f47 100644
--- a/source/libs/sync/inc/syncUtil.h
+++ b/source/libs/sync/inc/syncUtil.h
@@ -46,12 +46,12 @@ extern "C" {
#define sLDebug(...) if (sDebugFlag & DEBUG_DEBUG) { taosPrintLongString("SYN ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); }
#define sLTrace(...) if (sDebugFlag & DEBUG_TRACE) { taosPrintLongString("SYN ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); }
-#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, pNode, __VA_ARGS__); }
-#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, pNode, __VA_ARGS__); }
-#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, pNode, __VA_ARGS__); }
-#define sNInfo(pNode, ...) if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, pNode, __VA_ARGS__); }
-#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, pNode, __VA_ARGS__); }
-#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, pNode, __VA_ARGS__); }
+#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, true, pNode, __VA_ARGS__); }
+#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, true, pNode, __VA_ARGS__); }
+#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, true, pNode, __VA_ARGS__); }
+#define sNInfo(pNode, ...) if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, true, pNode, __VA_ARGS__); }
+#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, false, pNode, __VA_ARGS__); }
+#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, false, pNode, __VA_ARGS__); }
#define sSFatal(pSender, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintSnapshotSenderLog("SYN FATAL ", DEBUG_FATAL, 255, pSender, __VA_ARGS__); }
#define sSError(pSender, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintSnapshotSenderLog("SYN ERROR ", DEBUG_ERROR, 255, pSender, __VA_ARGS__); }
@@ -85,7 +85,8 @@ void syncUtilMsgHtoN(void* msg);
void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf);
-void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...);
+void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode,
+ const char* format, ...);
void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotSender* pSender,
const char* format, ...);
void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver,
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 0345880874..9fc39ec463 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -104,6 +104,11 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
goto _IGNORE;
}
+ int32_t nRef = atomic_add_fetch_32(&ths->recvCount, 1);
+ if (nRef <= 0) {
+ sError("vgId:%d, recv count is %d", ths->vgId, nRef);
+ }
+
int32_t code = syncBuildAppendEntriesReply(&rpcRsp, ths->vgId);
if (code != 0) {
syncLogRecvAppendEntries(ths, pMsg, "build rsp error");
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 4946912941..ec7354040f 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -155,6 +155,18 @@ void syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, i
DID(pRaftId), CID(pRaftId));
}
+void syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime) {
+ for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
+ if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
+ (pIndexMgr->sentTimeArr)[i] = sentTime;
+ return;
+ }
+ }
+
+ sError("vgId:%d, indexmgr set sent-time:%" PRId64 " for dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, sentTime,
+ DID(pRaftId), CID(pRaftId));
+}
+
int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) {
for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
@@ -168,6 +180,19 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId
return TSDB_CODE_SYN_INVALID_ID;
}
+int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) {
+ for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
+ if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
+ int64_t sentTime = (pIndexMgr->sentTimeArr)[i];
+ return sentTime;
+ }
+ }
+
+ sError("vgId:%d, indexmgr get sent-time from dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, DID(pRaftId),
+ CID(pRaftId));
+ return TSDB_CODE_SYN_INVALID_ID;
+}
+
void syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term) {
for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index 247b5624c3..7466aaf66e 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -88,11 +88,33 @@ int32_t syncNodeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftI
pMsg->destId = *destRaftId;
TAOS_CHECK_RETURN(syncNodeSendMsgById(destRaftId, pSyncNode, pRpcMsg));
+ int32_t nRef = 0;
+ if (pSyncNode != NULL) {
+ nRef = atomic_add_fetch_32(&pSyncNode->sendCount, 1);
+ if (nRef <= 0) {
+ sError("vgId:%d, send count is %d", pSyncNode->vgId, nRef);
+ }
+ }
+
+ SSyncLogReplMgr* mgr = syncNodeGetLogReplMgr(pSyncNode, (SRaftId*)destRaftId);
+ if (mgr != NULL) {
+ nRef = atomic_add_fetch_32(&mgr->sendCount, 1);
+ if (nRef <= 0) {
+ sError("vgId:%d, send count is %d", pSyncNode->vgId, nRef);
+ }
+ }
+
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
int32_t syncNodeSendHeartbeat(SSyncNode* pSyncNode, const SRaftId* destId, SRpcMsg* pMsg) {
- return syncNodeSendMsgById(destId, pSyncNode, pMsg);
+ SRaftId destIdTmp = *destId;
+ TAOS_CHECK_RETURN(syncNodeSendMsgById(destId, pSyncNode, pMsg));
+
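+ // remember when this heartbeat went out so node logs can report per-peer send times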
+ int64_t tsMs = taosGetTimestampMs();
+ syncIndexMgrSetSentTime(pSyncNode->pMatchIndex, &destIdTmp, tsMs);
+
+ return TSDB_CODE_SUCCESS;
}
int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) {
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 78fda6b093..a89667ad3d 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -429,6 +429,12 @@ int32_t snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId, SSyncSnapsh
pReceiver->startTime = 0;
pReceiver->ack = SYNC_SNAPSHOT_SEQ_BEGIN;
pReceiver->pWriter = NULL;
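+ // writerMutex serializes access to pWriter across the stop, finish, and destroy paths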
+ code = taosThreadMutexInit(&pReceiver->writerMutex, NULL);
+ if (code != 0) {
+ taosMemoryFree(pReceiver);
+ pReceiver = NULL;
+ TAOS_RETURN(code);
+ }
pReceiver->pSyncNode = pSyncNode;
pReceiver->fromId = fromId;
pReceiver->term = raftStoreGetTerm(pSyncNode);
@@ -440,6 +446,10 @@ int32_t snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId, SSyncSnapsh
SSyncSnapBuffer *pRcvBuf = NULL;
code = syncSnapBufferCreate(&pRcvBuf);
if (pRcvBuf == NULL) {
+ int32_t ret = taosThreadMutexDestroy(&pReceiver->writerMutex);
+ if (ret != 0) {
+ sError("failed to destroy mutex since %s", tstrerror(ret));
+ }
taosMemoryFree(pReceiver);
pReceiver = NULL;
TAOS_RETURN(code);
@@ -468,6 +478,7 @@ static int32_t snapshotReceiverClearInfoData(SSyncSnapshotReceiver *pReceiver) {
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
if (pReceiver == NULL) return;
+ (void)taosThreadMutexLock(&pReceiver->writerMutex);
// close writer
if (pReceiver->pWriter != NULL) {
int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
@@ -478,6 +489,9 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
}
pReceiver->pWriter = NULL;
}
+ (void)taosThreadMutexUnlock(&pReceiver->writerMutex);
+
+ (void)taosThreadMutexDestroy(&pReceiver->writerMutex);
// free snap buf
if (pReceiver->pRcvBuf) {
@@ -556,7 +570,8 @@ void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
int8_t stopped = !atomic_val_compare_exchange_8(&pReceiver->start, true, false);
if (stopped) return;
- (void)taosThreadMutexLock(&pReceiver->pRcvBuf->mutex);
+
+ (void)taosThreadMutexLock(&pReceiver->writerMutex);
{
if (pReceiver->pWriter != NULL) {
int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
@@ -568,7 +583,11 @@ void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
} else {
sRInfo(pReceiver, "snapshot receiver stop, writer is null");
}
+ }
+ (void)taosThreadMutexUnlock(&pReceiver->writerMutex);
+ (void)taosThreadMutexLock(&pReceiver->pRcvBuf->mutex);
+ {
syncSnapBufferReset(pReceiver->pRcvBuf);
(void)snapshotReceiverClearInfoData(pReceiver);
@@ -600,15 +619,19 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
raftStoreSetTerm(pReceiver->pSyncNode, pReceiver->snapshot.lastApplyTerm);
}
- // stop writer, apply data
- code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true,
- &pReceiver->snapshot);
- if (code != 0) {
- sRError(pReceiver, "snapshot receiver apply failed since %s", tstrerror(code));
- TAOS_RETURN(code);
+ (void)taosThreadMutexLock(&pReceiver->writerMutex);
+ if (pReceiver->pWriter != NULL) {
+ // stop writer, apply data
+ code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true,
+ &pReceiver->snapshot);
+ if (code != 0) {
+ sRError(pReceiver, "snapshot receiver apply failed since %s", tstrerror(code));
+ TAOS_RETURN(code);
+ }
+ pReceiver->pWriter = NULL;
+ sRInfo(pReceiver, "snapshot receiver write stopped");
}
- pReceiver->pWriter = NULL;
- sRInfo(pReceiver, "snapshot receiver write stopped");
+ (void)taosThreadMutexUnlock(&pReceiver->writerMutex);
// update progress
pReceiver->ack = SYNC_SNAPSHOT_SEQ_END;
diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c
index 9058b6ecef..65c7f9761e 100644
--- a/source/libs/sync/src/syncUtil.c
+++ b/source/libs/sync/src/syncUtil.c
@@ -22,6 +22,7 @@
#include "syncRaftStore.h"
#include "syncSnapshot.h"
#include "tglobal.h"
+#include "ttime.h"
static void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) {
int32_t len = tsnprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex);
@@ -108,13 +109,40 @@ void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf) {
(void)snprintf(buf, TSDB_ARB_TOKEN_SIZE, "d%d#g%d#%" PRId64 "#%d", nodeId, groupId, currentMs, randVal);
}
+static void syncPrintTime(bool formatTime, int32_t* len, int64_t tsMs, int32_t i, char* buf, int32_t bufLen) {
+ if (formatTime) {
+ char pBuf[TD_TIME_STR_LEN] = {0};
+ if (tsMs > 0) {
+ if (taosFormatUtcTime(pBuf, TD_TIME_STR_LEN, tsMs, TSDB_TIME_PRECISION_MILLI) != 0) {
+ pBuf[0] = '\0';
+ }
+ }
+ (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%s", i, pBuf);
+ } else {
+ (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%" PRId64, i, tsMs);
+ }
+}
+
// for leader
-static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
+static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
int32_t len = 0;
len += tsnprintf(buf + len, bufLen - len, "%s", "{");
for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i]));
- len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs);
+ syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
+ if (i < pSyncNode->replicaNum - 1) {
+ len += tsnprintf(buf + len, bufLen - len, "%s", ",");
+ }
+ }
+ len += tsnprintf(buf + len, bufLen - len, "%s", "}");
+}
+
+static void syncSentHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
+ int32_t len = 0;
+ len += tsnprintf(buf + len, bufLen - len, "%s", "{");
+ for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
+ int64_t tsMs = syncIndexMgrGetSentTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i]));
+ syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
if (i < pSyncNode->replicaNum - 1) {
len += tsnprintf(buf + len, bufLen - len, "%s", ",");
}
@@ -123,12 +151,12 @@ static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t b
}
// for follower
-static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
+static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
int32_t len = 0;
len += tsnprintf(buf + len, bufLen - len, "%s", "{");
for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->replicasId[i]));
- len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs);
+ syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
if (i < pSyncNode->replicaNum - 1) {
len += tsnprintf(buf + len, bufLen - len, "%s", ",");
}
@@ -152,8 +180,9 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe
for (int32_t i = 0; i < pSyncNode->replicaNum; i++) {
SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i];
if (pMgr == NULL) break;
- len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored,
+ len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] ", i, pMgr->restored,
pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex);
+ len += tsnprintf(buf + len, bufLen - len, "%d", pMgr->sendCount);
if (i + 1 < pSyncNode->replicaNum) {
len += tsnprintf(buf + len, bufLen - len, "%s", ", ");
}
@@ -173,7 +202,8 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
len += tsnprintf(buf + len, bufLen - len, "%s", "}");
}
-void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) {
+void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode,
+ const char* format, ...) {
if (pNode == NULL || pNode->pLogStore == NULL) return;
int64_t currentTerm = raftStoreGetTerm(pNode);
@@ -205,10 +235,13 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
syncLogBufferStates2Str(pNode, bufferStatesStr, sizeof(bufferStatesStr));
char hbrTimeStr[256] = "";
- syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr));
+ syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr), formatTime);
char hbTimeStr[256] = "";
- syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr));
+ syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr), formatTime);
+
+ char sentHbTimeStr[512] = "";
+ syncSentHearbeatTime2Str(pNode, sentHbTimeStr, sizeof(sentHbTimeStr), formatTime);
char eventLog[512]; // {0};
va_list argpointer;
@@ -234,14 +267,15 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
", elect-times:%d, as-leader-times:%d, as-assigned-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, "
"aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64
", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64
- ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s, arb-token:%s",
+ ", buffer:%s, repl-mgrs:%s, members:%s, send hb:%s, recv hb:%s, recv hb-reply:%s, arb-token:%s, msg[sent:%d, recv:%d, slow-recev:%d]",
pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, pNode->assignedCommitIndex,
appliedIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex,
snapshot.lastApplyTerm, pNode->electNum, pNode->becomeLeaderNum, pNode->becomeAssignedLeaderNum,
pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex,
pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish,
syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, bufferStatesStr,
- replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr, pNode->arbToken);
+ replMgrStatesStr, cfgStr, sentHbTimeStr, hbTimeStr, hbrTimeStr, pNode->arbToken, pNode->sendCount, pNode->recvCount,
+ pNode->slowCount);
}
}
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 5c79b379ed..2ba88cdcc6 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -96,7 +96,7 @@ typedef void* queue[2];
// #define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit
// #define TRANS_RETRY_INTERVAL 15 // retry interval (ms)
-#define TRANS_CONN_TIMEOUT 3000 // connect timeout (ms)
+#define TRANS_CONN_TIMEOUT 5000 // connect timeout (ms)
#define TRANS_READ_TIMEOUT 3000 // read timeout (ms)
#define TRANS_PACKET_LIMIT 1024 * 1024 * 512
@@ -452,6 +452,7 @@ void transPrintEpSet(SEpSet* pEpSet);
void transFreeMsg(void* msg);
int32_t transCompressMsg(char* msg, int32_t len);
int32_t transDecompressMsg(char** msg, int32_t* len);
+int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen);
int32_t transOpenRefMgt(int size, void (*func)(void*));
void transCloseRefMgt(int32_t refMgt);
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 2aeffc6395..8377a1456d 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -254,6 +254,7 @@ static FORCE_INLINE void cliMayUpdateFqdnCache(SHashObj* cache, char* dst);
// process data read from server, add decompress etc later
// handle except about conn
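+// REQS_ON_CONN: total requests pending on a connection (to-send plus sent-out); a NULL conn counts as zero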
+#define REQS_ON_CONN(conn) (conn ? (transQueueSize(&conn->reqsToSend) + transQueueSize(&conn->reqsSentOut)) : 0)
static void doNotifyCb(SCliReq* pReq, SCliThrd* pThrd, int32_t code);
// handle req from app
static void cliHandleReq(SCliThrd* pThrd, SCliReq* pReq);
@@ -289,7 +290,7 @@ int32_t cliMayGetStateByQid(SCliThrd* pThrd, SCliReq* pReq, SCliConn** pConn);
static SCliConn* getConnFromHeapCache(SHashObj* pConnHeapCache, char* key);
static int32_t addConnToHeapCache(SHashObj* pConnHeapCacahe, SCliConn* pConn);
static int32_t delConnFromHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn);
-static int32_t balanceConnHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn);
+static int8_t balanceConnHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn, SCliConn** pNewConn);
// thread obj
static int32_t createThrdObj(void* trans, SCliThrd** pThrd);
@@ -327,14 +328,18 @@ typedef struct {
int64_t lastConnFailTs;
} SHeap;
-int32_t compareHeapNode(const HeapNode* a, const HeapNode* b);
-int32_t transHeapInit(SHeap* heap, int32_t (*cmpFunc)(const HeapNode* a, const HeapNode* b));
-void transHeapDestroy(SHeap* heap);
-int32_t transHeapGet(SHeap* heap, SCliConn** p);
-int32_t transHeapInsert(SHeap* heap, SCliConn* p);
-int32_t transHeapDelete(SHeap* heap, SCliConn* p);
-int32_t transHeapBalance(SHeap* heap, SCliConn* p);
-int32_t transHeapUpdateFailTs(SHeap* heap, SCliConn* p);
+static int32_t compareHeapNode(const HeapNode* a, const HeapNode* b);
+static int32_t transHeapInit(SHeap* heap, int32_t (*cmpFunc)(const HeapNode* a, const HeapNode* b));
+static void transHeapDestroy(SHeap* heap);
+
+static int32_t transHeapGet(SHeap* heap, SCliConn** p);
+static int32_t transHeapInsert(SHeap* heap, SCliConn* p);
+static int32_t transHeapDelete(SHeap* heap, SCliConn* p);
+static int32_t transHeapBalance(SHeap* heap, SCliConn* p);
+static int32_t transHeapUpdateFailTs(SHeap* heap, SCliConn* p);
+static int32_t transHeapMayBalance(SHeap* heap, SCliConn* p);
+
+static FORCE_INLINE void logConnMissHit(SCliConn* pConn);
#define CLI_RELEASE_UV(loop) \
do { \
@@ -494,15 +499,19 @@ int8_t cliMayRecycleConn(SCliConn* conn) {
if (transQueueSize(&conn->reqsToSend) == 0 && transQueueSize(&conn->reqsSentOut) == 0 &&
taosHashGetSize(conn->pQTable) == 0) {
cliResetConnTimer(conn);
+ conn->forceDelFromHeap = 1;
code = delConnFromHeapCache(pThrd->connHeapCache, conn);
if (code == TSDB_CODE_RPC_ASYNC_IN_PROCESS) {
tDebug("%s conn %p failed to remove conn from heap cache since %s", CONN_GET_INST_LABEL(conn), conn,
tstrerror(code));
+
+ TAOS_UNUSED(transHeapMayBalance(conn->heap, conn));
return 1;
} else {
if (code != 0) {
tDebug("%s conn %p failed to remove conn from heap cache since %s", CONN_GET_INST_LABEL(conn), conn,
tstrerror(code));
+ return 0;
}
}
addConnToPool(pThrd->pool, conn);
@@ -510,31 +519,10 @@ int8_t cliMayRecycleConn(SCliConn* conn) {
} else if ((transQueueSize(&conn->reqsToSend) == 0) && (transQueueSize(&conn->reqsSentOut) == 0) &&
(taosHashGetSize(conn->pQTable) != 0)) {
tDebug("%s conn %p do balance directly", CONN_GET_INST_LABEL(conn), conn);
- TAOS_UNUSED(transHeapBalance(conn->heap, conn));
+ TAOS_UNUSED(transHeapMayBalance(conn->heap, conn));
} else {
- SCliConn* topConn = NULL;
- if (conn->heap != NULL) {
- code = transHeapGet(conn->heap, &topConn);
- if (code != 0) {
- tDebug("%s conn %p failed to get top conn since %s", CONN_GET_INST_LABEL(conn), conn, tstrerror(code));
- return 0;
- }
-
- if (topConn == conn) {
- return 0;
- }
- int32_t topReqs = transQueueSize(&topConn->reqsSentOut) + transQueueSize(&topConn->reqsToSend);
- int32_t currReqs = transQueueSize(&conn->reqsSentOut) + transQueueSize(&conn->reqsToSend);
- if (topReqs <= currReqs) {
- tTrace("%s conn %p not balance conn heap since top conn has less req, topConnReqs:%d, currConnReqs:%d",
- CONN_GET_INST_LABEL(conn), conn, topReqs, currReqs);
- return 0;
- } else {
- tDebug("%s conn %p do balance conn heap since top conn has more reqs, topConnReqs:%d, currConnReqs:%d",
- CONN_GET_INST_LABEL(conn), conn, topReqs, currReqs);
- TAOS_UNUSED(transHeapBalance(conn->heap, conn));
- }
- }
+ tTrace("%s conn %p may do balance", CONN_GET_INST_LABEL(conn), conn);
+ TAOS_UNUSED(transHeapMayBalance(conn->heap, conn));
}
return 0;
}
@@ -737,7 +725,8 @@ void cliConnTimeout(uv_timer_t* handle) {
return;
}
- tTrace("%s conn %p conn timeout", CONN_GET_INST_LABEL(conn), conn);
+ cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, conn->dstAddr);
+ tTrace("%s conn %p failed to connect %s since conn timeout", CONN_GET_INST_LABEL(conn), conn, conn->dstAddr);
TAOS_UNUSED(transUnrefCliHandle(conn));
}
@@ -785,15 +774,8 @@ void cliConnCheckTimoutMsg(SCliConn* conn) {
if (transQueueSize(&conn->reqsSentOut) == 0) {
return;
}
- code = cliConnRemoveTimeoutMsg(conn);
- if (code != 0) {
- tDebug("%s conn %p do remove timeout msg", CONN_GET_INST_LABEL(conn), conn);
- if (!cliMayRecycleConn(conn)) {
- TAOS_UNUSED(transHeapBalance(conn->heap, conn));
- }
- } else {
- TAOS_UNUSED(cliMayRecycleConn(conn));
- }
+ TAOS_UNUSED(cliConnRemoveTimeoutMsg(conn));
+ TAOS_UNUSED(cliMayRecycleConn(conn));
}
void cliConnTimeout__checkReq(uv_timer_t* handle) {
SCliConn* conn = handle->data;
@@ -1353,13 +1335,31 @@ static void cliBatchSendCb(uv_write_t* req, int status) {
}
}
bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msgLen) {
+ int32_t code = 0;
SCliThrd* pThrd = pConn->hostThrd;
STrans* pInst = pThrd->pInst;
if (pConn->userInited == 1) {
return false;
}
STransMsgHead* pHead = *ppHead;
- STransMsgHead* tHead = taosMemoryCalloc(1, *msgLen + sizeof(pInst->user));
+ int32_t len = *msgLen;
+ char* oriMsg = NULL;
+ int32_t oriLen = 0;
+
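+ // user info must be spliced into an uncompressed payload, so decompress the head first and rebuild the message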
+ if (pHead->comp == 1) {
+ int32_t msgLen = htonl(pHead->msgLen);
+ code = transDecompressMsgExt((char*)(pHead), msgLen, &oriMsg, &oriLen);
+ if (code < 0) {
+ tError("failed to decompress since %s", tstrerror(code));
+ return false;
+ } else {
+ tDebug("decompress msg and resent, compress size %d, raw size %d", msgLen, oriLen);
+ }
+
+ pHead = (STransMsgHead*)oriMsg;
+ len = oriLen;
+ }
+ STransMsgHead* tHead = taosMemoryCalloc(1, len + sizeof(pInst->user));
if (tHead == NULL) {
return false;
}
@@ -1367,14 +1367,17 @@ bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msg
memcpy((char*)tHead + TRANS_MSG_OVERHEAD, pInst->user, sizeof(pInst->user));
memcpy((char*)tHead + TRANS_MSG_OVERHEAD + sizeof(pInst->user), (char*)pHead + TRANS_MSG_OVERHEAD,
- *msgLen - TRANS_MSG_OVERHEAD);
+ len - TRANS_MSG_OVERHEAD);
tHead->withUserInfo = 1;
*ppHead = tHead;
- *msgLen += sizeof(pInst->user);
+ *msgLen = len + sizeof(pInst->user);
pConn->pInitUserReq = tHead;
pConn->userInited = 1;
+ if (oriMsg != NULL) {
+ taosMemoryFree(oriMsg);
+ }
return true;
}
int32_t cliBatchSend(SCliConn* pConn, int8_t direct) {
@@ -1440,9 +1443,8 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) {
pReq->contLen = 0;
}
- int32_t msgLen = transMsgLenFromCont(pReq->contLen);
-
STransMsgHead* pHead = transHeadFromCont(pReq->pCont);
+ int32_t msgLen = transMsgLenFromCont(pReq->contLen);
char* content = pReq->pCont;
int32_t contLen = pReq->contLen;
@@ -1698,7 +1700,7 @@ void cliConnCb(uv_connect_t* req, int status) {
STUB_RAND_NETWORK_ERR(status);
if (status != 0) {
- tDebug("%s conn %p failed to connect to %s since %s", CONN_GET_INST_LABEL(pConn), pConn, pConn->dstAddr,
+ tError("%s conn %p failed to connect to %s since %s", CONN_GET_INST_LABEL(pConn), pConn, pConn->dstAddr,
uv_strerror(status));
cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, pConn->dstAddr);
TAOS_UNUSED(transUnrefCliHandle(pConn));
@@ -1851,15 +1853,20 @@ static FORCE_INLINE int32_t cliUpdateFqdnCache(SHashObj* cache, char* fqdn) {
if (code == 0) {
size_t len = strlen(fqdn);
uint32_t* v = taosHashGet(cache, fqdn, len);
- if (addr != *v) {
- char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0};
- tinet_ntoa(old, *v);
- tinet_ntoa(new, addr);
- tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new);
- code = taosHashPut(cache, fqdn, strlen(fqdn), &addr, sizeof(addr));
+ if (v != NULL) {
+ if (addr != *v) {
+ char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0};
+ tinet_ntoa(old, *v);
+ tinet_ntoa(new, addr);
+ tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new);
+ code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr));
+ }
+ } else {
+ code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr));
}
} else {
code = TSDB_CODE_RPC_FQDN_ERROR; // TSDB_CODE_RPC_INVALID_FQDN;
+ tWarn("failed to get ip from fqdn:%s since %s", fqdn, tstrerror(code));
}
return code;
}
@@ -2952,10 +2959,8 @@ void cliMayResetRespCode(SCliReq* pReq, STransMsg* pResp) {
// check whole vnodes is offline on this vgroup
if (((pCtx->epSet != NULL) && pCtx->epsetRetryCnt >= pCtx->epSet->numOfEps) || pCtx->retryStep > 0) {
- if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
- pResp->code = TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED;
- } else if (pResp->code == TSDB_CODE_RPC_BROKEN_LINK) {
- pResp->code = TSDB_CODE_RPC_SOMENODE_BROKEN_LINK;
+ if (pResp->code == TSDB_CODE_RPC_BROKEN_LINK) {
+ pResp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; // TSDB_CODE_RPC_SOMENODE_BROKEN_LINK;
}
}
}
@@ -3804,6 +3809,8 @@ static FORCE_INLINE int8_t shouldSWitchToOtherConn(SCliConn* pConn, char* key) {
int32_t totalReqs = reqsNum + reqsSentOut;
if (totalReqs >= pInst->shareConnLimit) {
+ logConnMissHit(pConn);
+
if (pConn->list == NULL && pConn->dstAddr != NULL) {
pConn->list = taosHashGet((SHashObj*)pThrd->pool, pConn->dstAddr, strlen(pConn->dstAddr));
if (pConn->list != NULL) {
@@ -3860,11 +3867,12 @@ static SCliConn* getConnFromHeapCache(SHashObj* pConnHeapCache, char* key) {
} else {
tTrace("conn %p get conn from heap cache for key:%s", pConn, key);
if (shouldSWitchToOtherConn(pConn, key)) {
- code = balanceConnHeapCache(pConnHeapCache, pConn);
- if (code != 0) {
- tTrace("failed to balance conn heap cache for key:%s", key);
+ SCliConn* pNewConn = NULL;
+ code = balanceConnHeapCache(pConnHeapCache, pConn, &pNewConn);
+ if (code == 1) {
+ tTrace("conn %p start to handle reqs", pNewConn);
+ return pNewConn;
}
- logConnMissHit(pConn);
return NULL;
}
}
@@ -3916,15 +3924,19 @@ static int32_t delConnFromHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn) {
return code;
}
-static int32_t balanceConnHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn) {
+static int8_t balanceConnHeapCache(SHashObj* pConnHeapCache, SCliConn* pConn, SCliConn** pNewConn) {
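+ // after forcing a rebalance, hand back the lighter top connection (return 1) if it can still absorb more requests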
+ SCliThrd* pThrd = pConn->hostThrd;
+ STrans* pInst = pThrd->pInst;
+ SCliConn* pTopConn = NULL;
if (pConn->heap != NULL && pConn->inHeap != 0) {
- SHeap* heap = pConn->heap;
- tTrace("conn %p'heap may should do balance, numOfConn:%d", pConn, (int)(heap->heap->nelts));
- int64_t now = taosGetTimestampMs();
- if (((now - heap->lastUpdateTs) / 1000) > 30) {
- heap->lastUpdateTs = now;
- tTrace("conn %p'heap do balance, numOfConn:%d", pConn, (int)(heap->heap->nelts));
- return transHeapBalance(pConn->heap, pConn);
+ TAOS_UNUSED(transHeapBalance(pConn->heap, pConn));
+ if (transHeapGet(pConn->heap, &pTopConn) == 0 && pConn != pTopConn) {
+ int32_t curReqs = REQS_ON_CONN(pConn);
+ int32_t topReqs = REQS_ON_CONN(pTopConn);
+ if (curReqs > topReqs && topReqs < pInst->shareConnLimit) {
+ *pNewConn = pTopConn;
+ return 1;
+ }
}
}
return 0;
@@ -3934,8 +3946,8 @@ int32_t compareHeapNode(const HeapNode* a, const HeapNode* b) {
SCliConn* args1 = container_of(a, SCliConn, node);
SCliConn* args2 = container_of(b, SCliConn, node);
- int32_t totalReq1 = transQueueSize(&args1->reqsToSend) + transQueueSize(&args1->reqsSentOut);
- int32_t totalReq2 = transQueueSize(&args2->reqsToSend) + transQueueSize(&args2->reqsSentOut);
+ int32_t totalReq1 = REQS_ON_CONN(args1);
+ int32_t totalReq2 = REQS_ON_CONN(args2);
if (totalReq1 > totalReq2) {
return 0;
}
@@ -4016,6 +4028,30 @@ int32_t transHeapUpdateFailTs(SHeap* heap, SCliConn* p) {
heap->lastConnFailTs = taosGetTimestampMs();
return 0;
}
+int32_t transHeapMayBalance(SHeap* heap, SCliConn* p) {
+ if (p->inHeap == 0 || heap == NULL || heap->heap == NULL) {
+ return 0;
+ }
+ SCliThrd* pThrd = p->hostThrd;
+ STrans* pInst = pThrd->pInst;
+ int32_t balanceLimit = pInst->shareConnLimit >= 4 ? pInst->shareConnLimit / 2 : 2;
+
+ SCliConn* topConn = NULL;
+ int32_t code = transHeapGet(heap, &topConn);
+ if (code != 0) {
+ return code;
+ }
+
+ if (topConn == p) return code;
+
+ int32_t reqsOnTop = REQS_ON_CONN(topConn);
+ int32_t reqsOnCur = REQS_ON_CONN(p);
+
+ if (reqsOnTop >= balanceLimit && reqsOnCur < balanceLimit) {
+ TAOS_UNUSED(transHeapBalance(heap, p));
+ }
+ return code;
+}
int32_t transHeapBalance(SHeap* heap, SCliConn* p) {
if (p->inHeap == 0 || heap == NULL || heap->heap == NULL) {
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index 66bd4a08f3..c0edcd54e4 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -77,6 +77,11 @@ int32_t transDecompressMsg(char** msg, int32_t* len) {
STransMsgHead* pNewHead = (STransMsgHead*)buf;
int32_t decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content,
tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen);
+
+ if (decompLen != oriLen) {
+ taosMemoryFree(buf);
+ return TSDB_CODE_INVALID_MSG;
+ }
memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead));
*len = oriLen + sizeof(STransMsgHead);
@@ -84,9 +89,36 @@ int32_t transDecompressMsg(char** msg, int32_t* len) {
taosMemoryFree(pHead);
*msg = buf;
+ return 0;
+}
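+// decompress into a freshly allocated buffer, leaving the original message untouched; the caller owns and frees *out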
+int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen) {
+ STransMsgHead* pHead = (STransMsgHead*)msg;
+ char* pCont = transContFromHead(pHead);
+
+ STransCompMsg* pComp = (STransCompMsg*)pCont;
+ int32_t oriLen = htonl(pComp->contLen);
+
+ int32_t tlen = len;
+ char* buf = taosMemoryCalloc(1, oriLen + sizeof(STransMsgHead));
+ if (buf == NULL) {
+ return terrno;
+ }
+
+ STransMsgHead* pNewHead = (STransMsgHead*)buf;
+ int32_t decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content,
+ tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen);
if (decompLen != oriLen) {
+ tError("msgLen:%d, originLen:%d, decompLen:%d", len, oriLen, decompLen);
+ taosMemoryFree(buf);
return TSDB_CODE_INVALID_MSG;
}
+ memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead));
+
+ *out = buf;
+ *outLen = oriLen + sizeof(STransMsgHead);
+ pNewHead->msgLen = *outLen;
+ pNewHead->comp = 0;
+
return 0;
}
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 6d52c8d6cb..ce2b9218b5 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -415,18 +415,15 @@ static void printFileSet(int32_t vgId, SArray* fileSet, const char* str) {
int32_t sz = taosArrayGetSize(fileSet);
for (int32_t i = 0; i < sz; i++) {
SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i);
- wInfo("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64
- ", createTs:%" PRId64 ", closeTs:%" PRId64,
- vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset,
- pFileInfo->createTs, pFileInfo->closeTs);
+ wTrace("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64
+ ", createTs:%" PRId64 ", closeTs:%" PRId64,
+ vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset,
+ pFileInfo->createTs, pFileInfo->closeTs);
}
}
int32_t walCheckAndRepairMeta(SWal* pWal) {
// load log files, get first/snapshot/last version info
- if (pWal->cfg.level == TAOS_WAL_SKIP) {
- return TSDB_CODE_SUCCESS;
- }
int32_t code = 0;
const char* logPattern = "^[0-9]+.log$";
const char* idxPattern = "^[0-9]+.idx$";
diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c
index d8a58efe4e..e2c5e66158 100644
--- a/source/libs/wal/src/walMgmt.c
+++ b/source/libs/wal/src/walMgmt.c
@@ -90,6 +90,45 @@ static int32_t walInitLock(SWal *pWal) {
return 0;
}
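+// for wal level TAOS_WAL_SKIP, pre-create idx/log files for version 0 so the write path has valid handles without meta repair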
+int32_t walInitWriteFileForSkip(SWal *pWal) {
+ TdFilePtr pIdxTFile = NULL, pLogTFile = NULL;
+ int64_t fileFirstVer = 0;
+ int32_t code = 0;
+
+ char fnameStr[WAL_FILE_LEN];
+ walBuildIdxName(pWal, fileFirstVer, fnameStr);
+ pIdxTFile = taosOpenFile(fnameStr, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
+ if (pIdxTFile == NULL) {
+ wError("vgId:%d, failed to open file since %s", pWal->cfg.vgId, tstrerror(terrno));
+ code = terrno;
+ goto _exit;
+ }
+ walBuildLogName(pWal, fileFirstVer, fnameStr);
+ pLogTFile = taosOpenFile(fnameStr, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
+ if (pLogTFile == NULL) {
+ wError("vgId:%d, failed to open file since %s", pWal->cfg.vgId, tstrerror(terrno));
+ code = terrno;
+ goto _exit;
+ }
+ // switch file
+ pWal->pIdxFile = pIdxTFile;
+ pWal->pLogFile = pLogTFile;
+ SWalFileInfo fileInfo;
+ (void)memset(&fileInfo, -1, sizeof(SWalFileInfo));
+ if (!taosArrayPush(pWal->fileInfoSet, &fileInfo)) {
+ wError("vgId:%d, failed to push fileInfo into array since %s", pWal->cfg.vgId, tstrerror(terrno));
+ code = terrno;
+ goto _exit;
+ }
+ pWal->writeCur = 0;
+_exit:
+ if (code != TSDB_CODE_SUCCESS) {
+ (void)taosCloseFile(&pIdxTFile);
+ (void)taosCloseFile(&pLogTFile);
+ }
+ TAOS_RETURN(code);
+}
+
SWal *walOpen(const char *path, SWalCfg *pCfg) {
int32_t code = 0;
SWal *pWal = taosMemoryCalloc(1, sizeof(SWal));
@@ -165,17 +204,25 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
if (code < 0) {
wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code));
}
+ if (pWal->cfg.level != TAOS_WAL_SKIP) {
+ code = walCheckAndRepairMeta(pWal);
+ if (code < 0) {
+ wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code));
+ goto _err;
+ }
- code = walCheckAndRepairMeta(pWal);
- if (code < 0) {
- wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code));
- goto _err;
- }
-
- code = walCheckAndRepairIdx(pWal);
- if (code < 0) {
- wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code));
- goto _err;
+ code = walCheckAndRepairIdx(pWal);
+ if (code < 0) {
+ wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code));
+ goto _err;
+ }
+ } else {
+ code = walInitWriteFileForSkip(pWal);
+ if (code < 0) {
+ wError("vgId:%d, cannot open wal since init write file for wal_level = 0 failed since %s", pWal->cfg.vgId,
+ tstrerror(code));
+ goto _err;
+ }
}
// add ref
@@ -217,6 +264,14 @@ int32_t walAlter(SWal *pWal, SWalCfg *pCfg) {
pWal->cfg.vgId, pWal->cfg.level, pWal->cfg.fsyncPeriod, pWal->cfg.retentionPeriod, pWal->cfg.retentionSize,
pCfg->level, pCfg->fsyncPeriod, pCfg->retentionPeriod, pCfg->retentionSize);
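+ // when leaving wal_level skip, wipe the wal directory so a fresh wal is rebuilt with real files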
+ if (pWal->cfg.level == TAOS_WAL_SKIP && pCfg->level != TAOS_WAL_SKIP) {
+ wInfo("vgId:%d, remove all wals, path:%s", pWal->cfg.vgId, pWal->path);
+ taosRemoveDir(pWal->path);
+ if (taosMkDir(pWal->path) != 0) {
+ wError("vgId:%d, path:%s, failed to create directory since %s", pWal->cfg.vgId, pWal->path, tstrerror(terrno));
+ }
+ }
+
pWal->cfg.level = pCfg->level;
pWal->cfg.fsyncPeriod = pCfg->fsyncPeriod;
pWal->cfg.retentionPeriod = pCfg->retentionPeriod;
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index 610adfb0e1..da5e1f47e9 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -89,6 +89,8 @@ int32_t walNextValidMsg(SWalReader *pReader) {
if (type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) ||
(IS_META_MSG(type) && pReader->cond.scanMeta)) {
TAOS_RETURN(walFetchBody(pReader));
+ } else if (type == TDMT_VND_DROP_TABLE && pReader->cond.scanDropCtb) {
+ TAOS_RETURN(walFetchBody(pReader));
} else {
TAOS_CHECK_RETURN(walSkipFetchBody(pReader));
diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c
index 1a9652b3bb..66ead2fd26 100644
--- a/source/libs/wal/src/walWrite.c
+++ b/source/libs/wal/src/walWrite.c
@@ -376,6 +376,10 @@ static FORCE_INLINE int32_t walCheckAndRoll(SWal *pWal) {
int32_t walBeginSnapshot(SWal *pWal, int64_t ver, int64_t logRetention) {
int32_t code = 0;
+ if (pWal->cfg.level == TAOS_WAL_SKIP) {
+ TAOS_RETURN(TSDB_CODE_SUCCESS);
+ }
+
if (logRetention < 0) {
TAOS_RETURN(TSDB_CODE_FAILED);
}
@@ -404,6 +408,10 @@ _exit:
int32_t walEndSnapshot(SWal *pWal) {
int32_t code = 0, lino = 0;
+ if (pWal->cfg.level == TAOS_WAL_SKIP) {
+ TAOS_RETURN(TSDB_CODE_SUCCESS);
+ }
+
TAOS_UNUSED(taosThreadRwlockWrlock(&pWal->mutex));
int64_t ver = pWal->vers.verInSnapshotting;
diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp
index 3e6fab116f..a958ad74e0 100644
--- a/source/libs/wal/test/walMetaTest.cpp
+++ b/source/libs/wal/test/walMetaTest.cpp
@@ -510,4 +510,27 @@ TEST_F(WalSkipLevel, restart) {
TearDown();
SetUp();
+}
+
+TEST_F(WalSkipLevel, roll) {
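+ // append 100 entries and snapshot, verify an already-snapshotted version is rejected, then confirm later appends and a second snapshot succeed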
+ int code;
+ int i;
+ for (i = 0; i < 100; i++) {
+ code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen);
+ ASSERT_EQ(code, 0);
+ code = walCommit(pWal, i);
+ }
+ walBeginSnapshot(pWal, i - 1, 0);
+ walEndSnapshot(pWal);
+ code = walAppendLog(pWal, 5, 0, syncMeta, (void*)ranStr, ranStrLen);
+ ASSERT_NE(code, 0);
+ for (; i < 200; i++) {
+ code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen);
+ ASSERT_EQ(code, 0);
+ code = walCommit(pWal, i);
+ }
+ code = walBeginSnapshot(pWal, i - 1, 0);
+ ASSERT_EQ(code, 0);
+ code = walEndSnapshot(pWal);
+ ASSERT_EQ(code, 0);
}
\ No newline at end of file
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index 84de563cda..777c6a9216 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -116,7 +116,10 @@ void taosRemoveDir(const char *dirname) {
return;
}
-bool taosDirExist(const char *dirname) { return taosCheckExistFile(dirname); }
+bool taosDirExist(const char *dirname) {
+ if (dirname == NULL || strlen(dirname) >= TDDIRMAXLEN) return false;
+ return taosCheckExistFile(dirname);
+}
int32_t taosMkDir(const char *dirname) {
if (taosDirExist(dirname)) return 0;
@@ -333,6 +336,8 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
}
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
+ OS_PARAM_CHECK(dirname);
+ OS_PARAM_CHECK(outname);
wordexp_t full_path;
int32_t code = wordexp(dirname, &full_path, 0);
switch (code) {
@@ -355,6 +360,8 @@ int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
}
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
+ OS_PARAM_CHECK(dirname);
+ OS_PARAM_CHECK(realPath);
char tmp[PATH_MAX] = {0};
#ifdef WINDOWS
if (_fullpath(tmp, dirname, maxlen) != NULL) {
@@ -386,6 +393,10 @@ bool taosIsDir(const char *dirname) {
}
char *taosDirName(char *name) {
+ if(name == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
#ifdef WINDOWS
char Drive1[MAX_PATH], Dir1[MAX_PATH];
_splitpath(name, Drive1, Dir1, NULL, NULL);
@@ -412,12 +423,16 @@ char *taosDirName(char *name) {
}
char *taosDirEntryBaseName(char *name) {
+ if(name == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
#ifdef WINDOWS
char Filename1[MAX_PATH], Ext1[MAX_PATH];
_splitpath(name, NULL, NULL, Filename1, Ext1);
return name + (strlen(name) - strlen(Filename1) - strlen(Ext1));
#else
- if (name == NULL || (name[0] == '/' && name[1] == '\0')) return name;
+ if ((name[0] == '/' && name[1] == '\0')) return name;
char *pPoint = strrchr(name, '/');
if (pPoint != NULL) {
if (*(pPoint + 1) == '\0') {
@@ -515,9 +530,9 @@ bool taosDirEntryIsDir(TdDirEntryPtr pDirEntry) {
}
char *taosGetDirEntryName(TdDirEntryPtr pDirEntry) {
- /*if (pDirEntry == NULL) {*/
- /*return NULL;*/
- /*}*/
+ if (pDirEntry == NULL) {
+ return NULL;
+ }
#ifdef WINDOWS
return pDirEntry->findFileData.cFileName;
#else
diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c
index 05c9936c2e..41b34a9030 100644
--- a/source/os/src/osEnv.c
+++ b/source/os/src/osEnv.c
@@ -127,8 +127,13 @@ bool osTempSpaceSufficient() { return tsTempSpace.size.avail > tsTempSpace.reser
int32_t osSetTimezone(const char *tz) { return taosSetSystemTimezone(tz, tsTimezoneStr, &tsDaylight, &tsTimezone); }
void osSetSystemLocale(const char *inLocale, const char *inCharSet) {
- (void)memcpy(tsLocale, inLocale, strlen(inLocale) + 1);
- (void)memcpy(tsCharset, inCharSet, strlen(inCharSet) + 1);
+ if (inLocale) (void)memcpy(tsLocale, inLocale, strlen(inLocale) + 1);
+ if (inCharSet) (void)memcpy(tsCharset, inCharSet, strlen(inCharSet) + 1);
}
-void osSetProcPath(int32_t argc, char **argv) { tsProcPath = argv[0]; }
+void osSetProcPath(int32_t argc, char **argv) {
+ if (argv == NULL || argc < 1) {
+ return; // no command line arguments
+ }
+ tsProcPath = argv[0];
+}
diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c
index 2f18c6e697..c2484860ad 100644
--- a/source/os/src/osFile.c
+++ b/source/os/src/osFile.c
@@ -80,6 +80,7 @@ typedef struct TdFile {
#endif
void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, char *dstPath) {
+ if (inputTmpDir == NULL || fileNamePrefix == NULL) return;
#ifdef WINDOWS
char tmpPath[PATH_MAX];
@@ -120,6 +121,10 @@ void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, cha
}
int64_t taosCopyFile(const char *from, const char *to) {
+ if (from == NULL || to == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return -1;
+ }
#ifdef WINDOWS
if (CopyFile(from, to, 0)) {
return 1;
@@ -133,15 +138,15 @@ int64_t taosCopyFile(const char *from, const char *to) {
int64_t bytes;
int32_t code = TSDB_CODE_SUCCESS;
- // fidfrom = open(from, O_RDONLY);
- TdFilePtr pFileFrom = taosOpenFile(from, TD_FILE_READ);
+ TdFilePtr pFileFrom = NULL;
+ TdFilePtr pFileTo = NULL;
+ pFileFrom = taosOpenFile(from, TD_FILE_READ);
if (pFileFrom == NULL) {
code = terrno;
goto _err;
}
- // fidto = open(to, O_WRONLY | O_CREAT | O_EXCL, 0755);
- TdFilePtr pFileTo = taosOpenFile(to, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_EXCL);
+ pFileTo = taosOpenFile(to, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_EXCL);
if (pFileTo == NULL) {
code = terrno;
goto _err;
@@ -193,6 +198,10 @@ _err:
}
TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions) {
+ if(path == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
TdFilePtr fp = taosOpenFile(path, tdFileOptions);
if (!fp) {
if (terrno == TAOS_SYSTEM_ERROR(ENOENT)) {
@@ -213,6 +222,7 @@ TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions) {
}
int32_t taosRemoveFile(const char *path) {
+ OS_PARAM_CHECK(path);
int32_t code = remove(path);
if (-1 == code) {
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -222,6 +232,8 @@ int32_t taosRemoveFile(const char *path) {
}
int32_t taosRenameFile(const char *oldName, const char *newName) {
+ OS_PARAM_CHECK(oldName);
+ OS_PARAM_CHECK(newName);
#ifdef WINDOWS
bool finished = false;
@@ -262,6 +274,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) {
}
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) {
+ OS_PARAM_CHECK(path);
#ifdef WINDOWS
struct _stati64 fileStat;
int32_t code = _stati64(path, &fileStat);
@@ -335,6 +348,10 @@ int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) {
}
FILE *taosOpenFileForStream(const char *path, int32_t tdFileOptions) {
+ if (path == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
char *mode = NULL;
if (tdFileOptions & TD_FILE_APPEND) {
mode = (tdFileOptions & TD_FILE_TEXT) ? "at+" : "ab+";
@@ -358,6 +375,10 @@ FILE *taosOpenFileForStream(const char *path, int32_t tdFileOptions) {
#ifdef WINDOWS
HANDLE taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
+ if (path == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return INVALID_HANDLE_VALUE;
+ }
DWORD openMode = 0;
DWORD access = 0;
DWORD fileFlag = FILE_ATTRIBUTE_NORMAL;
@@ -408,6 +429,10 @@ HANDLE taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
}
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
+ if (pFile == NULL || buf == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
#if FILE_WITH_LOCK
(void)taosThreadRwlockRdlock(&(pFile->rwlock));
#endif
@@ -435,7 +460,7 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
}
int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
- if (pFile == NULL || pFile->hFile == NULL) {
+ if (pFile == NULL || pFile->hFile == NULL || buf == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return 0;
}
@@ -457,7 +482,7 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
}
int64_t taosPWriteFile(TdFilePtr pFile, const void *buf, int64_t count, int64_t offset) {
- if (pFile == NULL) {
+ if (pFile == NULL || buf == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return 0;
}
@@ -719,6 +744,10 @@ bool lastErrorIsFileNotExist() {
#else
int taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
+ if (path == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return -1;
+ }
int access = O_BINARY;
access |= (tdFileOptions & TD_FILE_CREATE) ? O_CREAT : 0;
if ((tdFileOptions & TD_FILE_WRITE) && (tdFileOptions & TD_FILE_READ)) {
@@ -742,6 +771,10 @@ int taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
}
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
+ if (pFile == NULL || buf == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return -1;
+ }
STUB_RAND_IO_ERR(terrno)
#if FILE_WITH_LOCK
(void)taosThreadRwlockRdlock(&(pFile->rwlock));
@@ -797,7 +830,7 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
STUB_RAND_IO_ERR(terrno)
- if (pFile == NULL) {
+ if (pFile == NULL || buf == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return 0;
}
@@ -843,7 +876,7 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
int64_t taosPWriteFile(TdFilePtr pFile, const void *buf, int64_t count, int64_t offset) {
STUB_RAND_IO_ERR(terrno)
- if (pFile == NULL) {
+ if (pFile == NULL || buf == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return 0;
}
@@ -1050,6 +1083,10 @@ bool lastErrorIsFileNotExist() { return terrno == TAOS_SYSTEM_ERROR(ENOENT); }
#endif // WINDOWS
TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
+ if (path == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
STUB_RAND_IO_ERR(NULL)
FILE *fp = NULL;
#ifdef WINDOWS
@@ -1399,6 +1436,10 @@ int32_t taosEOFFile(TdFilePtr pFile) {
}
bool taosCheckAccessFile(const char *pathname, int32_t tdFileAccessOptions) {
+ if (pathname == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return false; // invalid parameter
+ }
int flags = 0;
if (tdFileAccessOptions & TD_FILE_ACCESS_EXIST_OK) {
@@ -1422,6 +1463,8 @@ bool taosCheckAccessFile(const char *pathname, int32_t tdFileAccessOptions) {
bool taosCheckExistFile(const char *pathname) { return taosCheckAccessFile(pathname, TD_FILE_ACCESS_EXIST_OK); };
int32_t taosCompressFile(char *srcFileName, char *destFileName) {
+ OS_PARAM_CHECK(srcFileName);
+ OS_PARAM_CHECK(destFileName);
int32_t compressSize = 163840;
int32_t ret = 0;
int32_t len = 0;
@@ -1516,6 +1559,10 @@ int32_t taosLinkFile(char *src, char *dst) {
}
FILE *taosOpenCFile(const char *filename, const char *mode) {
+ if (filename == NULL || mode == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
STUB_RAND_IO_ERR(NULL)
FILE *f = fopen(filename, mode);
if (NULL == f) {
@@ -1525,6 +1572,10 @@ FILE *taosOpenCFile(const char *filename, const char *mode) {
}
int taosSeekCFile(FILE *file, int64_t offset, int whence) {
+ if(NULL == file) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
#ifdef WINDOWS
return _fseeki64(file, offset, whence);
#else
@@ -1538,6 +1589,10 @@ int taosSeekCFile(FILE *file, int64_t offset, int whence) {
}
size_t taosReadFromCFile(void *buffer, size_t size, size_t count, FILE *stream) {
+ if (buffer == NULL || stream == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
STUB_RAND_IO_ERR(terrno)
return fread(buffer, size, count, stream);
}
diff --git a/source/os/src/osLocale.c b/source/os/src/osLocale.c
index becf0d5a70..21f781c7e4 100644
--- a/source/os/src/osLocale.c
+++ b/source/os/src/osLocale.c
@@ -52,6 +52,9 @@ typedef struct CharsetPair {
} CharsetPair;
char *taosCharsetReplace(char *charsetstr) {
+ if (charsetstr == NULL) {
+ return NULL;
+ }
CharsetPair charsetRep[] = {
{"utf8", "UTF-8"},
{"936", "CP936"},
@@ -76,6 +79,8 @@ char *taosCharsetReplace(char *charsetstr) {
* In case that the setLocale failed to be executed, the right charset needs to be set.
*/
int32_t taosSetSystemLocale(const char *inLocale, const char *inCharSet) {
+ OS_PARAM_CHECK(inLocale);
+ OS_PARAM_CHECK(inCharSet);
if (!taosValidateEncodec(inCharSet)) {
return terrno;
}
@@ -90,6 +95,7 @@ int32_t taosSetSystemLocale(const char *inLocale, const char *inCharSet) {
}
void taosGetSystemLocale(char *outLocale, char *outCharset) {
+ if (outLocale == NULL || outCharset == NULL) return;
#ifdef WINDOWS
char *locale = setlocale(LC_CTYPE, "en_US.UTF-8");
if (locale != NULL) {
diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c
index 538d5bf63e..9327b33f79 100644
--- a/source/os/src/osSemaphore.c
+++ b/source/os/src/osSemaphore.c
@@ -29,7 +29,11 @@
bool taosCheckPthreadValid(TdThread thread) { return thread.p != NULL; }
-void taosResetPthread(TdThread* thread) { thread->p = 0; }
+void taosResetPthread(TdThread* thread) {
+ if (thread != NULL) {
+ thread->p = NULL;
+ }
+}
int64_t taosGetPthreadId(TdThread thread) {
#ifdef PTW32_VERSION
@@ -46,6 +50,7 @@ bool taosComparePthread(TdThread first, TdThread second) { return first.p == sec
int32_t taosGetPId() { return GetCurrentProcessId(); }
int32_t taosGetAppName(char* name, int32_t* len) {
+ OS_PARAM_CHECK(name);
char filepath[1024] = {0};
if (GetModuleFileName(NULL, filepath, MAX_PATH) == 0) {
@@ -72,7 +77,11 @@ int32_t taosGetAppName(char* name, int32_t* len) {
return 0;
}
+int32_t taosGetPIdByName(const char* name, int32_t* pPId) { return -1;}
+
int32_t tsem_wait(tsem_t* sem) {
+ OS_PARAM_CHECK(sem);
+ OS_PARAM_CHECK(*sem);
DWORD ret = WaitForSingleObject(*sem, INFINITE);
if (ret == WAIT_OBJECT_0) {
return 0;
@@ -82,6 +91,8 @@ int32_t tsem_wait(tsem_t* sem) {
}
int32_t tsem_timewait(tsem_t* sem, int64_t timeout_ms) {
+ OS_PARAM_CHECK(sem);
+ OS_PARAM_CHECK(*sem);
DWORD result = WaitForSingleObject(*sem, timeout_ms);
if (result == WAIT_OBJECT_0) {
return 0; // Semaphore acquired
@@ -94,16 +105,21 @@ int32_t tsem_timewait(tsem_t* sem, int64_t timeout_ms) {
// Inter-process sharing is not currently supported. The pshared parameter is invalid.
int32_t tsem_init(tsem_t* sem, int pshared, unsigned int value) {
+ OS_PARAM_CHECK(sem);
*sem = CreateSemaphore(NULL, value, LONG_MAX, NULL);
return (*sem != NULL) ? 0 : TAOS_SYSTEM_WINAPI_ERROR(GetLastError());
}
int32_t tsem_post(tsem_t* sem) {
+ OS_PARAM_CHECK(sem);
+ OS_PARAM_CHECK(*sem);
if (ReleaseSemaphore(*sem, 1, NULL)) return 0;
return TAOS_SYSTEM_WINAPI_ERROR(GetLastError());
}
int32_t tsem_destroy(tsem_t* sem) {
+ OS_PARAM_CHECK(sem);
+ OS_PARAM_CHECK(*sem);
if (CloseHandle(*sem)) return 0;
return TAOS_SYSTEM_WINAPI_ERROR(GetLastError());
}
@@ -113,6 +129,7 @@ int32_t tsem_destroy(tsem_t* sem) {
#include <dispatch/dispatch.h>
int32_t tsem_init(tsem_t *psem, int flags, unsigned int count) {
+ OS_PARAM_CHECK(psem);
*psem = dispatch_semaphore_create(count);
if (*psem == NULL) return TAOS_SYSTEM_ERROR(errno);
return 0;
@@ -126,19 +143,19 @@ int32_t tsem_destroy(tsem_t *psem) {
}
int32_t tsem_post(tsem_t *psem) {
- if (psem == NULL || *psem == NULL) return -1;
+ if (psem == NULL || *psem == NULL) return TSDB_CODE_INVALID_PARA;
(void)dispatch_semaphore_signal(*psem);
return 0;
}
int32_t tsem_wait(tsem_t *psem) {
- if (psem == NULL || *psem == NULL) return -1;
+ if (psem == NULL || *psem == NULL) return TSDB_CODE_INVALID_PARA;
dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER);
return 0;
}
int32_t tsem_timewait(tsem_t *psem, int64_t milis) {
- if (psem == NULL || *psem == NULL) return -1;
+ if (psem == NULL || *psem == NULL) return TSDB_CODE_INVALID_PARA;
dispatch_time_t time = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(milis * USEC_PER_SEC));
if (dispatch_semaphore_wait(*psem, time) == 0) {
return 0;
@@ -156,13 +173,18 @@ int64_t taosGetSelfPthreadId() {
int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
-void taosResetPthread(TdThread *thread) { *thread = NULL; }
+void taosResetPthread(TdThread *thread) {
+ if (thread) {
+ *thread = NULL;
+ }
+}
bool taosComparePthread(TdThread first, TdThread second) { return taosThreadEqual(first, second) ? true : false; }
int32_t taosGetPId() { return (int32_t)getpid(); }
int32_t taosGetAppName(char *name, int32_t *len) {
+ OS_PARAM_CHECK(name);
char buf[PATH_MAX + 1];
buf[0] = '\0';
proc_name(getpid(), buf, sizeof(buf) - 1);
@@ -173,6 +195,8 @@ int32_t taosGetAppName(char *name, int32_t *len) {
return 0;
}
+int32_t taosGetPIdByName(const char* name, int32_t* pPId) {return -1;}
+
#else
/*
@@ -192,7 +216,11 @@ int64_t taosGetSelfPthreadId() {
}
int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
-void taosResetPthread(TdThread* thread) { *thread = 0; }
+void taosResetPthread(TdThread* thread) {
+ if (thread) {
+ *thread = 0;
+ }
+}
bool taosComparePthread(TdThread first, TdThread second) { return first == second; }
int32_t taosGetPId() {
@@ -203,6 +231,7 @@ int32_t taosGetPId() {
}
int32_t taosGetAppName(char* name, int32_t* len) {
+ OS_PARAM_CHECK(name);
const char* self = "/proc/self/exe";
char path[PATH_MAX] = {0};
@@ -228,6 +257,61 @@ int32_t taosGetAppName(char* name, int32_t* len) {
return 0;
}
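+// scan /proc/<pid>/status and return the pid whose Name field matches; ESRCH when no match is found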
+int32_t taosGetPIdByName(const char* name, int32_t* pPId) {
+ OS_PARAM_CHECK(name);
+ OS_PARAM_CHECK(pPId);
+ DIR* dir = NULL;
+ struct dirent* ptr = NULL;
+ FILE* fp = NULL;
+ char filepath[512];
+ char bufx[50];
+ char buf[1024] = {0};
+
+ *pPId = -1;
+ dir = opendir("/proc");
+ if (dir == NULL) {
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ while ((ptr = readdir(dir)) != NULL) {
+ if ((strcmp(ptr->d_name, ".") == 0) || (strcmp(ptr->d_name, "..") == 0)) {
+ continue;
+ }
+
+ if (DT_DIR != ptr->d_type) {
+ continue;
+ }
+
+ int32_t ret = tsnprintf(filepath, tListLen(filepath), "/proc/%s/status", ptr->d_name);
+ if (ret == -1) {
+ continue;
+ }
+
+ fp = fopen(filepath, "r");
+ if (NULL != fp) {
+ if (fgets(buf, tListLen(buf) - 1, fp) == NULL) {
+ TAOS_UNUSED(fclose(fp));
+ continue;
+ }
+
+ ret = sscanf(buf, "%*s %s", bufx);
+ if (!strcmp(bufx, name)) {
+ char* end = NULL;
+ *pPId = taosStr2Int32(ptr->d_name, &end, 10);
+ }
+ TAOS_UNUSED(fclose(fp));
+ }
+ }
+
+ TAOS_UNUSED(closedir(dir));
+
+ if ((*pPId) == -1) {
+ return TAOS_SYSTEM_ERROR(ESRCH);
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+}
+
int32_t tsem_init(tsem_t* psem, int flags, unsigned int count) {
if (sem_init(psem, flags, count) == 0) {
return 0;
@@ -237,6 +321,7 @@ int32_t tsem_init(tsem_t* psem, int flags, unsigned int count) {
}
int32_t tsem_timewait(tsem_t* sem, int64_t ms) {
+ OS_PARAM_CHECK(sem);
int ret = 0;
struct timespec ts = {0};
@@ -265,6 +350,7 @@ int32_t tsem_timewait(tsem_t* sem, int64_t ms) {
}
int32_t tsem_wait(tsem_t* sem) {
+ OS_PARAM_CHECK(sem);
int ret = 0;
do {
ret = sem_wait(sem);
@@ -279,6 +365,7 @@ int32_t tsem_wait(tsem_t* sem) {
}
int tsem2_init(tsem2_t* sem, int pshared, unsigned int value) {
+ OS_PARAM_CHECK(sem);
int ret = taosThreadMutexInit(&sem->mutex, NULL);
if (ret != 0) return ret;
@@ -308,6 +395,7 @@ int tsem2_init(tsem2_t* sem, int pshared, unsigned int value) {
}
int32_t tsem_post(tsem_t* psem) {
+ OS_PARAM_CHECK(psem);
if (sem_post(psem) == 0) {
return 0;
} else {
@@ -316,6 +404,7 @@ int32_t tsem_post(tsem_t* psem) {
}
int32_t tsem_destroy(tsem_t* sem) {
+ OS_PARAM_CHECK(sem);
if (sem_destroy(sem) == 0) {
return 0;
} else {
@@ -324,6 +413,7 @@ int32_t tsem_destroy(tsem_t* sem) {
}
int tsem2_post(tsem2_t* sem) {
+ OS_PARAM_CHECK(sem);
int32_t code = taosThreadMutexLock(&sem->mutex);
if (code) {
return code;
@@ -344,6 +434,7 @@ int tsem2_post(tsem2_t* sem) {
}
int tsem2_destroy(tsem2_t* sem) {
+ OS_PARAM_CHECK(sem);
(void)taosThreadMutexDestroy(&sem->mutex);
(void)taosThreadCondDestroy(&sem->cond);
(void)taosThreadCondAttrDestroy(&sem->attr);
@@ -352,6 +443,7 @@ int tsem2_destroy(tsem2_t* sem) {
}
int32_t tsem2_wait(tsem2_t* sem) {
+ OS_PARAM_CHECK(sem);
int32_t code = taosThreadMutexLock(&sem->mutex);
if (code) {
return code;
@@ -377,6 +469,7 @@ int32_t tsem2_wait(tsem2_t* sem) {
}
int32_t tsem2_timewait(tsem2_t* sem, int64_t ms) {
+ OS_PARAM_CHECK(sem);
int32_t code = 0;
code = taosThreadMutexLock(&sem->mutex);
diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c
index 5f983d5480..32b1023ed7 100644
--- a/source/os/src/osSocket.c
+++ b/source/os/src/osSocket.c
@@ -233,6 +233,8 @@ int32_t taosBlockSIGPIPE() {
}
int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) {
+ OS_PARAM_CHECK(fqdn);
+ OS_PARAM_CHECK(ip);
#ifdef WINDOWS
// Initialize Winsock
WSADATA wsaData;
@@ -309,6 +311,7 @@ int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip) {
}
int32_t taosGetFqdn(char *fqdn) {
+ OS_PARAM_CHECK(fqdn);
#ifdef WINDOWS
// Initialize Winsock
WSADATA wsaData;
@@ -384,6 +387,9 @@ int32_t taosGetFqdn(char *fqdn) {
}
void tinet_ntoa(char *ipstr, uint32_t ip) {
+ if (ipstr == NULL) {
+ return;
+ }
(void)snprintf(ipstr, TD_IP_LEN, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
}
diff --git a/source/os/src/osString.c b/source/os/src/osString.c
index 18da778227..0ee4f1c496 100644
--- a/source/os/src/osString.c
+++ b/source/os/src/osString.c
@@ -25,15 +25,19 @@ extern int wcwidth(wchar_t c);
extern int wcswidth(const wchar_t *s, size_t n);
char *tstrdup(const char *str) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
#ifdef WINDOWS
return _strdup(str);
#else
- char* p = strdup(str);
- if (str != NULL && NULL == p) {
+ char *p = strdup(str);
+ if (NULL == p) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
return p;
-
+
#endif
}
@@ -41,11 +45,19 @@ char *tstrdup(const char *str) {
// No errors are expected to occur
char *strsep(char **stringp, const char *delim) {
+ if (stringp == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
char *s;
const char *spanp;
int32_t c, sc;
char *tok;
if ((s = *stringp) == NULL) return (NULL);
+ if (delim == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
for (tok = s;;) {
c = *s++;
spanp = delim;
@@ -81,6 +93,10 @@ char *taosStrndup(const char *s, int size) {
/* Copy no more than N characters of SRC to DEST, returning the address of
the terminating '\0' in DEST, if any, or else DEST + N. */
char *stpncpy(char *dest, const char *src, int n) {
+ if (dest == NULL || src == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
size_t size = strnlen(src, n);
memcpy(dest, src, size);
dest += size;
@@ -117,6 +133,8 @@ int32_t taosStr2int64(const char *str, int64_t *val) {
}
int32_t taosStr2int16(const char *str, int16_t *val) {
+ OS_PARAM_CHECK(str);
+ OS_PARAM_CHECK(val);
int64_t tmp = 0;
int32_t code = taosStr2int64(str, &tmp);
if (code) {
@@ -130,6 +148,8 @@ int32_t taosStr2int16(const char *str, int16_t *val) {
}
int32_t taosStr2int32(const char *str, int32_t *val) {
+ OS_PARAM_CHECK(str);
+ OS_PARAM_CHECK(val);
int64_t tmp = 0;
int32_t code = taosStr2int64(str, &tmp);
if (code) {
@@ -143,6 +163,8 @@ int32_t taosStr2int32(const char *str, int32_t *val) {
}
int32_t taosStr2int8(const char *str, int8_t *val) {
+ OS_PARAM_CHECK(str);
+ OS_PARAM_CHECK(val);
int64_t tmp = 0;
int32_t code = taosStr2int64(str, &tmp);
if (code) {
@@ -156,6 +178,9 @@ int32_t taosStr2int8(const char *str, int8_t *val) {
}
int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes) {
+ if ((f1_ucs4 == NULL || f2_ucs4 == NULL)) {
+ return TSDB_CODE_INVALID_PARA;
+ }
for (int32_t i = 0; i < bytes; i += sizeof(TdUcs4)) {
int32_t f1 = *(int32_t *)((char *)f1_ucs4 + i);
int32_t f2 = *(int32_t *)((char *)f2_ucs4 + i);
@@ -191,6 +216,9 @@ int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes) {
}
int32_t tasoUcs4Copy(TdUcs4 *target_ucs4, TdUcs4 *source_ucs4, int32_t len_ucs4) {
+ if (target_ucs4 == NULL || source_ucs4 == NULL || len_ucs4 <= 0) {
+ return TSDB_CODE_INVALID_PARA;
+ }
if (taosMemorySize(target_ucs4) < len_ucs4 * sizeof(TdUcs4)) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
@@ -261,6 +289,10 @@ void taosConvDestroy() {
}
iconv_t taosAcquireConv(int32_t *idx, ConvType type) {
+ if(idx == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return (iconv_t)-1;
+ }
if (gConvMaxNum[type] <= 0) {
*idx = -1;
if (type == M2C) {
@@ -321,6 +353,13 @@ void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type) {
}
bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) {
+ if (ucs4_max_len == 0) {
+ return true;
+ }
+ if(ucs4_max_len < 0 || mbs == NULL || ucs4 == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return false;
+ }
#ifdef DISALLOW_NCHAR_WITHOUT_ICONV
printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n");
terrno = TSDB_CODE_APP_ERROR;
@@ -359,6 +398,13 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4
// if success, return the number of bytes written to mbs ( >= 0)
// otherwise return error code ( < 0)
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) {
+ if (ucs4_max_len == 0) {
+ return 0;
+ }
+ if (ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
#ifdef DISALLOW_NCHAR_WITHOUT_ICONV
printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n");
terrno = TSDB_CODE_APP_ERROR;
@@ -390,6 +436,13 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) {
// if success, return the number of bytes written to mbs ( >= 0)
// otherwise return error code ( < 0)
int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t conv) {
+ if (ucs4_max_len == 0) {
+ return 0;
+ }
+ if (ucs4_max_len < 0 || ucs4 == NULL || mbs == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
#ifdef DISALLOW_NCHAR_WITHOUT_ICONV
printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n");
terrno = TSDB_CODE_APP_ERROR;
@@ -408,6 +461,10 @@ int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t c
}
bool taosValidateEncodec(const char *encodec) {
+ if (encodec == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return false;
+ }
#ifdef DISALLOW_NCHAR_WITHOUT_ICONV
printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n");
terrno = TSDB_CODE_APP_ERROR;
@@ -443,7 +500,7 @@ int32_t taosUcs4len(TdUcs4 *ucs4) {
// dst buffer size should be at least 2*len + 1
int32_t taosHexEncode(const unsigned char *src, char *dst, int32_t len, int32_t bufSize) {
- if (!dst) {
+ if (!dst || !src || bufSize <= 0) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@@ -456,7 +513,7 @@ int32_t taosHexEncode(const unsigned char *src, char *dst, int32_t len, int32_t
}
int32_t taosHexDecode(const char *src, char *dst, int32_t len) {
- if (!dst) {
+ if (!src || !dst || len <= 0) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
@@ -475,19 +532,42 @@ int32_t taosHexDecode(const char *src, char *dst, int32_t len) {
int32_t taosWcharWidth(TdWchar wchar) { return wcwidth(wchar); }
-int32_t taosWcharsWidth(TdWchar *pWchar, int32_t size) { return wcswidth(pWchar, size); }
+int32_t taosWcharsWidth(TdWchar *pWchar, int32_t size) {
+ if (pWchar == NULL || size <= 0) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+ return wcswidth(pWchar, size);
+}
-int32_t taosMbToWchar(TdWchar *pWchar, const char *pStr, int32_t size) { return mbtowc(pWchar, pStr, size); }
+int32_t taosMbToWchar(TdWchar *pWchar, const char *pStr, int32_t size) {
+ if (pWchar == NULL || pStr == NULL || size <= 0) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+ return mbtowc(pWchar, pStr, size);
+}
-int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size) { return mbstowcs(pWchars, pStrs, size); }
+int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size) {
+ if (pWchars == NULL || pStrs == NULL || size <= 0) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+ return mbstowcs(pWchars, pStrs, size);
+}
-int32_t taosWcharToMb(char *pStr, TdWchar wchar) { return wctomb(pStr, wchar); }
+int32_t taosWcharToMb(char *pStr, TdWchar wchar) {
+ OS_PARAM_CHECK(pStr);
+ return wctomb(pStr, wchar); }
char *taosStrCaseStr(const char *str, const char *pattern) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
+ if (!pattern || !*pattern) return (char *)str;
+
size_t i;
-
- if (!*pattern) return (char *)str;
-
for (; *str; str++) {
if (toupper(*str) == toupper(*pattern)) {
for (i = 1;; i++) {
@@ -500,6 +580,10 @@ char *taosStrCaseStr(const char *str, const char *pattern) {
}
int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
int64_t tmp = strtoll(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -508,6 +592,10 @@ int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) {
}
uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
uint64_t tmp = strtoull(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -516,6 +604,10 @@ uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) {
}
int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
int32_t tmp = strtol(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -524,6 +616,10 @@ int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) {
}
uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
uint32_t tmp = strtol(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -532,6 +628,10 @@ uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) {
}
int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
int32_t tmp = strtol(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -540,6 +640,10 @@ int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) {
}
uint16_t taosStr2UInt16(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
uint32_t tmp = strtoul(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -548,11 +652,19 @@ uint16_t taosStr2UInt16(const char *str, char **pEnd, int32_t radix) {
}
int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
int32_t tmp = strtol(str, pEnd, radix);
return tmp;
}
uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
uint32_t tmp = strtoul(str, pEnd, radix);
#if defined(DARWIN) || defined(_ALPINE)
if (errno == EINVAL) errno = 0;
@@ -561,11 +673,19 @@ uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix) {
}
double taosStr2Double(const char *str, char **pEnd) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
double tmp = strtod(str, pEnd);
return tmp;
}
float taosStr2Float(const char *str, char **pEnd) {
+ if (str == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
float tmp = strtof(str, pEnd);
return tmp;
}
@@ -578,6 +698,10 @@ bool isHex(const char *z, uint32_t n) {
}
bool isValidateHex(const char *z, uint32_t n) {
+ if (!z) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return false;
+ }
if ((n & 1) != 0) return false;
for (size_t i = HEX_PREFIX_LEN; i < n; i++) {
if (isxdigit(z[i]) == 0) {
@@ -588,6 +712,9 @@ bool isValidateHex(const char *z, uint32_t n) {
}
int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size) {
+ OS_PARAM_CHECK(z);
+ OS_PARAM_CHECK(data);
+ OS_PARAM_CHECK(size);
n -= HEX_PREFIX_LEN; // remove 0x
z += HEX_PREFIX_LEN;
*size = n / HEX_PREFIX_LEN;
@@ -712,6 +839,10 @@ int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size) {
}
int64_t tsnprintf(char *dst, int64_t size, const char *format, ...) {
+ if (dst == NULL || format == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return 0;
+ }
if (size <= 0) return 0;
if (size == 1) {
dst[0] = '\0';
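A quick usage sketch of the hex helpers hardened above, following the "dst buffer size should be at least 2*len + 1" rule that the new checks guard (illustration only, not part of this patch; it assumes the internal os.h header and that bufSize is the destination capacity):

#include "os.h"  // assumed TDengine-internal header exposing taosHexEncode and the error codes

static int32_t encodeToHex(void) {
  const unsigned char raw[] = {0xAB, 0xCD, 0xEF};
  char hex[2 * sizeof(raw) + 1] = {0};  // at least 2*len + 1 bytes for the encoded string

  // With the added checks, a NULL src/dst or a non-positive bufSize now fails fast
  // with TSDB_CODE_INVALID_PARA instead of dereferencing a bad pointer.
  return taosHexEncode(raw, hex, (int32_t)sizeof(raw), (int32_t)sizeof(hex));
}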
diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c
index bd1a058291..dc3258bf9c 100644
--- a/source/os/src/osSysinfo.c
+++ b/source/os/src/osSysinfo.c
@@ -154,6 +154,7 @@ static void taosGetProcIOnfos() {
#endif
static int32_t taosGetSysCpuInfo(SysCpuInfo *cpuInfo) {
+ OS_PARAM_CHECK(cpuInfo);
int32_t code = 0;
#ifdef WINDOWS
FILETIME pre_idleTime = {0};
@@ -206,6 +207,7 @@ static int32_t taosGetSysCpuInfo(SysCpuInfo *cpuInfo) {
}
static int32_t taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
+ OS_PARAM_CHECK(cpuInfo);
int32_t code = 0;
#ifdef WINDOWS
@@ -287,6 +289,7 @@ void taosGetSystemInfo() {
}
int32_t taosGetEmail(char *email, int32_t maxLen) {
+ OS_PARAM_CHECK(email);
#ifdef WINDOWS
return 0;
#elif defined(_TD_DARWIN_64)
@@ -330,6 +333,7 @@ int32_t taosGetEmail(char *email, int32_t maxLen) {
#ifdef WINDOWS
bool getWinVersionReleaseName(char *releaseName, int32_t maxLen) {
+ if (releaseName == NULL) return false;
TCHAR szFileName[MAX_PATH];
DWORD dwHandle;
DWORD dwLen;
@@ -367,6 +371,7 @@ bool getWinVersionReleaseName(char *releaseName, int32_t maxLen) {
#endif
int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t maxLen) {
+ OS_PARAM_CHECK(releaseName);
#ifdef WINDOWS
if (!getWinVersionReleaseName(releaseName, maxLen)) {
snprintf(releaseName, maxLen, "Windows");
@@ -437,6 +442,8 @@ int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t
}
int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
+ OS_PARAM_CHECK(cpuModel);
+ OS_PARAM_CHECK(numOfCores);
#ifdef WINDOWS
char value[100];
DWORD bufferSize = sizeof(value);
@@ -541,6 +548,7 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
// Returns the container's CPU quota if successful, otherwise returns the physical CPU cores
static int32_t taosCntrGetCpuCores(float *numOfCores) {
+ OS_PARAM_CHECK(numOfCores);
#ifdef WINDOWS
return TSDB_CODE_UNSUPPORT_OS;
#elif defined(_TD_DARWIN_64)
@@ -600,6 +608,7 @@ _end:
}
int32_t taosGetCpuCores(float *numOfCores, bool physical) {
+ OS_PARAM_CHECK(numOfCores);
#ifdef WINDOWS
SYSTEM_INFO info;
GetSystemInfo(&info);
@@ -702,6 +711,7 @@ int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma, ch
}
int32_t taosGetTotalMemory(int64_t *totalKB) {
+ OS_PARAM_CHECK(totalKB);
#ifdef WINDOWS
MEMORYSTATUSEX memsStat;
memsStat.dwLength = sizeof(memsStat);
@@ -723,6 +733,7 @@ int32_t taosGetTotalMemory(int64_t *totalKB) {
}
int32_t taosGetProcMemory(int64_t *usedKB) {
+ OS_PARAM_CHECK(usedKB);
#ifdef WINDOWS
unsigned bytes_used = 0;
@@ -769,6 +780,7 @@ int32_t taosGetProcMemory(int64_t *usedKB) {
}
int32_t taosGetSysMemory(int64_t *usedKB) {
+ OS_PARAM_CHECK(usedKB);
#ifdef WINDOWS
MEMORYSTATUSEX memsStat;
memsStat.dwLength = sizeof(memsStat);
@@ -794,6 +806,8 @@ int32_t taosGetSysMemory(int64_t *usedKB) {
}
int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize) {
+ OS_PARAM_CHECK(dataDir);
+ OS_PARAM_CHECK(diskSize);
#if defined(WINDOWS)
unsigned _int64 i64FreeBytesToCaller;
unsigned _int64 i64TotalBytes;
@@ -839,21 +853,25 @@ int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize) {
}
int32_t taosGetProcIO(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int64_t *write_bytes) {
+ OS_PARAM_CHECK(rchars);
+ OS_PARAM_CHECK(wchars);
+ OS_PARAM_CHECK(read_bytes);
+ OS_PARAM_CHECK(write_bytes);
#ifdef WINDOWS
IO_COUNTERS io_counter;
if (GetProcessIoCounters(GetCurrentProcess(), &io_counter)) {
- if (rchars) *rchars = io_counter.ReadTransferCount;
- if (wchars) *wchars = io_counter.WriteTransferCount;
- if (read_bytes) *read_bytes = 0;
- if (write_bytes) *write_bytes = 0;
+ *rchars = io_counter.ReadTransferCount;
+ *wchars = io_counter.WriteTransferCount;
+ *read_bytes = 0;
+ *write_bytes = 0;
return 0;
}
return TAOS_SYSTEM_WINAPI_ERROR(GetLastError());
#elif defined(_TD_DARWIN_64)
- if (rchars) *rchars = 0;
- if (wchars) *wchars = 0;
- if (read_bytes) *read_bytes = 0;
- if (write_bytes) *write_bytes = 0;
+ *rchars = 0;
+ *wchars = 0;
+ *read_bytes = 0;
+ *write_bytes = 0;
return 0;
#else
TdFilePtr pFile = taosOpenFile(tsProcIOFile, TD_FILE_READ | TD_FILE_STREAM);
@@ -900,6 +918,9 @@ int32_t taosGetProcIO(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int
}
int32_t taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int64_t *write_bytes) {
+ if (rchars == NULL || wchars == NULL || read_bytes == NULL || write_bytes == NULL) {
+ return TSDB_CODE_INVALID_PARA;
+ }
static int64_t last_rchars = -1;
static int64_t last_wchars = -1;
static int64_t last_read_bytes = -1;
@@ -932,13 +953,15 @@ int32_t taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes
return 0;
}
void taosSetDefaultProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int64_t *write_bytes) {
- *rchars = 0;
- *wchars = 0;
- *read_bytes = 0;
- *write_bytes = 0;
+ if (rchars) *rchars = 0;
+ if (wchars) *wchars = 0;
+ if (read_bytes) *read_bytes = 0;
+ if (write_bytes) *write_bytes = 0;
}
int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
+ OS_PARAM_CHECK(receive_bytes);
+ OS_PARAM_CHECK(transmit_bytes);
*receive_bytes = 0;
*transmit_bytes = 0;
@@ -994,6 +1017,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
}
int32_t taosGetCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) {
+ OS_PARAM_CHECK(receive_bytes);
+ OS_PARAM_CHECK(transmit_bytes);
static int64_t last_receive_bytes = -1;
static int64_t last_transmit_bytes = -1;
int64_t cur_receive_bytes = 0;
@@ -1017,8 +1042,8 @@ int32_t taosGetCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) {
return 0;
}
void taosSetDefaultCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) {
- *receive_bytes = 0;
- *transmit_bytes = 0;
+ if (receive_bytes) *receive_bytes = 0;
+ if (transmit_bytes) *transmit_bytes = 0;
}
void taosKillSystem() {
@@ -1037,6 +1062,7 @@ void taosKillSystem() {
#define UUIDLEN (36)
int32_t taosGetSystemUUIDLimit36(char *uid, int32_t uidlen) {
+ OS_PARAM_CHECK(uid);
#ifdef WINDOWS
GUID guid;
HRESULT h = CoCreateGuid(&guid);
@@ -1334,6 +1360,7 @@ int32_t getMacLocalHostNameBySCD(char *hostname, size_t maxLen) {
#endif
int32_t taosGetlocalhostname(char *hostname, size_t maxLen) {
+ OS_PARAM_CHECK(hostname);
#ifdef _TD_DARWIN_64
int res = getMacLocalHostNameBySCD(hostname, maxLen);
if (res != 0) {
diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c
index fe52369a53..fefada2142 100644
--- a/source/os/src/osSystem.c
+++ b/source/os/src/osSystem.c
@@ -107,26 +107,6 @@ void* taosLoadDll(const char* filename) {
#endif
}
-void* taosLoadSym(void* handle, char* name) {
-#if defined(WINDOWS)
- return NULL;
-#elif defined(_TD_DARWIN_64)
- return NULL;
-#else
- void* sym = dlsym(handle, name);
- char* error = NULL;
-
- if ((error = dlerror()) != NULL) {
- // printf("load sym:%s failed, error:%s", name, dlerror());
- return NULL;
- }
-
- // printf("sym %s loaded", name);
-
- return sym;
-#endif
-}
-
void taosCloseDll(void* handle) {
#if defined(WINDOWS)
return;
diff --git a/source/os/src/osThread.c b/source/os/src/osThread.c
index 6a8c705cde..f888835d95 100644
--- a/source/os/src/osThread.c
+++ b/source/os/src/osThread.c
@@ -18,6 +18,8 @@
#include "os.h"
int32_t taosThreadCreate(TdThread *tid, const TdThreadAttr *attr, void *(*start)(void *), void *arg) {
+ OS_PARAM_CHECK(tid);
+ OS_PARAM_CHECK(start);
int32_t code = pthread_create(tid, attr, start, arg);
if (code) {
taosThreadClear(tid);
@@ -28,6 +30,7 @@ int32_t taosThreadCreate(TdThread *tid, const TdThreadAttr *attr, void *(*start)
}
int32_t taosThreadAttrDestroy(TdThreadAttr *attr) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_destroy(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -37,6 +40,8 @@ int32_t taosThreadAttrDestroy(TdThreadAttr *attr) {
}
int32_t taosThreadAttrGetDetachState(const TdThreadAttr *attr, int32_t *detachstate) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(detachstate);
int32_t code = pthread_attr_getdetachstate(attr, detachstate);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -46,6 +51,8 @@ int32_t taosThreadAttrGetDetachState(const TdThreadAttr *attr, int32_t *detachst
}
int32_t taosThreadAttrGetInheritSched(const TdThreadAttr *attr, int32_t *inheritsched) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(inheritsched);
int32_t code = pthread_attr_getinheritsched(attr, inheritsched);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -55,6 +62,8 @@ int32_t taosThreadAttrGetInheritSched(const TdThreadAttr *attr, int32_t *inherit
}
int32_t taosThreadAttrGetSchedParam(const TdThreadAttr *attr, struct sched_param *param) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(param);
int32_t code = pthread_attr_getschedparam(attr, param);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -64,6 +73,8 @@ int32_t taosThreadAttrGetSchedParam(const TdThreadAttr *attr, struct sched_param
}
int32_t taosThreadAttrGetSchedPolicy(const TdThreadAttr *attr, int32_t *policy) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(policy);
int32_t code = pthread_attr_getschedpolicy(attr, policy);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -73,6 +84,8 @@ int32_t taosThreadAttrGetSchedPolicy(const TdThreadAttr *attr, int32_t *policy)
}
int32_t taosThreadAttrGetScope(const TdThreadAttr *attr, int32_t *contentionscope) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(contentionscope);
int32_t code = pthread_attr_getscope(attr, contentionscope);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -82,6 +95,8 @@ int32_t taosThreadAttrGetScope(const TdThreadAttr *attr, int32_t *contentionscop
}
int32_t taosThreadAttrGetStackSize(const TdThreadAttr *attr, size_t *stacksize) {
+ OS_PARAM_CHECK(attr);
+ OS_PARAM_CHECK(stacksize);
int32_t code = pthread_attr_getstacksize(attr, stacksize);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -90,7 +105,8 @@ int32_t taosThreadAttrGetStackSize(const TdThreadAttr *attr, size_t *stacksize)
return code;
}
-int32_t taosThreadAttrInit(TdThreadAttr *attr) {
+int32_t taosThreadAttrInit(TdThreadAttr *attr) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_init(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -100,6 +116,7 @@ int32_t taosThreadAttrInit(TdThreadAttr *attr) {
}
int32_t taosThreadAttrSetDetachState(TdThreadAttr *attr, int32_t detachstate) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setdetachstate(attr, detachstate);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -109,6 +126,7 @@ int32_t taosThreadAttrSetDetachState(TdThreadAttr *attr, int32_t detachstate) {
}
int32_t taosThreadAttrSetInheritSched(TdThreadAttr *attr, int32_t inheritsched) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setinheritsched(attr, inheritsched);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -118,6 +136,7 @@ int32_t taosThreadAttrSetInheritSched(TdThreadAttr *attr, int32_t inheritsched)
}
int32_t taosThreadAttrSetSchedParam(TdThreadAttr *attr, const struct sched_param *param) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setschedparam(attr, param);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -127,6 +146,7 @@ int32_t taosThreadAttrSetSchedParam(TdThreadAttr *attr, const struct sched_param
}
int32_t taosThreadAttrSetSchedPolicy(TdThreadAttr *attr, int32_t policy) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setschedpolicy(attr, policy);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -136,6 +156,7 @@ int32_t taosThreadAttrSetSchedPolicy(TdThreadAttr *attr, int32_t policy) {
}
int32_t taosThreadAttrSetScope(TdThreadAttr *attr, int32_t contentionscope) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setscope(attr, contentionscope);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -145,6 +166,7 @@ int32_t taosThreadAttrSetScope(TdThreadAttr *attr, int32_t contentionscope) {
}
int32_t taosThreadAttrSetStackSize(TdThreadAttr *attr, size_t stacksize) {
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_attr_setstacksize(attr, stacksize);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -163,6 +185,7 @@ int32_t taosThreadCancel(TdThread thread) {
}
int32_t taosThreadCondDestroy(TdThreadCond *cond) {
+ OS_PARAM_CHECK(cond);
#ifdef __USE_WIN_THREAD
return 0;
#else
@@ -176,6 +199,7 @@ int32_t taosThreadCondDestroy(TdThreadCond *cond) {
}
int32_t taosThreadCondInit(TdThreadCond *cond, const TdThreadCondAttr *attr) {
+ OS_PARAM_CHECK(cond);
#ifdef __USE_WIN_THREAD
InitializeConditionVariable(cond);
return 0;
@@ -190,6 +214,7 @@ int32_t taosThreadCondInit(TdThreadCond *cond, const TdThreadCondAttr *attr) {
}
int32_t taosThreadCondSignal(TdThreadCond *cond) {
+ OS_PARAM_CHECK(cond);
#ifdef __USE_WIN_THREAD
WakeConditionVariable(cond);
return 0;
@@ -204,6 +229,7 @@ int32_t taosThreadCondSignal(TdThreadCond *cond) {
}
int32_t taosThreadCondBroadcast(TdThreadCond *cond) {
+ OS_PARAM_CHECK(cond);
#ifdef __USE_WIN_THREAD
WakeAllConditionVariable(cond);
return 0;
@@ -218,6 +244,8 @@ int32_t taosThreadCondBroadcast(TdThreadCond *cond) {
}
int32_t taosThreadCondWait(TdThreadCond *cond, TdThreadMutex *mutex) {
+ OS_PARAM_CHECK(cond);
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
if (!SleepConditionVariableCS(cond, mutex, INFINITE)) {
return EINVAL;
@@ -234,8 +262,10 @@ int32_t taosThreadCondWait(TdThreadCond *cond, TdThreadMutex *mutex) {
}
int32_t taosThreadCondTimedWait(TdThreadCond *cond, TdThreadMutex *mutex, const struct timespec *abstime) {
-#ifdef __USE_WIN_THREAD
if (!abstime) return 0;
+ OS_PARAM_CHECK(cond);
+ OS_PARAM_CHECK(mutex);
+#ifdef __USE_WIN_THREAD
if (SleepConditionVariableCS(cond, mutex, (DWORD)(abstime->tv_sec * 1e3 + abstime->tv_nsec / 1e6))) return 0;
DWORD error = GetLastError();
if (error == ERROR_TIMEOUT) {
@@ -258,6 +288,7 @@ int32_t taosThreadCondAttrDestroy(TdThreadCondAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_condattr_destroy(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -268,10 +299,12 @@ int32_t taosThreadCondAttrDestroy(TdThreadCondAttr *attr) {
}
int32_t taosThreadCondAttrGetPshared(const TdThreadCondAttr *attr, int32_t *pshared) {
+ OS_PARAM_CHECK(pshared);
#ifdef __USE_WIN_THREAD
if (pshared) *pshared = PTHREAD_PROCESS_PRIVATE;
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_condattr_getpshared(attr, pshared);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -285,6 +318,7 @@ int32_t taosThreadCondAttrInit(TdThreadCondAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_condattr_init(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -300,6 +334,7 @@ int32_t taosThreadCondAttrSetclock(TdThreadCondAttr *attr, int clockId) {
#elif defined(__APPLE__)
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_condattr_setclock(attr, clockId);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -310,6 +345,7 @@ int32_t taosThreadCondAttrSetclock(TdThreadCondAttr *attr, int clockId) {
}
int32_t taosThreadCondAttrSetPshared(TdThreadCondAttr *attr, int32_t pshared) {
+ OS_PARAM_CHECK(attr);
#ifdef __USE_WIN_THREAD
return 0;
#else
@@ -336,10 +372,12 @@ int32_t taosThreadEqual(TdThread t1, TdThread t2) {
}
void taosThreadExit(void *valuePtr) {
- return pthread_exit(valuePtr);
+ if (valuePtr) return pthread_exit(valuePtr);
}
int32_t taosThreadGetSchedParam(TdThread thread, int32_t *policy, struct sched_param *param) {
+ OS_PARAM_CHECK(policy);
+ OS_PARAM_CHECK(param);
int32_t code = pthread_getschedparam(thread, policy, param);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -362,6 +400,7 @@ int32_t taosThreadJoin(TdThread thread, void **valuePtr) {
}
int32_t taosThreadKeyCreate(TdThreadKey *key, void (*destructor)(void *)) {
+ OS_PARAM_CHECK(key);
int32_t code = pthread_key_create(key, destructor);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -393,6 +432,7 @@ int32_t taosThreadKill(TdThread thread, int32_t sig) {
// }
int32_t taosThreadMutexDestroy(TdThreadMutex *mutex) {
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
DeleteCriticalSection(mutex);
return 0;
@@ -407,6 +447,7 @@ int32_t taosThreadMutexDestroy(TdThreadMutex *mutex) {
}
int32_t taosThreadMutexInit(TdThreadMutex *mutex, const TdThreadMutexAttr *attr) {
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
/**
* Windows Server 2003 and Windows XP: In low memory situations, InitializeCriticalSection can raise a
@@ -426,6 +467,7 @@ int32_t taosThreadMutexInit(TdThreadMutex *mutex, const TdThreadMutexAttr *attr)
}
int32_t taosThreadMutexLock(TdThreadMutex *mutex) {
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
EnterCriticalSection(mutex);
return 0;
@@ -444,6 +486,7 @@ int32_t taosThreadMutexLock(TdThreadMutex *mutex) {
// }
int32_t taosThreadMutexTryLock(TdThreadMutex *mutex) {
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
if (TryEnterCriticalSection(mutex)) return 0;
return EBUSY;
@@ -457,6 +500,7 @@ int32_t taosThreadMutexTryLock(TdThreadMutex *mutex) {
}
int32_t taosThreadMutexUnlock(TdThreadMutex *mutex) {
+ OS_PARAM_CHECK(mutex);
#ifdef __USE_WIN_THREAD
LeaveCriticalSection(mutex);
return 0;
@@ -474,6 +518,7 @@ int32_t taosThreadMutexAttrDestroy(TdThreadMutexAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_destroy(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -484,10 +529,12 @@ int32_t taosThreadMutexAttrDestroy(TdThreadMutexAttr *attr) {
}
int32_t taosThreadMutexAttrGetPshared(const TdThreadMutexAttr *attr, int32_t *pshared) {
+ OS_PARAM_CHECK(pshared);
#ifdef __USE_WIN_THREAD
if (pshared) *pshared = PTHREAD_PROCESS_PRIVATE;
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_getpshared(attr, pshared);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -502,10 +549,12 @@ int32_t taosThreadMutexAttrGetPshared(const TdThreadMutexAttr *attr, int32_t *ps
// }
int32_t taosThreadMutexAttrGetType(const TdThreadMutexAttr *attr, int32_t *kind) {
+ OS_PARAM_CHECK(kind);
#ifdef __USE_WIN_THREAD
if (kind) *kind = PTHREAD_MUTEX_NORMAL;
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_gettype(attr, kind);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -519,6 +568,7 @@ int32_t taosThreadMutexAttrInit(TdThreadMutexAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_init(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -532,6 +582,7 @@ int32_t taosThreadMutexAttrSetPshared(TdThreadMutexAttr *attr, int32_t pshared)
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_setpshared(attr, pshared);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -549,6 +600,7 @@ int32_t taosThreadMutexAttrSetType(TdThreadMutexAttr *attr, int32_t kind) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_mutexattr_settype(attr, kind);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -574,6 +626,7 @@ int32_t taosThreadRwlockDestroy(TdThreadRwlock *rwlock) {
*/
return 0;
#else
+ OS_PARAM_CHECK(rwlock);
int32_t code = pthread_rwlock_destroy(rwlock);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -584,6 +637,7 @@ int32_t taosThreadRwlockDestroy(TdThreadRwlock *rwlock) {
}
int32_t taosThreadRwlockInit(TdThreadRwlock *rwlock, const TdThreadRwlockAttr *attr) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
memset(rwlock, 0, sizeof(*rwlock));
InitializeSRWLock(&rwlock->lock);
@@ -599,6 +653,7 @@ int32_t taosThreadRwlockInit(TdThreadRwlock *rwlock, const TdThreadRwlockAttr *a
}
int32_t taosThreadRwlockRdlock(TdThreadRwlock *rwlock) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
AcquireSRWLockShared(&rwlock->lock);
return 0;
@@ -621,6 +676,7 @@ int32_t taosThreadRwlockRdlock(TdThreadRwlock *rwlock) {
// }
int32_t taosThreadRwlockTryRdlock(TdThreadRwlock *rwlock) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
if (!TryAcquireSRWLockShared(&rwlock->lock)) return EBUSY;
return 0;
@@ -635,6 +691,7 @@ int32_t taosThreadRwlockTryRdlock(TdThreadRwlock *rwlock) {
}
int32_t taosThreadRwlockTryWrlock(TdThreadRwlock *rwlock) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
if (!TryAcquireSRWLockExclusive(&rwlock->lock)) return EBUSY;
atomic_store_8(&rwlock->excl, 1);
@@ -650,6 +707,7 @@ int32_t taosThreadRwlockTryWrlock(TdThreadRwlock *rwlock) {
}
int32_t taosThreadRwlockUnlock(TdThreadRwlock *rwlock) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
if (1 == atomic_val_compare_exchange_8(&rwlock->excl, 1, 0)) {
ReleaseSRWLockExclusive(&rwlock->lock);
@@ -668,6 +726,7 @@ int32_t taosThreadRwlockUnlock(TdThreadRwlock *rwlock) {
}
int32_t taosThreadRwlockWrlock(TdThreadRwlock *rwlock) {
+ OS_PARAM_CHECK(rwlock);
#ifdef __USE_WIN_THREAD
AcquireSRWLockExclusive(&rwlock->lock);
atomic_store_8(&rwlock->excl, 1);
@@ -686,6 +745,7 @@ int32_t taosThreadRwlockAttrDestroy(TdThreadRwlockAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_rwlockattr_destroy(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -696,6 +756,7 @@ int32_t taosThreadRwlockAttrDestroy(TdThreadRwlockAttr *attr) {
}
int32_t taosThreadRwlockAttrGetPshared(const TdThreadRwlockAttr *attr, int32_t *pshared) {
+ OS_PARAM_CHECK(pshared);
#ifdef __USE_WIN_THREAD
if (pshared) *pshared = PTHREAD_PROCESS_PRIVATE;
return 0;
@@ -713,6 +774,7 @@ int32_t taosThreadRwlockAttrInit(TdThreadRwlockAttr *attr) {
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_rwlockattr_init(attr);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -726,6 +788,7 @@ int32_t taosThreadRwlockAttrSetPshared(TdThreadRwlockAttr *attr, int32_t pshared
#ifdef __USE_WIN_THREAD
return 0;
#else
+ OS_PARAM_CHECK(attr);
int32_t code = pthread_rwlockattr_setpshared(attr, pshared);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -756,6 +819,7 @@ int32_t taosThreadSetCancelType(int32_t type, int32_t *oldtype) {
}
int32_t taosThreadSetSchedParam(TdThread thread, int32_t policy, const struct sched_param *param) {
+ OS_PARAM_CHECK(param);
int32_t code = pthread_setschedparam(thread, policy, param);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -765,6 +829,7 @@ int32_t taosThreadSetSchedParam(TdThread thread, int32_t policy, const struct sc
}
int32_t taosThreadSetSpecific(TdThreadKey key, const void *value) {
+ OS_PARAM_CHECK(value);
int32_t code = pthread_setspecific(key, value);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
@@ -774,6 +839,7 @@ int32_t taosThreadSetSpecific(TdThreadKey key, const void *value) {
}
int32_t taosThreadSpinDestroy(TdThreadSpinlock *lock) {
+ OS_PARAM_CHECK(lock);
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_destroy((pthread_mutex_t *)lock);
#else
@@ -787,6 +853,7 @@ int32_t taosThreadSpinDestroy(TdThreadSpinlock *lock) {
}
int32_t taosThreadSpinInit(TdThreadSpinlock *lock, int32_t pshared) {
+ OS_PARAM_CHECK(lock);
#ifdef TD_USE_SPINLOCK_AS_MUTEX
if (pshared != 0) return TSDB_CODE_INVALID_PARA;
return pthread_mutex_init((pthread_mutex_t *)lock, NULL);
@@ -801,6 +868,7 @@ int32_t taosThreadSpinInit(TdThreadSpinlock *lock, int32_t pshared) {
}
int32_t taosThreadSpinLock(TdThreadSpinlock *lock) {
+ OS_PARAM_CHECK(lock);
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_lock((pthread_mutex_t *)lock);
#else
@@ -814,6 +882,7 @@ int32_t taosThreadSpinLock(TdThreadSpinlock *lock) {
}
int32_t taosThreadSpinTrylock(TdThreadSpinlock *lock) {
+ OS_PARAM_CHECK(lock);
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_trylock((pthread_mutex_t *)lock);
#else
@@ -826,6 +895,7 @@ int32_t taosThreadSpinTrylock(TdThreadSpinlock *lock) {
}
int32_t taosThreadSpinUnlock(TdThreadSpinlock *lock) {
+ OS_PARAM_CHECK(lock);
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_unlock((pthread_mutex_t *)lock);
#else
@@ -843,6 +913,7 @@ void taosThreadTestCancel(void) {
}
void taosThreadClear(TdThread *thread) {
+ if (!thread) return;
(void)memset(thread, 0, sizeof(TdThread));
}
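The checks added across osThread.c lean on the OS_PARAM_CHECK macro from the os headers; its definition is not part of this diff, so the following is only a plausible reconstruction (the real macro may also set terrno):

// Hypothetical sketch for illustration; see the actual definition in the os headers.
#define OS_PARAM_CHECK(_p)            \
  do {                                \
    if ((_p) == NULL) {               \
      return TSDB_CODE_INVALID_PARA;  \
    }                                 \
  } while (0)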
diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c
index d4d9936154..60339fc646 100644
--- a/source/os/src/osTime.c
+++ b/source/os/src/osTime.c
@@ -81,6 +81,7 @@ static const char *am_pm[2] = {"AM", "PM"};
#endif
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm) {
+ if (!buf || !fmt || !tm) return NULL;
#ifdef WINDOWS
char c;
const char *bp;
@@ -345,6 +346,9 @@ char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm) {
}
int32_t taosGetTimeOfDay(struct timeval *tv) {
+ if (tv == NULL) {
+ return TSDB_CODE_INVALID_PARA;
+ }
int32_t code = 0;
#ifdef WINDOWS
LARGE_INTEGER t;
@@ -365,12 +369,15 @@ int32_t taosGetTimeOfDay(struct timeval *tv) {
#endif
}
-time_t taosTime(time_t *t) {
+int32_t taosTime(time_t *t) {
+ if (t == NULL) {
+ return TSDB_CODE_INVALID_PARA;
+ }
time_t r = time(t);
if (r == (time_t)-1) {
- terrno = TAOS_SYSTEM_ERROR(errno);
+ return TAOS_SYSTEM_ERROR(errno);
}
- return r;
+ return 0;
}
/*
diff --git a/source/os/test/osSemaphoreTests.cpp b/source/os/test/osSemaphoreTests.cpp
index 1576d2845d..1e412f5149 100644
--- a/source/os/test/osSemaphoreTests.cpp
+++ b/source/os/test/osSemaphoreTests.cpp
@@ -245,3 +245,12 @@ TEST(osSemaphoreTests, Performance4_2) {
(void)tsem2_destroy(&sem);
}
}
+
+TEST(osSemaphoreTests, GetPID) {
+#ifdef LINUX
+ pid_t pid = 0;
+ int32_t ret = taosGetPIdByName("osSemaphoreTest", &pid);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(pid, taosGetPId());
+#endif
+}
diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c
index 99d91700a2..68bbbb7e99 100644
--- a/source/util/src/tanalytics.c
+++ b/source/util/src/tanalytics.c
@@ -34,7 +34,7 @@ typedef struct {
} SCurlResp;
static SAlgoMgmt tsAlgos = {0};
-static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen);
+static int32_t taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen);
const char *taosAnalAlgoStr(EAnalAlgoType type) {
switch (type) {
@@ -127,28 +127,44 @@ void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {
}
bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) {
- char buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0};
- int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName);
+ char buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0};
+ char *pStart = NULL;
+ char *pEnd = NULL;
- char *pos1 = strstr(option, buf);
- char *pos2 = strstr(option, ANAL_ALGO_SPLIT);
- if (pos1 != NULL) {
- if (optMaxLen > 0) {
- int32_t copyLen = optMaxLen;
- if (pos2 != NULL) {
- copyLen = (int32_t)(pos2 - pos1 - strlen(optName));
- copyLen = MIN(copyLen, optMaxLen);
- }
- tstrncpy(optValue, pos1 + bufLen, copyLen);
- }
- return true;
- } else {
+ pStart = strstr(option, optName);
+ if (pStart == NULL) {
return false;
}
+
+ pEnd = strstr(pStart, ANAL_ALGO_SPLIT);
+ if (optMaxLen > 0) {
+ if (pEnd > pStart) {
+ int32_t len = (int32_t)(pEnd - pStart);
+ len = MIN(len + 1, TSDB_ANALYTIC_ALGO_OPTION_LEN);
+ tstrncpy(buf, pStart, len);
+ } else {
+ int32_t len = MIN(tListLen(buf), strlen(pStart) + 1);
+ tstrncpy(buf, pStart, len);
+ }
+
+ char *pRight = strstr(buf, "=");
+ if (pRight == NULL) {
+ return false;
+ } else {
+ pRight += 1;
+ }
+
+ int32_t unused = strtrim(pRight);
+
+ int32_t vLen = MIN(optMaxLen, strlen(pRight) + 1);
+ tstrncpy(optValue, pRight, vLen);
+ }
+
+ return true;
}
bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValue) {
- char buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0};
+ char buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0};
int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName);
char *pos1 = strstr(option, buf);
@@ -163,7 +179,7 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValu
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) {
int32_t code = 0;
- char name[TSDB_ANAL_ALGO_KEY_LEN] = {0};
+ char name[TSDB_ANALYTIC_ALGO_KEY_LEN] = {0};
int32_t nameLen = 1 + tsnprintf(name, sizeof(name) - 1, "%d:%s", type, algoName);
char *unused = strntolower(name, name, nameLen);
@@ -175,7 +191,7 @@ int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url,
uDebug("algo:%s, type:%s, url:%s", algoName, taosAnalAlgoStr(type), url);
} else {
url[0] = 0;
- terrno = TSDB_CODE_ANAL_ALGO_NOT_FOUND;
+ terrno = TSDB_CODE_ANA_ALGO_NOT_FOUND;
code = terrno;
uError("algo:%s, type:%s, url not found", algoName, taosAnalAlgoStr(type));
}
@@ -276,16 +292,16 @@ _OVER:
return code;
}
-SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) {
+SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) {
int32_t code = -1;
char *pCont = NULL;
int64_t contentLen;
SJson *pJson = NULL;
SCurlResp curlRsp = {0};
- if (type == ANAL_HTTP_TYPE_GET) {
+ if (type == ANALYTICS_HTTP_TYPE_GET) {
if (taosCurlGetRequest(url, &curlRsp) != 0) {
- terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS;
+ terrno = TSDB_CODE_ANA_URL_CANT_ACCESS;
goto _OVER;
}
} else {
@@ -295,20 +311,20 @@ SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBu
goto _OVER;
}
if (taosCurlPostRequest(url, &curlRsp, pCont, contentLen) != 0) {
- terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS;
+ terrno = TSDB_CODE_ANA_URL_CANT_ACCESS;
goto _OVER;
}
}
if (curlRsp.data == NULL || curlRsp.dataLen == 0) {
- terrno = TSDB_CODE_ANAL_URL_RSP_IS_NULL;
+ terrno = TSDB_CODE_ANA_URL_RSP_IS_NULL;
goto _OVER;
}
pJson = tjsonParse(curlRsp.data);
if (pJson == NULL) {
if (curlRsp.data[0] == '<') {
- terrno = TSDB_CODE_ANAL_ANODE_RETURN_ERROR;
+ terrno = TSDB_CODE_ANA_ANODE_RETURN_ERROR;
} else {
terrno = TSDB_CODE_INVALID_JSON_FORMAT;
}
@@ -360,7 +376,7 @@ _OVER:
return code;
}
-static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) {
+static int32_t taosAnalJsonBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) {
char buf[64] = {0};
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %" PRId64 ",\n", optName, optVal);
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
@@ -369,7 +385,7 @@ static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, i
return 0;
}
-static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) {
+static int32_t taosAnalJsonBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) {
char buf[128] = {0};
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": \"%s\",\n", optName, optVal);
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
@@ -378,7 +394,7 @@ static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, c
return 0;
}
-static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) {
+static int32_t taosAnalJsonBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) {
char buf[128] = {0};
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %f,\n", optName, optVal);
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
@@ -387,7 +403,7 @@ static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName,
return 0;
}
-static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t bufLen) {
+static int32_t taosAnalJsonBufWriteStr(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen) {
if (bufLen <= 0) {
bufLen = strlen(buf);
}
@@ -397,9 +413,9 @@ static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t
return 0;
}
-static int32_t taosAnalJsonBufWriteStart(SAnalBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); }
+static int32_t taosAnalJsonBufWriteStart(SAnalyticBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); }
-static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
+static int32_t tsosAnalJsonBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) {
pBuf->filePtr = taosOpenFile(pBuf->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH);
if (pBuf->filePtr == NULL) {
return terrno;
@@ -409,7 +425,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
if (pBuf->pCols == NULL) return TSDB_CODE_OUT_OF_MEMORY;
pBuf->numOfCols = numOfCols;
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
return taosAnalJsonBufWriteStart(pBuf);
}
@@ -426,7 +442,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
return taosAnalJsonBufWriteStart(pBuf);
}
-static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
+static int32_t taosAnalJsonBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
char buf[128] = {0};
bool first = (colIndex == 0);
bool last = (colIndex == pBuf->numOfCols - 1);
@@ -452,16 +468,16 @@ static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int
return 0;
}
-static int32_t taosAnalJsonBufWriteDataBegin(SAnalBuf *pBuf) {
+static int32_t taosAnalJsonBufWriteDataBegin(SAnalyticBuf *pBuf) {
return taosAnalJsonBufWriteStr(pBuf, "\"data\": [\n", 0);
}
-static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) {
+static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) {
if (bufLen <= 0) {
bufLen = strlen(buf);
}
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
return terrno;
}
@@ -474,11 +490,11 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, in
return 0;
}
-static int32_t taosAnalJsonBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) {
+static int32_t taosAnalJsonBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) {
return taosAnalJsonBufWriteStrUseCol(pBuf, "[\n", 0, colIndex);
}
-static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
+static int32_t taosAnalJsonBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) {
if (colIndex == pBuf->numOfCols - 1) {
return taosAnalJsonBufWriteStrUseCol(pBuf, "\n]\n", 0, colIndex);
@@ -487,7 +503,7 @@ static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
}
}
-static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
+static int32_t taosAnalJsonBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
char buf[64];
int32_t bufLen = 0;
@@ -541,12 +557,12 @@ static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int
return taosAnalJsonBufWriteStrUseCol(pBuf, buf, bufLen, colIndex);
}
-static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) {
+static int32_t taosAnalJsonBufWriteDataEnd(SAnalyticBuf *pBuf) {
int32_t code = 0;
char *pCont = NULL;
int64_t contLen = 0;
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
@@ -570,14 +586,14 @@ static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) {
return taosAnalJsonBufWriteStr(pBuf, "],\n", 0);
}
-static int32_t taosAnalJsonBufWriteEnd(SAnalBuf *pBuf) {
+static int32_t taosAnalJsonBufWriteEnd(SAnalyticBuf *pBuf) {
int32_t code = taosAnalJsonBufWriteOptInt(pBuf, "rows", pBuf->pCols[0].numOfRows);
if (code != 0) return code;
return taosAnalJsonBufWriteStr(pBuf, "\"protocol\": 1.0\n}", 0);
}
-int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
+int32_t taosAnalJsonBufClose(SAnalyticBuf *pBuf) {
int32_t code = taosAnalJsonBufWriteEnd(pBuf);
if (code != 0) return code;
@@ -588,7 +604,7 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
if (code != 0) return code;
}
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
if (pCol->filePtr != NULL) {
@@ -603,14 +619,14 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
return 0;
}
-void taosAnalBufDestroy(SAnalBuf *pBuf) {
+void taosAnalBufDestroy(SAnalyticBuf *pBuf) {
if (pBuf->fileName[0] != 0) {
if (pBuf->filePtr != NULL) (void)taosCloseFile(&pBuf->filePtr);
// taosRemoveFile(pBuf->fileName);
pBuf->fileName[0] = 0;
}
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
if (pCol->fileName[0] != 0) {
@@ -627,102 +643,102 @@ void taosAnalBufDestroy(SAnalBuf *pBuf) {
pBuf->numOfCols = 0;
}
-int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return tsosAnalJsonBufOpen(pBuf, numOfCols);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteOptStr(pBuf, optName, optVal);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteOptInt(pBuf, optName, optVal);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteOptFloat(pBuf, optName, optVal);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteColMeta(pBuf, colIndex, colType, colName);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteDataBegin(pBuf);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteColBegin(pBuf, colIndex);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteColData(pBuf, colIndex, colType, colValue);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteColEnd(pBuf, colIndex);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufWriteDataEnd(pBuf);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-int32_t taosAnalBufClose(SAnalBuf *pBuf) {
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+int32_t taosAnalBufClose(SAnalyticBuf *pBuf) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufClose(pBuf);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
-static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen) {
+static int32_t taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen) {
*ppCont = NULL;
*pContLen = 0;
- if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
+ if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
return taosAnalJsonBufGetCont(pBuf->fileName, ppCont, pContLen);
} else {
- return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
+ return TSDB_CODE_ANA_BUF_INVALID_TYPE;
}
}
@@ -730,7 +746,7 @@ static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContL
int32_t taosAnalyticsInit() { return 0; }
void taosAnalyticsCleanup() {}
-SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) { return NULL; }
+SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) { return NULL; }
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; }
bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { return true; }
@@ -738,18 +754,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV
int64_t taosAnalGetVersion() { return 0; }
void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {}
-int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { return 0; }
-int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) { return 0; }
-int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) { return 0; }
-int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) { return 0; }
-int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; }
-int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) { return 0; }
-int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) { return 0; }
-int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; }
-int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { return 0; }
-int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) { return 0; }
-int32_t taosAnalBufClose(SAnalBuf *pBuf) { return 0; }
-void taosAnalBufDestroy(SAnalBuf *pBuf) {}
+int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { return 0; }
+int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) { return 0; }
+int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) { return 0; }
+int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) { return 0; }
+int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; }
+int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) { return 0; }
+int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; }
+int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; }
+int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; }
+int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) { return 0; }
+int32_t taosAnalBufClose(SAnalyticBuf *pBuf) { return 0; }
+void taosAnalBufDestroy(SAnalyticBuf *pBuf) {}
const char *taosAnalAlgoStr(EAnalAlgoType algoType) { return 0; }
EAnalAlgoType taosAnalAlgoInt(const char *algoName) { return 0; }
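For reference, the reworked taosAnalGetOptStr above first isolates the "name=value" fragment, then splits on '=' and trims the value. Below is a standalone, libc-only sketch of that flow (simplified illustration, not the TDengine API; it assumes "," as the ANAL_ALGO_SPLIT token, which may differ from the real macro):

#include <stdio.h>
#include <string.h>

// Simplified stand-in for the new extraction flow; not part of this patch.
static int getOptStr(const char *option, const char *name, char *out, size_t outLen) {
  const char *start = strstr(option, name);      // locate "name=..." inside the option string
  if (start == NULL) return 0;

  char buf[128] = {0};
  const char *end = strstr(start, ",");          // cut at the next option, if any
  size_t len = (end != NULL) ? (size_t)(end - start) : strlen(start);
  if (len >= sizeof(buf)) len = sizeof(buf) - 1;
  memcpy(buf, start, len);

  char *value = strchr(buf, '=');                // keep only the text right of '='
  if (value == NULL) return 0;
  value++;
  while (*value == ' ') value++;                 // crude trim, standing in for strtrim()

  snprintf(out, outLen, "%s", value);
  return 1;
}

int main(void) {
  char algo[64] = {0};
  if (getOptStr("algo=holtwinters,period=10", "algo", algo, sizeof(algo))) {
    printf("algo -> %s\n", algo);                // prints: algo -> holtwinters
  }
  return 0;
}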
diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c
index d6852b0566..d2a2b1fb9a 100644
--- a/source/util/src/tconfig.c
+++ b/source/util/src/tconfig.c
@@ -25,7 +25,7 @@
#include "tunit.h"
#include "tutil.h"
-#define CFG_NAME_PRINT_LEN 24
+#define CFG_NAME_PRINT_LEN 32
#define CFG_SRC_PRINT_LEN 12
struct SConfig {
@@ -881,11 +881,26 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) {
case CFG_DTYPE_CHARSET:
case CFG_DTYPE_TIMEZONE:
case CFG_DTYPE_NONE:
+ if (strcasecmp(pItem->name, "dataDir") == 0) {
+ size_t sz = taosArrayGetSize(pItem->array);
+ if (sz > 1) {
+ for (size_t j = 0; j < sz; ++j) {
+ SDiskCfg *pDiskCfg = taosArrayGet(pItem->array, j);
+ if (dump) {
+ (void)printf("%s %s %s l:%d p:%d d:%"PRIi8"\n", src, name, pDiskCfg->dir, pDiskCfg->level, pDiskCfg->primary, pDiskCfg->disable);
+ } else {
+ uInfo("%s %s %s l:%d p:%d d:%"PRIi8, src, name, pDiskCfg->dir, pDiskCfg->level, pDiskCfg->primary, pDiskCfg->disable);
+ }
+ }
+ break;
+ }
+ }
if (dump) {
(void)printf("%s %s %s\n", src, name, pItem->str);
} else {
uInfo("%s %s %s", src, name, pItem->str);
}
+
break;
}
}
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 0d8a85155a..9e8a85d301 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -361,13 +361,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO, "Anode too many algori
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_LONG_ALGO_NAME, "Anode too long algorithm name")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE, "Anode too many algorithm type")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_RSP_IS_NULL, "Analysis service response is NULL")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_CANT_ACCESS, "Analysis service can't access")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_FOUND, "Analysis algorithm not found")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_LOAD, "Analysis algorithm not loaded")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_BUF_INVALID_TYPE, "Analysis invalid buffer type")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_RETURN_ERROR, "Analysis failed since anode return error")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_RSP_IS_NULL, "Analysis service response is NULL")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_CANT_ACCESS, "Analysis service can't be accessed")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_FOUND, "Analysis algorithm is missing")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_LOAD, "Analysis algorithm not loaded")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_BUF_INVALID_TYPE, "Analysis invalid buffer type")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_RETURN_ERROR, "Analysis failed since anode return error")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_WN_DATA, "White-noise data not processed")
// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")
@@ -853,7 +854,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_IVLD_STATUS, "Invalid task status
TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_CONFLICT_EVENT, "Stream conflict event")
TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INTERNAL_ERROR, "Stream internal error")
TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_NOT_LEADER, "Stream task not on leader vnode")
-TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INPUTQ_FULL, "Task input queue is full")
+TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INPUTQ_FULL, "Task input queue is full")
+TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INVLD_CHKPT, "Invalid checkpoint trigger msg")
// TDLite
TAOS_DEFINE_ERROR(TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS, "Invalid TDLite open flags")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 76d0139521..f70b145dbc 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -154,16 +154,26 @@ static int32_t taosStartLog() {
return 0;
}
-static void getDay(char *buf, int32_t bufSize) {
- time_t t = taosTime(NULL);
+static int32_t getDay(char *buf, int32_t bufSize) {
+ time_t t;
+ int32_t code = taosTime(&t);
+  if (code != 0) {
+ return code;
+ }
struct tm tmInfo;
if (taosLocalTime(&t, &tmInfo, buf, bufSize) != NULL) {
TAOS_UNUSED(strftime(buf, bufSize, "%Y-%m-%d", &tmInfo));
}
+ return 0;
}
static int64_t getTimestampToday() {
- time_t t = taosTime(NULL);
+ time_t t;
+ int32_t code = taosTime(&t);
+ if (code != 0) {
+ uError("failed to get time, reason:%s", tstrerror(code));
+ return 0;
+ }
struct tm tm;
if (taosLocalTime(&t, &tm, NULL, 0) == NULL) {
return 0;
@@ -203,7 +213,11 @@ int32_t taosInitSlowLog() {
char name[PATH_MAX + TD_TIME_STR_LEN] = {0};
char day[TD_TIME_STR_LEN] = {0};
- getDay(day, sizeof(day));
+ int32_t code = getDay(day, sizeof(day));
+ if (code != 0) {
+ (void)printf("failed to get day, reason:%s\n", tstrerror(code));
+ return code;
+ }
(void)snprintf(name, PATH_MAX + TD_TIME_STR_LEN, "%s.%s", tsLogObj.slowLogName, day);
tsLogObj.timestampToday = getTimestampToday();
@@ -434,7 +448,12 @@ static void taosOpenNewSlowLogFile() {
atomic_store_32(&tsLogObj.slowHandle->lock, 0);
char day[TD_TIME_STR_LEN] = {0};
- getDay(day, sizeof(day));
+ int32_t code = getDay(day, sizeof(day));
+ if (code != 0) {
+ uError("failed to get day, reason:%s", tstrerror(code));
+ (void)taosThreadMutexUnlock(&tsLogObj.logMutex);
+ return;
+ }
TdFilePtr pFile = NULL;
char name[PATH_MAX + TD_TIME_STR_LEN] = {0};
(void)snprintf(name, PATH_MAX + TD_TIME_STR_LEN, "%s.%s", tsLogObj.slowLogName, day);
diff --git a/tests/army/alter/alterConfig.py b/tests/army/alter/alterConfig.py
index f8c52551e3..6a22dd014f 100644
--- a/tests/army/alter/alterConfig.py
+++ b/tests/army/alter/alterConfig.py
@@ -100,6 +100,79 @@ class TDTestCase(TBase):
tdSql.query('show dnodes')
tdSql.checkData(0, 3, "64")
+ def checkKeyValue(self, res, key, value, ikey = 0, ival = 1):
+ result = False
+ for row in res:
+ if row[ikey] == key:
+ if row[ival] != value:
+ raise Exception(f"key:{key} value:{row[ival]} != {value}")
+ else:
+ tdLog.info(f"key:{key} value:{row[ival]} == {value}")
+ result = True
+ break
+ if not result:
+ raise Exception(f"key:{key} not found")
+
+ def checkRows(self, sql, nExpect, nRetry):
+ for i in range(nRetry):
+ res = tdSql.getResult(sql)
+ if len(res) == nExpect:
+ break
+ time.sleep(1)
+ if len(res) != nExpect:
+ raise Exception(f"rows:{len(res)} != {nExpect}")
+
+ def alterBypassFlag(self):
+ """Add test case for altering bypassFlag(TD-32907)
+ """
+ tdSql.execute(f"drop database if exists db")
+ tdSql.execute(f"create database db")
+ tdSql.execute("use db")
+ self.checkKeyValue(tdSql.getResult("show local variables;"), "bypassFlag", "0")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2)
+ tdSql.execute("alter local 'bypassFlag 1'")
+ self.checkKeyValue(tdSql.getResult("show local variables;"), "bypassFlag", "1")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2)
+ tdSql.execute("create table stb0(ts timestamp, c0 int) tags(t0 int)")
+ tdSql.execute("create table ctb0 using stb0 tags(0)")
+ tdSql.execute("insert into ctb0 values(now, 1)")
+ tdSql.query("select * from stb0")
+ tdSql.checkRows(0)
+ tdSql.execute("alter local 'bypassFlag 0'")
+ tdSql.execute("alter all dnodes 'bypassFlag 2'")
+ self.checkKeyValue(tdSql.getResult("show local variables"), "bypassFlag", "0")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "2", 1, 2)
+ tdSql.execute("insert into ctb0 values(now, 2)")
+ tdSql.query("select * from stb0")
+ tdSql.checkRows(0)
+ tdSql.execute("alter all dnodes 'bypassFlag 4'")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "4", 1, 2)
+ tdSql.execute("insert into ctb0 values(now, 4)")
+ tdSql.execute("insert into ctb1 using stb0 tags(1) values(now, 10)")
+ tdSql.query("select * from stb0")
+ tdSql.checkRows(0)
+ tdSql.query("show db.tables")
+ tdSql.checkRows(2)
+ tdSql.execute("alter all dnodes 'bypassFlag 8'")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "8", 1, 2)
+ tdSql.execute("insert into ctb0 values(now, 8)")
+ tdSql.execute("insert into ctb1 values(now, 18)")
+ tdSql.query("select * from stb0")
+ tdSql.checkRows(2)
+ tdSql.execute("flush database db")
+ self.checkRows("select * from stb0", 0, 10)
+ tdSql.execute("alter all dnodes 'bypassFlag 0'")
+ self.checkKeyValue(tdSql.getResult("show local variables"), "bypassFlag", "0")
+ self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2)
+ tdSql.execute("insert into ctb0 values(now, 80)")
+ tdSql.execute("insert into ctb1 values(now, 180)")
+ tdSql.query("select * from stb0")
+ tdSql.checkRows(2)
+ tdSql.execute("flush database db")
+ for i in range(5):
+ self.checkRows("select * from stb0", 2, 1)
+ time.sleep(1)
+
# run
def run(self):
tdLog.debug(f"start to excute {__file__}")
@@ -110,6 +183,8 @@ class TDTestCase(TBase):
self.alterTtlConfig()
# TS-5390
self.alterCachemodel()
+ # TD-32907
+ self.alterBypassFlag()
tdLog.success(f"{__file__} successfully executed")
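The checkKeyValue/checkRows helpers added above poll a tdSql result set for a configuration key or an expected row count, retrying with a one-second sleep until flushed data becomes visible. The following is a minimal standalone sketch of that same pattern, kept outside the test framework; fetch_rows is a hypothetical stand-in for tdSql.getResult(sql), and the sample rows are made up for illustration.

# Sketch of the key/value lookup and retry pattern used by checkKeyValue/checkRows.
import time

def check_key_value(rows, key, value, ikey=0, ival=1):
    # Find the row whose ikey-th column equals key and compare its ival-th column.
    for row in rows:
        if row[ikey] == key:
            if row[ival] != value:
                raise Exception(f"key:{key} value:{row[ival]} != {value}")
            return
    raise Exception(f"key:{key} not found")

def check_rows(fetch_rows, sql, n_expect, n_retry):
    # Re-run the query until the expected row count appears or retries run out.
    rows = []
    for _ in range(n_retry):
        rows = fetch_rows(sql)          # e.g. tdSql.getResult(sql) in the real test
        if len(rows) == n_expect:
            return
        time.sleep(1)                   # wait for the flush/commit to land
    raise Exception(f"rows:{len(rows)} != {n_expect}")

if __name__ == "__main__":
    # Fake result shaped like "show dnode 1 variables like 'bypassFlag'"
    variables = [(1, "bypassFlag", "0")]
    check_key_value(variables, "bypassFlag", "0", ikey=1, ival=2)
    check_rows(lambda _sql: [("row1",), ("row2",)], "select * from stb0", 2, 3)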
diff --git a/tests/army/output.txt b/tests/army/output.txt
new file mode 100644
index 0000000000..ed3bd5da1a
--- /dev/null
+++ b/tests/army/output.txt
@@ -0,0 +1,91 @@
+[10/28 19:12:21.666563] SUCC: created database (db_sub)
+[10/28 19:12:21.694603] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 19:12:21.823202] SUCC: Spent 0.1290 seconds to create 1000 table(s) with 8 thread(s) speed: 7752 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 19:12:22.127442] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 441047.79 records/second
+[10/28 19:12:22.128649] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 440895.33 records/second
+[10/28 19:12:22.129478] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 440151.69 records/second
+[10/28 19:12:22.133756] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 433268.05 records/second
+[10/28 19:12:22.135211] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 430329.63 records/second
+[10/28 19:12:22.137335] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 425800.08 records/second
+[10/28 19:12:22.138252] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 426330.15 records/second
+[10/28 19:12:22.141351] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 422778.64 records/second
+[10/28 19:12:22.141585] SUCC: Spent 0.311648 (real 0.289041) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 3208748.33 (real 3459716.79) records/second
+[10/28 19:12:22.141590] SUCC: insert delay, min: 0.9600ms, avg: 2.3123ms, p90: 3.1790ms, p95: 3.5080ms, p99: 4.2230ms, max: 4.9040ms
+[10/28 19:28:50.798427] SUCC: created database (db_sub)
+[10/28 19:28:50.828326] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 19:28:50.936429] SUCC: Spent 0.1080 seconds to create 1000 table(s) with 8 thread(s) speed: 9259 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 19:28:51.187235] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 539204.48 records/second
+[10/28 19:28:51.189941] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 532329.43 records/second
+[10/28 19:28:51.191551] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 530954.66 records/second
+[10/28 19:28:51.191858] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 529259.59 records/second
+[10/28 19:28:51.192459] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 530229.44 records/second
+[10/28 19:28:51.195372] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 522099.42 records/second
+[10/28 19:28:51.197727] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 516620.72 records/second
+[10/28 19:28:51.197883] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 517125.12 records/second
+[10/28 19:28:51.198123] SUCC: Spent 0.255536 (real 0.237135) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 3913342.93 (real 4217007.19) records/second
+[10/28 19:28:51.198130] SUCC: insert delay, min: 0.9200ms, avg: 1.8971ms, p90: 2.6870ms, p95: 2.9520ms, p99: 3.5880ms, max: 4.0710ms
+[10/28 19:31:44.377691] SUCC: created database (db_sub)
+[10/28 19:31:44.392998] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 19:31:44.696768] SUCC: Spent 0.3040 seconds to create 1000 table(s) with 8 thread(s) speed: 3289 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 19:31:45.126910] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 304775.47 records/second
+[10/28 19:31:45.131979] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 301117.75 records/second
+[10/28 19:31:45.135106] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 299854.39 records/second
+[10/28 19:31:45.135675] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 298322.24 records/second
+[10/28 19:31:45.137069] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 297733.89 records/second
+[10/28 19:31:45.137952] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 296900.13 records/second
+[10/28 19:31:45.138834] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 295170.54 records/second
+[10/28 19:31:45.145048] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 291966.71 records/second
+[10/28 19:31:45.145369] SUCC: Spent 0.442506 (real 0.419200) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 2259856.36 (real 2385496.18) records/second
+[10/28 19:31:45.145377] SUCC: insert delay, min: 1.0400ms, avg: 3.3536ms, p90: 5.3120ms, p95: 7.9660ms, p99: 13.1570ms, max: 19.1410ms
+[10/28 19:44:19.873056] SUCC: created database (db_sub)
+[10/28 19:44:19.904701] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 19:44:20.053846] SUCC: Spent 0.1490 seconds to create 1000 table(s) with 8 thread(s) speed: 6711 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 19:44:20.328698] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 485742.49 records/second
+[10/28 19:44:20.330777] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 481686.29 records/second
+[10/28 19:44:20.331290] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 480911.65 records/second
+[10/28 19:44:20.331665] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 481043.06 records/second
+[10/28 19:44:20.333451] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 477172.09 records/second
+[10/28 19:44:20.334745] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 475675.84 records/second
+[10/28 19:44:20.335056] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 474158.37 records/second
+[10/28 19:44:20.337919] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 470816.89 records/second
+[10/28 19:44:20.338144] SUCC: Spent 0.277921 (real 0.261310) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 3598144.80 (real 3826872.30) records/second
+[10/28 19:44:20.338153] SUCC: insert delay, min: 0.9180ms, avg: 2.0905ms, p90: 2.6490ms, p95: 3.0620ms, p99: 4.1480ms, max: 4.7840ms
+[10/28 19:58:27.100989] SUCC: created database (db_sub)
+[10/28 19:58:27.115572] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 19:58:27.362948] SUCC: Spent 0.2470 seconds to create 1000 table(s) with 8 thread(s) speed: 4049 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 19:58:27.807669] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 291891.03 records/second
+[10/28 19:58:27.818785] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 285413.54 records/second
+[10/28 19:58:27.819649] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 284193.61 records/second
+[10/28 19:58:27.819844] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 284352.64 records/second
+[10/28 19:58:27.820170] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 284576.63 records/second
+[10/28 19:58:27.821489] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 283781.33 records/second
+[10/28 19:58:27.822061] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 283112.24 records/second
+[10/28 19:58:27.823513] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 282730.59 records/second
+[10/28 19:58:27.823779] SUCC: Spent 0.455783 (real 0.438625) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 2194026.54 (real 2279851.81) records/second
+[10/28 19:58:27.823786] SUCC: insert delay, min: 0.9780ms, avg: 3.5090ms, p90: 5.5650ms, p95: 6.8600ms, p99: 10.6010ms, max: 13.4400ms
+[10/28 20:00:06.417182] SUCC: created database (db_sub)
+[10/28 20:00:06.448202] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 20:00:06.596961] SUCC: Spent 0.1480 seconds to create 1000 table(s) with 8 thread(s) speed: 6757 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 20:00:06.895455] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 443978.76 records/second
+[10/28 20:00:06.896986] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 442549.94 records/second
+[10/28 20:00:06.897536] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 440927.99 records/second
+[10/28 20:00:06.898905] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 439131.15 records/second
+[10/28 20:00:06.899024] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 439628.46 records/second
+[10/28 20:00:06.901861] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 435197.37 records/second
+[10/28 20:00:06.902305] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 434812.86 records/second
+[10/28 20:00:06.904698] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 433406.26 records/second
+[10/28 20:00:06.904905] SUCC: Spent 0.301788 (real 0.284949) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 3313584.37 (real 3509399.93) records/second
+[10/28 20:00:06.904912] SUCC: insert delay, min: 0.8770ms, avg: 2.2796ms, p90: 3.1340ms, p95: 3.6480ms, p99: 4.8280ms, max: 6.0880ms
+[10/28 20:05:34.756207] SUCC: created database (db_sub)
+[10/28 20:05:34.784793] INFO: start creating 1000 table(s) with 8 thread(s)
+[10/28 20:05:34.927068] SUCC: Spent 0.1430 seconds to create 1000 table(s) with 8 thread(s) speed: 6993 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
+[10/28 20:05:35.213741] SUCC: thread[4] progressive mode, completed total inserted rows: 125000, 466952.82 records/second
+[10/28 20:05:35.215403] SUCC: thread[3] progressive mode, completed total inserted rows: 125000, 463804.68 records/second
+[10/28 20:05:35.221132] SUCC: thread[2] progressive mode, completed total inserted rows: 125000, 453322.31 records/second
+[10/28 20:05:35.221224] SUCC: thread[1] progressive mode, completed total inserted rows: 125000, 453671.11 records/second
+[10/28 20:05:35.222003] SUCC: thread[0] progressive mode, completed total inserted rows: 125000, 452641.07 records/second
+[10/28 20:05:35.222536] SUCC: thread[5] progressive mode, completed total inserted rows: 125000, 451796.89 records/second
+[10/28 20:05:35.223663] SUCC: thread[7] progressive mode, completed total inserted rows: 125000, 449643.52 records/second
+[10/28 20:05:35.225246] SUCC: thread[6] progressive mode, completed total inserted rows: 125000, 447768.68 records/second
+[10/28 20:05:35.225659] SUCC: Spent 0.290871 (real 0.274808) seconds to insert rows: 1000000 with 8 thread(s) into db_sub 3437950.16 (real 3638904.25) records/second
+[10/28 20:05:35.225666] SUCC: insert delay, min: 0.9360ms, avg: 2.1985ms, p90: 2.9290ms, p95: 3.4580ms, p99: 4.6030ms, max: 6.2660ms
diff --git a/tests/army/query/function/ans/interp.csv b/tests/army/query/function/ans/interp.csv
index e1ba236aa1..3eaccd887a 100644
--- a/tests/army/query/function/ans/interp.csv
+++ b/tests/army/query/function/ans/interp.csv
@@ -366,3 +366,652 @@ taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _i
2020-02-01 00:00:16.000 | td32727 | 10 | 10 | true | 1 |
2020-02-01 00:00:16.000 | td32727 | 15 | 15 | true | 1 |
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | NULL |
+ 2020-01-01 00:00:23.000 | true | NULL |
+ 2020-01-01 00:00:24.000 | true | NULL |
+ 2020-01-01 00:00:25.000 | true | NULL |
+ 2020-01-01 00:00:26.000 | true | NULL |
+ 2020-01-01 00:00:27.000 | true | NULL |
+ 2020-01-01 00:00:28.000 | true | NULL |
+ 2020-01-01 00:00:29.000 | true | NULL |
+ 2020-01-01 00:00:30.000 | true | NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 1 |
+ 2020-01-01 00:00:23.000 | true | 1 |
+ 2020-01-01 00:00:24.000 | true | 1 |
+ 2020-01-01 00:00:25.000 | true | 1 |
+ 2020-01-01 00:00:26.000 | true | 1 |
+ 2020-01-01 00:00:27.000 | true | 1 |
+ 2020-01-01 00:00:28.000 | true | 1 |
+ 2020-01-01 00:00:29.000 | true | 1 |
+ 2020-01-01 00:00:30.000 | true | 1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 21 |
+ 2020-01-01 00:00:23.000 | true | 21 |
+ 2020-01-01 00:00:24.000 | true | 21 |
+ 2020-01-01 00:00:25.000 | true | 21 |
+ 2020-01-01 00:00:26.000 | true | 21 |
+ 2020-01-01 00:00:27.000 | true | 21 |
+ 2020-01-01 00:00:28.000 | true | 21 |
+ 2020-01-01 00:00:29.000 | true | 21 |
+ 2020-01-01 00:00:30.000 | true | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | NULL |
+ 2020-01-01 00:00:23.000 | true | NULL |
+ 2020-01-01 00:00:24.000 | true | NULL |
+ 2020-01-01 00:00:25.000 | true | NULL |
+ 2020-01-01 00:00:26.000 | true | NULL |
+ 2020-01-01 00:00:27.000 | true | NULL |
+ 2020-01-01 00:00:28.000 | true | NULL |
+ 2020-01-01 00:00:29.000 | true | NULL |
+ 2020-01-01 00:00:30.000 | true | NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 1 |
+ 2020-01-01 00:00:23.000 | true | 1 |
+ 2020-01-01 00:00:24.000 | true | 1 |
+ 2020-01-01 00:00:25.000 | true | 1 |
+ 2020-01-01 00:00:26.000 | true | 1 |
+ 2020-01-01 00:00:27.000 | true | 1 |
+ 2020-01-01 00:00:28.000 | true | 1 |
+ 2020-01-01 00:00:29.000 | true | 1 |
+ 2020-01-01 00:00:30.000 | true | 1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 21 |
+ 2020-01-01 00:00:23.000 | true | 21 |
+ 2020-01-01 00:00:24.000 | true | 21 |
+ 2020-01-01 00:00:25.000 | true | 21 |
+ 2020-01-01 00:00:26.000 | true | 21 |
+ 2020-01-01 00:00:27.000 | true | 21 |
+ 2020-01-01 00:00:28.000 | true | 21 |
+ 2020-01-01 00:00:29.000 | true | 21 |
+ 2020-01-01 00:00:30.000 | true | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | NULL |
+ 2020-01-01 00:00:23.000 | true | NULL |
+ 2020-01-01 00:00:24.000 | true | NULL |
+ 2020-01-01 00:00:25.000 | true | NULL |
+ 2020-01-01 00:00:26.000 | true | NULL |
+ 2020-01-01 00:00:27.000 | true | NULL |
+ 2020-01-01 00:00:28.000 | true | NULL |
+ 2020-01-01 00:00:29.000 | true | NULL |
+ 2020-01-01 00:00:30.000 | true | NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 1 |
+ 2020-01-01 00:00:23.000 | true | 1 |
+ 2020-01-01 00:00:24.000 | true | 1 |
+ 2020-01-01 00:00:25.000 | true | 1 |
+ 2020-01-01 00:00:26.000 | true | 1 |
+ 2020-01-01 00:00:27.000 | true | 1 |
+ 2020-01-01 00:00:28.000 | true | 1 |
+ 2020-01-01 00:00:29.000 | true | 1 |
+ 2020-01-01 00:00:30.000 | true | 1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 3 |
+ 2020-01-01 00:00:05.000 | true | 3 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 6 |
+ 2020-01-01 00:00:08.000 | true | 6 |
+ 2020-01-01 00:00:09.000 | true | 6 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 10 |
+ 2020-01-01 00:00:12.000 | true | 10 |
+ 2020-01-01 00:00:13.000 | true | 10 |
+ 2020-01-01 00:00:14.000 | true | 10 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 21 |
+ 2020-01-01 00:00:23.000 | true | 21 |
+ 2020-01-01 00:00:24.000 | true | 21 |
+ 2020-01-01 00:00:25.000 | true | 21 |
+ 2020-01-01 00:00:26.000 | true | 21 |
+ 2020-01-01 00:00:27.000 | true | 21 |
+ 2020-01-01 00:00:28.000 | true | 21 |
+ 2020-01-01 00:00:29.000 | true | 21 |
+ 2020-01-01 00:00:30.000 | true | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 3 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 6 |
+ 2020-01-01 00:00:05.000 | true | 6 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 10 |
+ 2020-01-01 00:00:08.000 | true | 10 |
+ 2020-01-01 00:00:09.000 | true | 10 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 2 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 4 |
+ 2020-01-01 00:00:05.000 | true | 5 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 7 |
+ 2020-01-01 00:00:08.000 | true | 8 |
+ 2020-01-01 00:00:09.000 | true | 9 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 11 |
+ 2020-01-01 00:00:12.000 | true | 12 |
+ 2020-01-01 00:00:13.000 | true | 13 |
+ 2020-01-01 00:00:14.000 | true | 14 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 3 |
+ 2020-01-01 00:00:05.000 | true | 3 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 6 |
+ 2020-01-01 00:00:08.000 | true | 6 |
+ 2020-01-01 00:00:09.000 | true | 6 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 10 |
+ 2020-01-01 00:00:12.000 | true | 10 |
+ 2020-01-01 00:00:13.000 | true | 10 |
+ 2020-01-01 00:00:14.000 | true | 10 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 3 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 6 |
+ 2020-01-01 00:00:05.000 | true | 6 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 10 |
+ 2020-01-01 00:00:08.000 | true | 10 |
+ 2020-01-01 00:00:09.000 | true | 10 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 2 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 4 |
+ 2020-01-01 00:00:05.000 | true | 5 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 7 |
+ 2020-01-01 00:00:08.000 | true | 8 |
+ 2020-01-01 00:00:09.000 | true | 9 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 11 |
+ 2020-01-01 00:00:12.000 | true | 12 |
+ 2020-01-01 00:00:13.000 | true | 13 |
+ 2020-01-01 00:00:14.000 | true | 14 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | NULL |
+ 2020-01-01 00:00:01.000 | true | NULL |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | true | NULL |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | true | NULL |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | true | NULL |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 1 |
+ 2020-01-01 00:00:01.000 | true | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | true | 1 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | true | 1 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | true | 1 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 15 |
+ 2020-01-01 00:00:01.000 | true | 15 |
+ 2020-01-01 00:00:02.000 | true | 15 |
+ 2020-01-01 00:00:03.000 | true | 15 |
+ 2020-01-01 00:00:04.000 | true | 15 |
+ 2020-01-01 00:00:05.000 | true | 15 |
+ 2020-01-01 00:00:06.000 | true | 15 |
+ 2020-01-01 00:00:07.000 | true | 15 |
+ 2020-01-01 00:00:08.000 | true | 15 |
+ 2020-01-01 00:00:09.000 | true | 15 |
+ 2020-01-01 00:00:10.000 | true | 15 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | NULL |
+ 2020-01-01 00:00:01.000 | true | NULL |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | true | NULL |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | true | NULL |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | true | NULL |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | true | NULL |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 1 |
+ 2020-01-01 00:00:01.000 | true | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | true | 1 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | true | 1 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | true | 1 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | true | 1 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 21 |
+ 2020-01-01 00:00:01.000 | true | 21 |
+ 2020-01-01 00:00:02.000 | true | 21 |
+ 2020-01-01 00:00:03.000 | true | 21 |
+ 2020-01-01 00:00:04.000 | true | 21 |
+ 2020-01-01 00:00:05.000 | true | 21 |
+ 2020-01-01 00:00:06.000 | true | 21 |
+ 2020-01-01 00:00:07.000 | true | 21 |
+ 2020-01-01 00:00:08.000 | true | 21 |
+ 2020-01-01 00:00:09.000 | true | 21 |
+ 2020-01-01 00:00:10.000 | true | 21 |
+ 2020-01-01 00:00:11.000 | true | 21 |
+ 2020-01-01 00:00:12.000 | true | 21 |
+ 2020-01-01 00:00:13.000 | true | 21 |
+ 2020-01-01 00:00:14.000 | true | 21 |
+ 2020-01-01 00:00:15.000 | true | 21 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+
diff --git a/tests/army/query/function/generate_answer.sh b/tests/army/query/function/generate_answer.sh
new file mode 100755
index 0000000000..6b798865bd
--- /dev/null
+++ b/tests/army/query/function/generate_answer.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -e
+set -x
+# Check that the required arguments were passed in
+# echo "number of arguments: $#"
+# if [ "$#" -ne 4 ]; then
+#     echo "Usage: $0 -i <sql_file> -o <result_file>"
+# exit 1
+# fi
+
+# Parse the input arguments
+while getopts "i:o:" opt; do
+ case $opt in
+ i)
+ sqlfile="$OPTARG"
+ ;;
+ o)
+ query_result_file="$OPTARG"
+ ;;
+ \?)
+ echo "无效选项: -$OPTARG" >&2
+ exit 1
+ ;;
+ :)
+ echo "选项 -$OPTARG 需要一个参数." >&2
+ exit 1
+ ;;
+ esac
+done
+
+# Strip the trailing semicolon and whitespace from each line of the SQL file
+sed -i 's/;\s*$//' "$sqlfile"
+
+# Run the SQL file and generate the query result file
+taos -f "$sqlfile" | grep -v 'Query OK' | grep -v 'Copyright' | grep -v 'Welcome to the TDengine Command' > "$query_result_file"
+# echo $(cat "$query_result_file")
+# echo "1"
+# sed -i 's/ ([^()]*)$//' "$query_result_file"
+# echo "1"
+# Print the input file name
+echo "input file: $sqlfile"
+
+# Print the output file name
+echo "output file: $query_result_file"
\ No newline at end of file
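generate_answer.sh regenerates an expected-answer file: it strips the trailing semicolon from each statement in the .in file, runs the file through the taos CLI, and filters out the banner and "Query OK" lines before writing the result. Below is a rough Python equivalent, shown only as a sketch of that flow; it assumes the taos binary is on PATH, and the file names in the usage comment simply mirror the in/interp.in and ans/interp.csv files in this directory.

# Sketch: regenerate an answer file the same way generate_answer.sh does.
import re
import subprocess
import sys

def generate_answer(sql_in: str, answer_out: str) -> None:
    # Drop the trailing semicolon (and trailing spaces) from every statement,
    # mirroring the sed -i step in the shell script.
    with open(sql_in) as f:
        sql = re.sub(r";[ \t]*$", "", f.read(), flags=re.MULTILINE)
    with open(sql_in, "w") as f:
        f.write(sql)

    # Run the statements through the taos CLI and keep only the result rows.
    out = subprocess.run(["taos", "-f", sql_in],
                         capture_output=True, text=True, check=True).stdout
    skip = ("Query OK", "Copyright", "Welcome to the TDengine Command")
    kept = [line for line in out.splitlines() if not any(s in line for s in skip)]
    with open(answer_out, "w") as f:
        f.write("\n".join(kept) + "\n")

if __name__ == "__main__":
    generate_answer(sys.argv[1], sys.argv[2])  # e.g. in/interp.in ans/interp.csv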
diff --git a/tests/army/query/function/in/interp.in b/tests/army/query/function/in/interp.in
index 4825ab46b1..97a9936b8d 100644
--- a/tests/army/query/function/in/interp.in
+++ b/tests/army/query/function/in/interp.in
@@ -13,3 +13,53 @@ select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfille
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2, c3;
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2, c3;
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2, c3;
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
diff --git a/tests/army/query/function/test_interp.py b/tests/army/query/function/test_interp.py
index f903e7be73..106ef1e58e 100644
--- a/tests/army/query/function/test_interp.py
+++ b/tests/army/query/function/test_interp.py
@@ -38,6 +38,7 @@ class TDTestCase(TBase):
(ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned)
'''
)
+ tdSql.execute("create table if not exists test.td32861(ts timestamp, c1 int);")
tdLog.printNoPrefix("==========step2:insert data")
@@ -45,6 +46,16 @@ class TDTestCase(TBase):
tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)")
tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)")
+ tdSql.execute(
+ """insert into test.td32861 values
+ ('2020-01-01 00:00:00', 0),
+ ('2020-01-01 00:00:01', 1),
+ ('2020-01-01 00:00:03', 3),
+ ('2020-01-01 00:00:06', 6),
+ ('2020-01-01 00:00:10', 10),
+ ('2020-01-01 00:00:15', 15),
+ ('2020-01-01 00:00:21', 21);"""
+ )
def test_normal_query_new(self, testCase):
# read sql from .sql file and execute
diff --git a/tests/army/storage/compressBasic.py b/tests/army/storage/compressBasic.py
index f24c4dd288..446cb920fb 100644
--- a/tests/army/storage/compressBasic.py
+++ b/tests/army/storage/compressBasic.py
@@ -33,7 +33,26 @@ class TDTestCase(TBase):
"compressMsgSize" : "100",
}
# compress
- compresses = ["lz4","tsz","zlib","zstd","disabled","xz"]
+ compresses = ["lz4","zlib","zstd","disabled","xz"]
+
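+    # expected default compression algorithm for each supported column type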
+    compressDefaultDict = {
+        "BOOL": "zstd",
+        "TINYINT": "zlib",
+        "SMALLINT": "zlib",
+        "INT": "lz4",
+        "BIGINT": "lz4",
+        "FLOAT": "lz4",
+        "DOUBLE": "lz4",
+        "VARCHAR": "zstd",
+        "TIMESTAMP": "lz4",
+        "NCHAR": "zstd",
+        "TINYINT UNSIGNED": "zlib",
+        "SMALLINT UNSIGNED": "zlib",
+        "INT UNSIGNED": "lz4",
+        "BIGINT UNSIGNED": "lz4",
+        "BLOB": "lz4",
+        "VARBINARY": "zstd",
+    }
# level
levels = ["high","medium","low"]
@@ -137,15 +156,20 @@ class TDTestCase(TBase):
defEncodes = [ "delta-i","delta-i","simple8b","simple8b","simple8b","simple8b","simple8b","simple8b",
"simple8b","simple8b","delta-d","delta-d","bit-packing",
"disabled","disabled","disabled","disabled"]
-
+
count = tdSql.getRows()
for i in range(count):
node = tdSql.getData(i, 3)
if node == "TAG":
break
# check
- tdSql.checkData(i, 4, defEncodes[i])
- tdSql.checkData(i, 5, self.defCompress)
+ tdLog.info(f"check default encode {tdSql.getData(i, 1)}")
+ #tdLog.info(f"check default encode compressDefaultDict[tdSql.getData(i, 2)]")
+            defaultValue = self.compressDefaultDict.get(tdSql.getData(i, 1))
+            if defaultValue is None:
+                defaultValue = self.defCompress
+ tdLog.info(f"check default compress {tdSql.getData(i, 1)} {defaultValue}")
+ tdSql.checkData(i, 5, defaultValue)
tdSql.checkData(i, 6, self.defLevel)
# geometry encode is disabled
@@ -185,10 +209,6 @@ class TDTestCase(TBase):
comps.append(self.compresses[0]) # add lz4
for comp in comps:
for i in range(self.colCnt - 1):
- col = f"c{i}"
- sql = f"alter table {tbname} modify column {col} COMPRESS '{comp}';"
- tdSql.execute(sql, show=True)
- self.checkDataDesc(tbname, i + 1, 5, comp)
self.writeData(1000)
# alter float(c9) double(c10) to tsz
@@ -326,6 +346,7 @@ class TDTestCase(TBase):
while offset < count:
sql = f"select * from {tbname} limit {step} offset {offset}"
+ tdLog.info(sql)
tdSql.query(sql)
self.autoGen.dataCorrect(tdSql.res, tdSql.getRows(), step)
offset += step
diff --git a/tests/army/storage/s3/s3Basic.py b/tests/army/storage/s3/s3Basic.py
index 273a6129e1..cefd4ef60d 100644
--- a/tests/army/storage/s3/s3Basic.py
+++ b/tests/army/storage/s3/s3Basic.py
@@ -47,7 +47,7 @@ for test:
class TDTestCase(TBase):
- index = eutil.cpuRand(20) + 1
+ index = eutil.cpuRand(40) + 1
bucketName = f"ci-bucket{index}"
updatecfgDict = {
"supportVnodes":"1000",
@@ -63,6 +63,10 @@ class TDTestCase(TBase):
tdLog.info(f"assign bucketName is {bucketName}\n")
maxFileSize = (128 + 10) * 1014 * 1024 # add 10M buffer
+ def exit(self, log):
+ self.dropDb(True)
+ tdLog.exit(log)
+
def insertData(self):
tdLog.info(f"insert data.")
# taosBenchmark run
@@ -107,8 +111,8 @@ class TDTestCase(TBase):
loop = 0
rets = []
overCnt = 0
- while loop < 200:
- time.sleep(3)
+ while loop < 150:
+ time.sleep(2)
# check upload to s3
rets = eos.runRetList(cmd)
@@ -134,7 +138,7 @@ class TDTestCase(TBase):
# check can pass
if overCnt > 0:
- tdLog.exit(f"s3 have {overCnt} files over size.")
+ self.exit(f"s3 have {overCnt} files over size.")
def doAction(self):
@@ -159,7 +163,7 @@ class TDTestCase(TBase):
return True
time.sleep(1)
- tdLog.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")
+ self.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")
def checkCreateDb(self, keepLocal, chunkSize, compact):
diff --git a/tests/army/user/test_passwd.py b/tests/army/user/test_passwd.py
new file mode 100644
index 0000000000..dfec175824
--- /dev/null
+++ b/tests/army/user/test_passwd.py
@@ -0,0 +1,55 @@
+import os
+import platform
+import subprocess
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame.epath import *
+from frame import *
+
+class TDTestCase(TBase):
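+    # walk the repository tree to locate the directory that contains passwdTest.c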
+ def apiPath(self):
+ apiPath = None
+ currentFilePath = os.path.dirname(os.path.realpath(__file__))
+ if (os.sep.join(["community", "tests"]) in currentFilePath):
+ testFilePath = currentFilePath[:currentFilePath.find(os.sep.join(["community", "tests"]))]
+ else:
+ testFilePath = currentFilePath[:currentFilePath.find(os.sep.join(["TDengine", "tests"]))]
+
+ for root, dirs, files in os.walk(testFilePath):
+ if ("passwdTest.c" in files):
+ apiPath = root
+ break
+ return apiPath
+
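+    # build passwdTest via make (Linux only), then run the binary against localhost and
+    # fail the case if the build or the test run exits with a non-zero code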
+ def run(self):
+ apiPath = self.apiPath()
+ tdLog.info(f"api path: {apiPath}")
+        if apiPath and platform.system().lower() == 'linux':
+ p = subprocess.Popen(f"cd {apiPath} && make", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ if 0 != p.returncode:
+ tdLog.exit("Test script passwdTest.c make failed")
+
+ p = subprocess.Popen(f"ls {apiPath}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ tdLog.info(f"test files: {out}")
+ if apiPath:
+ test_file_cmd = os.sep.join([apiPath, "passwdTest localhost"])
+ try:
+ p = subprocess.Popen(test_file_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ if 0 != p.returncode:
+ tdLog.exit("Failed to run passwd test with output: %s \n error: %s" % (out, err))
+ else:
+ tdLog.info(out)
+ tdLog.success(f"{__file__} successfully executed")
+ except Exception as e:
+ tdLog.exit(f"Failed to execute {__file__} with error: {e}")
+ else:
+ tdLog.exit("passwdTest.c not found")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile
index 8381f1bb57..1caa6fea9e 100644
--- a/tests/ci/Dockerfile
+++ b/tests/ci/Dockerfile
@@ -7,7 +7,7 @@ RUN apt-get install -y locales psmisc sudo tree libgeos-dev libgflags2.2 libgfl
RUN sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen && locale-gen
RUN pip3 config set global.index-url http://admin:123456@192.168.0.212:3141/admin/dev/+simple/
RUN pip3 config set global.trusted-host 192.168.0.212
-RUN pip3 install taospy==2.7.15 taos-ws-py==0.3.1 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
+RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.5 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh
index 6a25683b58..3a9812637c 100644
--- a/tests/docs-examples-test/python.sh
+++ b/tests/docs-examples-test/python.sh
@@ -130,7 +130,7 @@ pip3 install kafka-python
python3 kafka_example_consumer.py
# 21
-pip3 install taos-ws-py==0.3.3
+pip3 install taos-ws-py==0.3.5
python3 conn_websocket_pandas.py
# 22
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 151358aec3..b7298d359b 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -44,6 +44,7 @@
,,y,army,./pytest.sh python3 ./test.py -f storage/compressBasic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f grant/grantBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/queryBugs.py -N 3
+,,n,army,python3 ./test.py -f user/test_passwd.py
,,y,army,./pytest.sh python3 ./test.py -f tmq/tmqBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_compare_asc_desc.py
,,y,army,./pytest.sh python3 ./test.py -f query/last/test_last.py
@@ -51,6 +52,7 @@
,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py
,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py
+
#
# system test
#
@@ -230,6 +232,14 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-32548.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 2
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info.py -N 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info2.py -N 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_multi_insert.py
+
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False
@@ -327,6 +337,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3311.py
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3821.py
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-5130.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-5580.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py
@@ -350,6 +361,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel_createdb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compress_tsz1.py
@@ -360,6 +372,7 @@
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py
+,,n,system-test,python3 ./test.py -f 0-others/wal_level_skip.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 1
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 1
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroup.py -N 3 -n 3
@@ -436,6 +449,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show_tag_index.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/grant.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py
@@ -1289,6 +1303,7 @@
#,,y,script,./test.sh -f tsim/mnode/basic3.sim
,,y,script,./test.sh -f tsim/mnode/basic4.sim
,,y,script,./test.sh -f tsim/mnode/basic5.sim
+,,y,script,./test.sh -f tsim/mnode/basic6.sim
,,y,script,./test.sh -f tsim/show/basic.sim
,,y,script,./test.sh -f tsim/table/autocreate.sim
,,y,script,./test.sh -f tsim/table/basic1.sim
diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh
index a386269f85..363f62284a 100755
--- a/tests/parallel_test/container_build.sh
+++ b/tests/parallel_test/container_build.sh
@@ -83,7 +83,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
-v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \
- --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j|| exit 1"
+ --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j|| exit 1"
# -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \
if [[ -d ${WORKDIR}/debugNoSan ]] ;then
@@ -137,7 +137,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
-v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \
- --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DCMAKE_BUILD_TYPE=Debug -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 "
+ --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DCMAKE_BUILD_TYPE=Debug -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 "
mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan
diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh
index fa8fedbdbe..a78d0aa4a4 100755
--- a/tests/parallel_test/run_case.sh
+++ b/tests/parallel_test/run_case.sh
@@ -76,16 +76,9 @@ ulimit -c unlimited
md5sum /usr/lib/libtaos.so.1
md5sum /home/TDinternal/debug/build/lib/libtaos.so
-#define taospy 2.7.16
-pip3 list|grep taospy
-pip3 uninstall taospy -y
-pip3 install --default-timeout=120 taospy==2.7.16
-
-#define taos-ws-py 0.3.1
-pip3 list|grep taos-ws-py
-pip3 uninstall taos-ws-py -y
-pip3 install --default-timeout=600 taos-ws-py==0.3.3
-
+#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.5
+pip3 install taospy==2.7.16
+pip3 install taos-ws-py==0.3.5
$TIMEOUT_CMD $cmd
RET=$?
echo "cmd exit code: $RET"
diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py
index 4e4679db6a..f6b31b4691 100755
--- a/tests/pytest/auto_crash_gen.py
+++ b/tests/pytest/auto_crash_gen.py
@@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()
-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+ 'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)
+
def get_msg(text):
return {
@@ -37,12 +48,12 @@ def get_msg(text):
}
-def send_msg(json):
+def send_msg(url:str,json:dict):
headers = {
'Content-Type': 'application/json'
}
- req = requests.post(url=group_url, headers=headers, json=json)
+ req = requests.post(url=url, headers=headers, json=json)
inf = req.json()
if "StatusCode" in inf and inf["StatusCode"] == 0:
pass
@@ -355,18 +366,27 @@ def main():
core_dir = "none"
text = f'''
- exit status: {msg_dict[status]}
- test scope: crash_gen
- owner: pxiao
- hostname: {hostname}
- start time: {starttime}
- end time: {endtime}
- git commit : {git_commit}
- log dir: {log_dir}
- core dir: {core_dir}
- cmd: {cmd}'''
+Result: {msg_dict[status]}
- send_msg(get_msg(text))
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
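+    # route the report by result: success goes to the notification robot, anything else to the alert robot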
+ text_result=text.split("Result: ")[1].split("Details")[0].strip()
+ print(text_result)
+
+ if text_result == "success":
+ send_msg(notification_robot_url, get_msg(text))
+ else:
+ send_msg(alert_robot_url, get_msg(text))
+
+ #send_msg(get_msg(text))
except Exception as e:
print("exception:", e)
exit(status)
diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py
index 1e0de6ace1..b346aca308 100755
--- a/tests/pytest/auto_crash_gen_valgrind.py
+++ b/tests/pytest/auto_crash_gen_valgrind.py
@@ -19,7 +19,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()
-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+ 'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)
+
def get_msg(text):
return {
@@ -40,13 +51,12 @@ def get_msg(text):
}
-def send_msg(json):
+def send_msg(url:str,json:dict):
headers = {
'Content-Type': 'application/json'
}
-
- req = requests.post(url=group_url, headers=headers, json=json)
+ req = requests.post(url=url, headers=headers, json=json)
inf = req.json()
if "StatusCode" in inf and inf["StatusCode"] == 0:
pass
@@ -389,18 +399,28 @@ def main():
core_dir = "none"
text = f'''
- exit status: {msg_dict[status]}
- test scope: crash_gen
- owner: pxiao
- hostname: {hostname}
- start time: {starttime}
- end time: {endtime}
- git commit : {git_commit}
- log dir: {log_dir}
- core dir: {core_dir}
- cmd: {cmd}'''
+Result: {msg_dict[status]}
- send_msg(get_msg(text))
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
+
+ text_result=text.split("Result: ")[1].split("Details")[0].strip()
+ print(text_result)
+
+ if text_result == "success":
+ send_msg(notification_robot_url, get_msg(text))
+ else:
+ send_msg(alert_robot_url, get_msg(text))
+
+ #send_msg(get_msg(text))
except Exception as e:
print("exception:", e)
exit(status)
diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py
index 22f453e51e..522ad48640 100755
--- a/tests/pytest/auto_crash_gen_valgrind_cluster.py
+++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py
@@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()
-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+ 'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+ "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)
+
def get_msg(text):
return {
@@ -37,12 +48,12 @@ def get_msg(text):
}
-def send_msg(json):
+def send_msg(url:str,json:dict):
headers = {
'Content-Type': 'application/json'
}
- req = requests.post(url=group_url, headers=headers, json=json)
+ req = requests.post(url=url, headers=headers, json=json)
inf = req.json()
if "StatusCode" in inf and inf["StatusCode"] == 0:
pass
@@ -376,18 +387,28 @@ def main():
core_dir = "none"
text = f'''
- exit status: {msg_dict[status]}
- test scope: crash_gen
- owner: pxiao
- hostname: {hostname}
- start time: {starttime}
- end time: {endtime}
- git commit : {git_commit}
- log dir: {log_dir}
- core dir: {core_dir}
- cmd: {cmd}'''
-
- send_msg(get_msg(text))
+Result: {msg_dict[status]}
+
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
+
+ text_result=text.split("Result: ")[1].split("Details")[0].strip()
+ print(text_result)
+
+ if text_result == "success":
+ send_msg(notification_robot_url, get_msg(text))
+ else:
+ send_msg(alert_robot_url, get_msg(text))
+
+ #send_msg(get_msg(text))
except Exception as e:
print("exception:", e)
exit(status)
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 3832530218..29fb52e124 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -594,8 +594,7 @@ class TDDnode:
def forcestop(self):
if self.asan:
- stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \
- (self.execPath, self.index)
+ stopCmd = "%s -s stop -n dnode%d -x SIGKILL" % (self.execPath, self.index)
tdLog.info("execute script: " + stopCmd)
os.system(stopCmd)
return
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 1d3333264a..46b7e1f795 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -843,9 +843,10 @@ class TDSql:
tdSql.query("select * from information_schema.ins_vnodes")
#result: dnode_id|vgroup_id|db_name|status|role_time|start_time|restored|
+ results = list(tdSql.queryResult)
for vnode_group_id in db_vgroups_list:
- print(tdSql.queryResult)
- for result in tdSql.queryResult:
+ for result in results:
+ print(f'result[2] is {result[2]}, db_name is {db_name}, result[1] is {result[1]}, vnode_group_id is {vnode_group_id}')
if result[2] == db_name and result[1] == vnode_group_id:
tdLog.debug(f"dbname: {db_name}, vgroup :{vnode_group_id}, dnode is {result[0]}")
print(useful_trans_dnodes_list)
diff --git a/tests/requirements.txt b/tests/requirements.txt
index c6dd044c86..a036c2b3d0 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -9,4 +9,5 @@ requests
pexpect
faker
pyopenssl
-hyperloglog
\ No newline at end of file
+hyperloglog
+tzlocal
\ No newline at end of file
diff --git a/tests/script/api/makefile b/tests/script/api/makefile
index 9c2bb6be3d..ce5980b37a 100644
--- a/tests/script/api/makefile
+++ b/tests/script/api/makefile
@@ -13,7 +13,7 @@ all: $(TARGET)
exe:
gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-test.c -o $(ROOT)stmt2-test $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-test.c -o $(ROOT)stmt2-test $(LFLAGS)
gcc $(CFLAGS) ./stopquery.c -o $(ROOT)stopquery $(LFLAGS)
gcc $(CFLAGS) ./dbTableRoute.c -o $(ROOT)dbTableRoute $(LFLAGS)
gcc $(CFLAGS) ./insertSameTs.c -o $(ROOT)insertSameTs $(LFLAGS)
@@ -22,11 +22,11 @@ exe:
gcc $(CFLAGS) ./insert_stb.c -o $(ROOT)insert_stb $(LFLAGS)
gcc $(CFLAGS) ./tmqViewTest.c -o $(ROOT)tmqViewTest $(LFLAGS)
gcc $(CFLAGS) ./stmtQuery.c -o $(ROOT)stmtQuery $(LFLAGS)
- gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS)
- gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS)
gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS)
clean:
diff --git a/tests/script/api/makefile_win64.mak b/tests/script/api/makefile_win64.mak
new file mode 100644
index 0000000000..50a2447a06
--- /dev/null
+++ b/tests/script/api/makefile_win64.mak
@@ -0,0 +1,20 @@
+# Makefile.mak for win64
+
+TARGET = passwdTest.exe
+CC = cl
+CFLAGS = /W4 /EHsc /I"C:\TDengine\include" /DWINDOWS
+LDFLAGS = /link /LIBPATH:"C:\TDengine\driver" taos.lib
+
+SRCS = passwdTest.c
+OBJS = $(SRCS:.c=.obj)
+
+all: $(TARGET)
+
+$(TARGET): $(OBJS)
+ $(CC) $(OBJS) $(LDFLAGS)
+
+.c.obj:
+ $(CC) $(CFLAGS) /c $<
+
+clean:
+ del $(OBJS) $(TARGET)
\ No newline at end of file
diff --git a/tests/script/api/passwdTest.c b/tests/script/api/passwdTest.c
index 928525750e..259d3bec8e 100644
--- a/tests/script/api/passwdTest.c
+++ b/tests/script/api/passwdTest.c
@@ -20,12 +20,27 @@
* passwdTest.c
* - Run the test case in clear TDengine environment with default root passwd 'taosdata'
*/
+#ifdef WINDOWS
+#include
+#include
+#include
+#ifndef PRId64
+#define PRId64 "I64d"
+#endif
+
+#ifndef PRIu64
+#define PRIu64 "I64u"
+#endif
+
+#else
#include
+#include
+#endif
+
#include
#include
#include
-#include
#include "taos.h" // TAOS header file
#define nDup 1
@@ -50,6 +65,16 @@ void sysInfoTest(TAOS *taos, const char *host, char *qstr);
void userDroppedTest(TAOS *taos, const char *host, char *qstr);
void clearTestEnv(TAOS *taos, const char *host, char *qstr);
+void taosMsleep(int64_t ms) {
+ if (ms < 0) return;
+#ifdef WINDOWS
+ Sleep(ms);
+#else
+ usleep(ms * 1000);
+#endif
+}
+
+
int nPassVerNotified = 0;
int nUserDropped = 0;
TAOS *taosu[nRoot] = {0};
@@ -59,7 +84,8 @@ void __taos_notify_cb(void *param, void *ext, int type) {
switch (type) {
case TAOS_NOTIFY_PASSVER: {
++nPassVerNotified;
- printf("%s:%d type:%d user:%s passVer:%d\n", __func__, __LINE__, type, param ? (char *)param : "NULL", *(int *)ext);
+ printf("%s:%d type:%d user:%s passVer:%d\n", __func__, __LINE__, type, param ? (char *)param : "NULL",
+ *(int *)ext);
break;
}
case TAOS_NOTIFY_USER_DROPPED: {
@@ -191,11 +217,11 @@ static int printResult(TAOS_RES *res, char *output) {
printRow(temp, row, fields, numFields);
puts(temp);
}
+ return 0;
}
int main(int argc, char *argv[]) {
char qstr[1024];
-
// connect to server
if (argc < 2) {
printf("please input server-ip \n");
@@ -215,6 +241,7 @@ int main(int argc, char *argv[]) {
taos_close(taos);
taos_cleanup();
+ exit(EXIT_SUCCESS);
}
void createUsers(TAOS *taos, const char *host, char *qstr) {
@@ -234,6 +261,7 @@ void createUsers(TAOS *taos, const char *host, char *qstr) {
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb(TAOS_NOTIFY_PASSVER) for user:%s since %d\n", users[i], code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb(TAOS_NOTIFY_PASSVER) for user:%s\n", users[i]);
}
@@ -260,6 +288,7 @@ void passVerTestMulti(const char *host, char *qstr) {
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb since %d\n", code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb\n");
}
@@ -283,26 +312,25 @@ void passVerTestMulti(const char *host, char *qstr) {
printf("%s:%d [%d] second(s) elasped, passVer notification received:%d, total:%d\n", __func__, __LINE__, i,
nPassVerNotified, nConn);
if (nPassVerNotified >= nConn) break;
- sleep(1);
+ taosMsleep(1000);
}
// close the taos_conn
for (int i = 0; i < nRoot; ++i) {
taos_close(taos[i]);
printf("%s:%d close taos[%d]\n", __func__, __LINE__, i);
- // sleep(1);
+ // taosMsleep(1000);
}
for (int i = 0; i < nUser; ++i) {
taos_close(taosu[i]);
printf("%s:%d close taosu[%d]\n", __func__, __LINE__, i);
- // sleep(1);
+ // taosMsleep(1000);
}
fprintf(stderr, "######## %s #########\n", __func__);
if (nPassVerNotified == nConn) {
- fprintf(stderr, ">>> succeed to get passVer notification since nNotify %d == nConn %d\n", nPassVerNotified,
- nConn);
+ fprintf(stderr, ">>> succeed to get passVer notification since nNotify %d == nConn %d\n", nPassVerNotified, nConn);
} else {
fprintf(stderr, ">>> failed to get passVer notification since nNotify %d != nConn %d\n", nPassVerNotified, nConn);
exit(1);
@@ -337,7 +365,7 @@ void sysInfoTest(TAOS *taosRoot, const char *host, char *qstr) {
TAOS_RES *res = NULL;
int32_t nRep = 0;
-_REP:
+_REP:
fprintf(stderr, "######## %s loop:%d #########\n", __func__, nRep);
res = taos_query(taos[0], qstr);
if (taos_errno(res) != 0) {
@@ -356,7 +384,7 @@ _REP:
fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__);
for (int i = 1; i <= 2; ++i) {
- sleep(1);
+ taosMsleep(1000);
}
res = taos_query(taos[0], qstr);
@@ -372,10 +400,10 @@ _REP:
queryDB(taosRoot, "alter user user0 sysinfo 1");
fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__);
for (int i = 1; i <= 2; ++i) {
- sleep(1);
+ taosMsleep(1000);
}
- if(++nRep < 5) {
+ if (++nRep < 5) {
goto _REP;
}
@@ -390,7 +418,7 @@ _REP:
fprintf(stderr, "######## %s #########\n", __func__);
}
static bool isDropUser = true;
-void userDroppedTest(TAOS *taos, const char *host, char *qstr) {
+void userDroppedTest(TAOS *taos, const char *host, char *qstr) {
// users
int nTestUsers = nUser;
int nLoop = 0;
@@ -408,6 +436,7 @@ _loop:
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb:%d for user:%s since %d\n", TAOS_NOTIFY_USER_DROPPED, users[i],
code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb:%d for user:%s\n", TAOS_NOTIFY_USER_DROPPED, users[i]);
}
@@ -426,7 +455,7 @@ _loop:
printf("%s:%d [%d] second(s) elasped, user dropped notification received:%d, total:%d\n", __func__, __LINE__, i,
nUserDropped, nConn);
if (nUserDropped >= nConn) break;
- sleep(1);
+ taosMsleep(1000);
}
for (int i = 0; i < nTestUsers; ++i) {
diff --git a/tests/script/api/sameReqidTest.c b/tests/script/api/sameReqidTest.c
new file mode 100644
index 0000000000..7507619886
--- /dev/null
+++ b/tests/script/api/sameReqidTest.c
@@ -0,0 +1,406 @@
+// sample code to verify multiple queries with the same reqid
+// to compile: gcc -o sameReqdiTest sameReqidTest.c -ltaos
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "taos.h"
+
+#define NUM_ROUNDS 10
+#define CONST_REQ_ID 12345
+#define TEST_DB "test"
+
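+// helper macros: print the failing call and abort the whole test as soon as any API call reports an error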
+#define CHECK_CONDITION(condition, prompt, errstr) \
+ do { \
+ if (!(condition)) { \
+ printf("\033[31m[%s:%d] failed to " prompt ", reason: %s\033[0m\n", __func__, __LINE__, errstr); \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
+
+#define CHECK_RES(res, prompt) CHECK_CONDITION(taos_errno(res) == 0, prompt, taos_errstr(res))
+#define CHECK_CODE(code, prompt) CHECK_CONDITION(code == 0, prompt, taos_errstr(NULL))
+
+static TAOS* getNewConnection() {
+ const char* host = "127.0.0.1";
+ const char* user = "root";
+ const char* passwd = "taosdata";
+ TAOS* taos = NULL;
+
+ taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
+ taos = taos_connect(host, user, passwd, "", 0);
+ CHECK_CONDITION(taos != NULL, "connect to db", taos_errstr(NULL));
+ return taos;
+}
+
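+// create the test database, seed the meters super table and its ten subtables with sample
+// rows, and create the plain table m1 used later by the stmt test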
+static void prepareData(TAOS* taos) {
+ TAOS_RES* res = NULL;
+ int32_t code = 0;
+
+ res = taos_query(taos, "create database if not exists " TEST_DB " precision 'ns'");
+ CHECK_RES(res, "create database");
+ taos_free_result(res);
+ usleep(100000);
+
+ code = taos_select_db(taos, TEST_DB);
+ CHECK_CODE(code, "switch to database");
+
+ res = taos_query(taos, "create table if not exists meters(ts timestamp, a int) tags(area int)");
+ CHECK_RES(res, "create stable meters");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t0 using meters tags(0)");
+ CHECK_RES(res, "create table t0");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t1 using meters tags(1)");
+ CHECK_RES(res, "create table t1");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t2 using meters tags(2)");
+ CHECK_RES(res, "create table t2");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t3 using meters tags(3)");
+ CHECK_RES(res, "create table t3");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t4 using meters tags(4)");
+ CHECK_RES(res, "create table t4");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t5 using meters tags(5)");
+ CHECK_RES(res, "create table t5");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t6 using meters tags(6)");
+ CHECK_RES(res, "create table t6");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t7 using meters tags(7)");
+ CHECK_RES(res, "create table t7");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t8 using meters tags(8)");
+ CHECK_RES(res, "create table t8");
+ taos_free_result(res);
+
+ res = taos_query(taos, "create table if not exists t9 using meters tags(9)");
+ CHECK_RES(res, "create table t9");
+ taos_free_result(res);
+
+ res = taos_query(taos,
+ "insert into t0 values('2020-01-01 00:00:00.000', 0)"
+ " ('2020-01-01 00:01:00.000', 0)"
+ " ('2020-01-01 00:02:00.000', 0)"
+ " t1 values('2020-01-01 00:00:00.000', 1)"
+ " ('2020-01-01 00:01:00.000', 1)"
+ " ('2020-01-01 00:02:00.000', 1)"
+ " ('2020-01-01 00:03:00.000', 1)"
+ " t2 values('2020-01-01 00:00:00.000', 2)"
+ " ('2020-01-01 00:01:00.000', 2)"
+ " ('2020-01-01 00:01:01.000', 2)"
+ " ('2020-01-01 00:01:02.000', 2)"
+ " t3 values('2020-01-01 00:01:02.000', 3)"
+ " t4 values('2020-01-01 00:01:02.000', 4)"
+ " t5 values('2020-01-01 00:01:02.000', 5)"
+ " t6 values('2020-01-01 00:01:02.000', 6)"
+ " t7 values('2020-01-01 00:01:02.000', 7)"
+ " t8 values('2020-01-01 00:01:02.000', 8)"
+ " t9 values('2020-01-01 00:01:02.000', 9)");
+ CHECK_RES(res, "insert into meters");
+ CHECK_CONDITION(taos_affected_rows(res), "insert into meters", "insufficient count");
+ taos_free_result(res);
+
+ res = taos_query(
+ taos,
+ "create table if not exists m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 "
+ "double, bin binary(40), blob nchar(10))");
+ CHECK_RES(res, "create table m1");
+ taos_free_result(res);
+
+ usleep(1000000);
+}
+
+static void verifySchemaLess(TAOS* taos) {
+ TAOS_RES* res = NULL;
+ char* lines[] = {
+ "st,t1=3i64,t2=4f64,t3=L\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ "st,t1=4i64,t3=L\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000",
+ "st,t2=5f64,t3=L\"ste\" c1=4i64,c2=true,c3=L\"iam\" 1626056811823316532",
+ "st,t1=4i64,t2=5f64,t3=L\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000",
+ "st,t2=5f64,t3=L\"ste2\" c3=L\"iamszhou\",c2=false 1626056811843316532",
+ "st,t2=5f64,t3=L\"ste2\" c3=L\"iamszhou\",c2=false,c5=5f64,c6=7u64,c7=32i32,c8=88.88f32 1626056812843316532",
+ "st,t1=4i64,t3=L\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 "
+ "1626006933640000000",
+ "st,t1=4i64,t3=L\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 "
+ "1626006933640000000",
+ "st,t1=4i64,t3=L\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 "
+ "1626006933641000000"};
+
+ res = taos_schemaless_insert_with_reqid(taos, lines, sizeof(lines) / sizeof(char*), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS, CONST_REQ_ID);
+ CHECK_RES(res, "insert schema-less data");
+ printf("successfully inserted %d rows\n", taos_affected_rows(res));
+ taos_free_result(res);
+}
+
+static int32_t printResult(TAOS_RES* res, int32_t nlimit) {
+ TAOS_ROW row = NULL;
+ TAOS_FIELD* fields = NULL;
+ int32_t numFields = 0;
+ int32_t nRows = 0;
+
+ numFields = taos_num_fields(res);
+ fields = taos_fetch_fields(res);
+ while ((nlimit-- > 0) && (row = taos_fetch_row(res))) {
+ char temp[256] = {0};
+ taos_print_row(temp, row, fields, numFields);
+ puts(temp);
+ nRows++;
+ }
+ return nRows;
+}
+
+static void verifyQuery(TAOS* taos) {
+ TAOS_RES* res = NULL;
+
+ res = taos_query_with_reqid(taos, "select * from meters", CONST_REQ_ID);
+ CHECK_RES(res, "select from meters");
+ printResult(res, INT32_MAX);
+ taos_free_result(res);
+
+ res = taos_query_with_reqid(taos, "select * from t0", CONST_REQ_ID);
+ CHECK_RES(res, "select from t0");
+ printResult(res, INT32_MAX);
+ taos_free_result(res);
+
+ res = taos_query_with_reqid(taos, "select * from t1", CONST_REQ_ID);
+ CHECK_RES(res, "select from t1");
+ printResult(res, INT32_MAX);
+ taos_free_result(res);
+
+ res = taos_query_with_reqid(taos, "select * from t2", CONST_REQ_ID);
+ CHECK_RES(res, "select from t2");
+ printResult(res, INT32_MAX);
+ taos_free_result(res);
+
+ res = taos_query_with_reqid(taos, "select * from t3", CONST_REQ_ID);
+ CHECK_RES(res, "select from t3");
+ printResult(res, INT32_MAX);
+ taos_free_result(res);
+
+ printf("succeed to read from meters\n");
+}
+
+void retrieveCallback(void* param, TAOS_RES* res, int32_t nrows) {
+ if (nrows == 0) {
+ taos_free_result(res);
+ } else {
+ printResult(res, nrows);
+ taos_fetch_rows_a(res, retrieveCallback, param);
+ }
+}
+
+void selectCallback(void* param, TAOS_RES* res, int32_t code) {
+ CHECK_CODE(code, "read async from table");
+ taos_fetch_rows_a(res, retrieveCallback, param);
+}
+
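+// fire several async queries that all reuse CONST_REQ_ID, then give the callbacks one second to drain the results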
+static void verifyQueryAsync(TAOS* taos) {
+ taos_query_a_with_reqid(taos, "select *from meters", selectCallback, NULL, CONST_REQ_ID);
+ taos_query_a_with_reqid(taos, "select *from t0", selectCallback, NULL, CONST_REQ_ID);
+ taos_query_a_with_reqid(taos, "select *from t1", selectCallback, NULL, CONST_REQ_ID);
+ taos_query_a_with_reqid(taos, "select *from t2", selectCallback, NULL, CONST_REQ_ID);
+ taos_query_a_with_reqid(taos, "select *from t3", selectCallback, NULL, CONST_REQ_ID);
+
+ sleep(1);
+}
+
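+// bind and insert ten rows into m1 through the parameter-binding API, using a statement
+// handle created with the same constant request id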
+void verifyStmt(TAOS* taos) {
+ // insert 10 records
+ struct {
+ int64_t ts[10];
+ int8_t b[10];
+ int8_t v1[10];
+ int16_t v2[10];
+ int32_t v4[10];
+ int64_t v8[10];
+ float f4[10];
+ double f8[10];
+ char bin[10][40];
+ char blob[10][80];
+ } v;
+
+ int32_t* t8_len = malloc(sizeof(int32_t) * 10);
+ int32_t* t16_len = malloc(sizeof(int32_t) * 10);
+ int32_t* t32_len = malloc(sizeof(int32_t) * 10);
+ int32_t* t64_len = malloc(sizeof(int32_t) * 10);
+ int32_t* float_len = malloc(sizeof(int32_t) * 10);
+ int32_t* double_len = malloc(sizeof(int32_t) * 10);
+ int32_t* bin_len = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len = malloc(sizeof(int32_t) * 10);
+
+ TAOS_STMT* stmt = taos_stmt_init_with_reqid(taos, CONST_REQ_ID);
+ TAOS_MULTI_BIND params[10];
+ char is_null[10] = {0};
+
+ params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ params[0].buffer_length = sizeof(v.ts[0]);
+ params[0].buffer = v.ts;
+ params[0].length = t64_len;
+ params[0].is_null = is_null;
+ params[0].num = 10;
+
+ params[1].buffer_type = TSDB_DATA_TYPE_BOOL;
+ params[1].buffer_length = sizeof(v.b[0]);
+ params[1].buffer = v.b;
+ params[1].length = t8_len;
+ params[1].is_null = is_null;
+ params[1].num = 10;
+
+ params[2].buffer_type = TSDB_DATA_TYPE_TINYINT;
+ params[2].buffer_length = sizeof(v.v1[0]);
+ params[2].buffer = v.v1;
+ params[2].length = t8_len;
+ params[2].is_null = is_null;
+ params[2].num = 10;
+
+ params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ params[3].buffer_length = sizeof(v.v2[0]);
+ params[3].buffer = v.v2;
+ params[3].length = t16_len;
+ params[3].is_null = is_null;
+ params[3].num = 10;
+
+ params[4].buffer_type = TSDB_DATA_TYPE_INT;
+ params[4].buffer_length = sizeof(v.v4[0]);
+ params[4].buffer = v.v4;
+ params[4].length = t32_len;
+ params[4].is_null = is_null;
+ params[4].num = 10;
+
+ params[5].buffer_type = TSDB_DATA_TYPE_BIGINT;
+ params[5].buffer_length = sizeof(v.v8[0]);
+ params[5].buffer = v.v8;
+ params[5].length = t64_len;
+ params[5].is_null = is_null;
+ params[5].num = 10;
+
+ params[6].buffer_type = TSDB_DATA_TYPE_FLOAT;
+ params[6].buffer_length = sizeof(v.f4[0]);
+ params[6].buffer = v.f4;
+ params[6].length = float_len;
+ params[6].is_null = is_null;
+ params[6].num = 10;
+
+ params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ params[7].buffer_length = sizeof(v.f8[0]);
+ params[7].buffer = v.f8;
+ params[7].length = double_len;
+ params[7].is_null = is_null;
+ params[7].num = 10;
+
+ params[8].buffer_type = TSDB_DATA_TYPE_BINARY;
+ params[8].buffer_length = sizeof(v.bin[0]);
+ params[8].buffer = v.bin;
+ params[8].length = bin_len;
+ params[8].is_null = is_null;
+ params[8].num = 10;
+
+ params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ params[9].buffer_length = sizeof(v.blob[0]);
+ params[9].buffer = v.blob;
+ params[9].length = blob_len;
+ params[9].is_null = is_null;
+ params[9].num = 10;
+
+ int32_t code = taos_stmt_prepare(
+ stmt, "insert into ? (ts, b, v1, v2, v4, v8, f4, f8, bin, blob) values(?,?,?,?,?,?,?,?,?,?)", 0);
+ CHECK_CODE(code, "taos_stmt_prepare");
+
+ code = taos_stmt_set_tbname(stmt, "m1");
+ CHECK_CODE(code, "taos_stmt_set_tbname");
+
+ int64_t ts = 1591060628000000000;
+ for (int i = 0; i < 10; ++i) {
+ v.ts[i] = ts;
+ ts += 1000000;
+ is_null[i] = 0;
+
+ v.b[i] = (int8_t)i % 2;
+ v.v1[i] = (int8_t)i;
+ v.v2[i] = (int16_t)(i * 2);
+ v.v4[i] = (int32_t)(i * 4);
+ v.v8[i] = (int64_t)(i * 8);
+ v.f4[i] = (float)(i * 40);
+ v.f8[i] = (double)(i * 80);
+ for (int j = 0; j < sizeof(v.bin[0]); ++j) {
+ v.bin[i][j] = (char)(i + '0');
+ }
+ strcpy(v.blob[i], "一二三四五六七八九十");
+
+ t8_len[i] = sizeof(int8_t);
+ t16_len[i] = sizeof(int16_t);
+ t32_len[i] = sizeof(int32_t);
+ t64_len[i] = sizeof(int64_t);
+ float_len[i] = sizeof(float);
+ double_len[i] = sizeof(double);
+ bin_len[i] = sizeof(v.bin[0]);
+ blob_len[i] = (int32_t)strlen(v.blob[i]);
+ }
+
+ code = taos_stmt_bind_param_batch(stmt, params);
+ CHECK_CODE(code, "taos_stmt_bind_param_batch");
+
+ code = taos_stmt_add_batch(stmt);
+ CHECK_CODE(code, "taos_stmt_add_batch");
+
+ code = taos_stmt_execute(stmt);
+ CHECK_CODE(code, "taos_stmt_execute");
+
+ taos_stmt_close(stmt);
+
+ free(t8_len);
+ free(t16_len);
+ free(t32_len);
+ free(t64_len);
+ free(float_len);
+ free(double_len);
+ free(bin_len);
+ free(blob_len);
+}
+
+int main(int argc, char* argv[]) {
+ TAOS* taos = NULL;
+ int32_t code = 0;
+
+ taos = getNewConnection();
+ taos_select_db(taos, TEST_DB);
+ CHECK_CODE(code, "switch to database");
+
+ printf("************ prepare data *************\n");
+ prepareData(taos);
+
+ for (int32_t i = 0; i < NUM_ROUNDS; ++i) {
+ printf("************ verify schema-less *************\n");
+ verifySchemaLess(taos);
+
+ printf("************ verify query *************\n");
+ verifyQuery(taos);
+
+ printf("********* verify async query **********\n");
+ verifyQueryAsync(taos);
+
+ printf("********* verify stmt query **********\n");
+    verifyStmt(taos);
+
+ printf("done\n");
+ }
+
+ taos_close(taos);
+ taos_cleanup();
+
+ return 0;
+}
diff --git a/tests/script/api/stmt2-geometry-test.c b/tests/script/api/stmt2-geometry-test.c
new file mode 100644
index 0000000000..46fd9081ae
--- /dev/null
+++ b/tests/script/api/stmt2-geometry-test.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "taos.h"
+
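+// byteArray holds the little-endian WKB encoding of POINT(100 100); the second array
+// corrupts the byte-order marker so the server is expected to reject it as malformed WKB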
+int8_t byteArray[21] = {0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x59, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40};
+int8_t wrongArray[21] = {0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                         0x59, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40};
+
+void do_query(TAOS* taos, const char* sql) {
+ printf("[sql]%s\n", sql);
+ TAOS_RES* result = taos_query(taos, sql);
+ int code = taos_errno(result);
+ if (code) {
+ printf(" failed to query: %s, reason:%s\n", sql, taos_errstr(result));
+ taos_free_result(result);
+ return;
+ }
+ taos_free_result(result);
+}
+
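+// prepare a stmt2 auto-create insert for two subtables, binding an int tag, a geometry tag,
+// a timestamp and a geometry column per table, and report whether bind/exec succeed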
+void execute_test(TAOS* taos, const char* tbname1, const char* tbname2, int8_t* tag2, int8_t* col2,
+ const char* case_desc, int size) {
+ // prepare stmt
+ TAOS_STMT2_OPTION option = {0, true, false, NULL, NULL};
+ TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
+ const char* sql;
+  if (strcmp(tbname1, "tb41") == 0) {
+ sql = "insert into db.? using db.stb2 tags(?, ?) values(?,?)";
+ } else {
+ sql = "insert into db.? using db.stb tags(?, ?) values(?,?)";
+ }
+ int code = taos_stmt2_prepare(stmt, sql, 0);
+ printf("\n%s\n insert into db.? using db.stb tags(?, ?) values(?,?)\n", case_desc);
+ if (code != 0) {
+ printf(" failed to execute taos_stmt2_prepare. error:%s\n", taos_stmt2_error(stmt));
+ taos_stmt2_close(stmt);
+ return;
+ }
+
+ // prepare data
+ int t1_val = 0;
+ int64_t ts = 1591060628000;
+ const char* tbname[2] = {tbname1, tbname2};
+  int32_t length[6] = {sizeof(int), 2, sizeof(int64_t), size, 20, sizeof(col2)};
+
+ TAOS_STMT2_BIND tags[2][2] = {
+ {{TSDB_DATA_TYPE_INT, &t1_val, &length[0], NULL, 2}, {TSDB_DATA_TYPE_GEOMETRY, tag2, &length[3], NULL, 2}},
+ {{TSDB_DATA_TYPE_INT, &t1_val, &length[0], NULL, 2}, {TSDB_DATA_TYPE_GEOMETRY, tag2, &length[3], NULL, 2}}};
+ TAOS_STMT2_BIND params[2][2] = {
+ {{TSDB_DATA_TYPE_TIMESTAMP, &ts, &length[2], NULL, 1}, {TSDB_DATA_TYPE_GEOMETRY, col2, &length[3], NULL, 1}},
+ {{TSDB_DATA_TYPE_TIMESTAMP, &ts, &length[2], NULL, 1}, {TSDB_DATA_TYPE_GEOMETRY, col2, &length[3], NULL, 1}}};
+ TAOS_STMT2_BIND* tagv[2] = {&tags[0][0], &tags[1][0]};
+  TAOS_STMT2_BIND* paramv[2] = {&params[0][0], &params[1][0]};
+
+  TAOS_STMT2_BINDV bindv = {2, &tbname[0], &tagv[0], &paramv[0]};
+ code = taos_stmt2_bind_param(stmt, &bindv, -1);
+ if (code != 0) {
+ printf(" failed to bind param. error:%s\n", taos_stmt2_error(stmt));
+ taos_stmt2_close(stmt);
+ return;
+ }
+
+ if (taos_stmt2_exec(stmt, NULL)) {
+ printf(" failed to execute insert statement.error:%s\n", taos_stmt2_error(stmt));
+ taos_stmt2_close(stmt);
+ return;
+ }
+ printf("[ok]\n");
+
+ taos_stmt2_close(stmt);
+}
+
+void test1(TAOS* taos) { execute_test(taos, "tb11", "tb12", &byteArray[0], &byteArray[0], "[normal]case 1", 21); }
+
+void test2(TAOS* taos) {
+ execute_test(taos, "tb21", "tb22", &worngArray[0], &byteArray[0], "[wrong WKB tag]case 2", 21);
+}
+
+void test3(TAOS* taos) {
+ execute_test(taos, "tb31", "tb32", "POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))", "POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))",
+ "[wrong WKT col]case 3", sizeof("POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))"));
+}
+
+void test4(TAOS* taos) { execute_test(taos, "tb41", "tb42", &byteArray[0], &byteArray[0], "[wrong size]case 4", 21); }
+
+int main() {
+ TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0);
+ if (!taos) {
+ printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
+ exit(1);
+ }
+ // init test db & stb table
+ do_query(taos, "drop database if exists db");
+ do_query(taos, "create database db");
+ do_query(taos, "create table db.stb (ts timestamp, b geometry(100)) tags(t1 int, t2 geometry(100))");
+ do_query(taos, "create table db.stb2 (ts timestamp, b geometry(100)) tags(t1 int, t2 geometry(10))");
+
+ test1(taos);
+ test2(taos);
+ test3(taos);
+ test4(taos);
+
+ taos_close(taos);
+ taos_cleanup();
+}
diff --git a/tests/script/api/stmt2-nchar.c b/tests/script/api/stmt2-nchar.c
new file mode 100644
index 0000000000..3952bd5898
--- /dev/null
+++ b/tests/script/api/stmt2-nchar.c
@@ -0,0 +1,273 @@
+// sample code to verify all TDengine API
+// to compile: gcc -o apitest apitest.c -ltaos
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "taos.h"
+static int64_t count = 10000;
+
+int64_t genReqid() {
+ count += 100;
+ return count;
+}
+
+void stmtAsyncQueryCb(void* param, TAOS_RES* pRes, int code) {
+ int affected_rows = taos_affected_rows(pRes);
+ return;
+ /*
+ SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)param;
+ if (code == 0 && pRes) {
+ if (qParam->fetch) {
+ taos_fetch_rows_a(pRes, sqAsyncFetchCb, param);
+ } else {
+ if (qParam->free) {
+ taos_free_result(pRes);
+ }
+ *qParam->end = 1;
+ }
+ } else {
+ sqError("select", taos_errstr(pRes));
+ *qParam->end = 1;
+ taos_free_result(pRes);
+ }
+ */
+}
+
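+// recreate the test database, create the nchar table m1, then time each stage of a stmt2
+// insert (init, prepare, bind, get-fields, exec) for ten single-character rows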
+void veriry_stmt(TAOS* taos) {
+ TAOS_RES* result = taos_query(taos, "drop database if exists test;");
+ taos_free_result(result);
+ usleep(100000);
+ result = taos_query(taos, "create database test;");
+
+ int code = taos_errno(result);
+ if (code != 0) {
+ printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result));
+ taos_free_result(result);
+ return;
+ }
+ taos_free_result(result);
+
+ usleep(100000);
+ taos_select_db(taos, "test");
+
+ // create table
+ /*
+ const char* sql =
+ "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin "
+ "binary(40), blob nchar(10))";
+ */
+ const char* sql =
+ "create table m1 (ts timestamp, blob2 nchar(10), blob nchar(10),blob3 nchar(10),blob4 nchar(10),blob5 "
+ "nchar(10))";
+ result = taos_query(taos, sql);
+ code = taos_errno(result);
+ if (code != 0) {
+ printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result));
+ taos_free_result(result);
+ return;
+ }
+ taos_free_result(result);
+
+ // insert 10 records
+ struct {
+ int64_t ts[10];
+ char blob[10][1];
+ char blob2[10][1];
+ char blob3[10][1];
+ char blob4[10][1];
+ char blob5[10][1];
+
+ } v;
+
+ int32_t* t64_len = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len2 = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len3 = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len4 = malloc(sizeof(int32_t) * 10);
+ int32_t* blob_len5 = malloc(sizeof(int32_t) * 10);
+
+#include "time.h"
+ clock_t start, end;
+ TAOS_STMT2_OPTION option = {0, true, true, stmtAsyncQueryCb, NULL};
+
+ start = clock();
+ TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
+ end = clock();
+ printf("init time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+ // TAOS_MULTI_BIND params[10];
+ TAOS_STMT2_BIND params[10];
+ char is_null[10] = {0};
+
+ params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ // params[0].buffer_length = sizeof(v.ts[0]);
+ params[0].buffer = v.ts;
+ params[0].length = t64_len;
+ params[0].is_null = is_null;
+ params[0].num = 10;
+
+ params[1].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ // params[8].buffer_length = sizeof(v.blob2[0]);
+ params[1].buffer = v.blob2;
+ params[1].length = blob_len2;
+ params[1].is_null = is_null;
+ params[1].num = 10;
+
+ params[2].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ // params[9].buffer_length = sizeof(v.blob[0]);
+ params[2].buffer = v.blob3;
+ params[2].length = blob_len;
+ params[2].is_null = is_null;
+ params[2].num = 10;
+
+ params[3].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ // params[9].buffer_length = sizeof(v.blob[0]);
+ params[3].buffer = v.blob4;
+ params[3].length = blob_len;
+ params[3].is_null = is_null;
+ params[3].num = 10;
+
+ params[4].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ // params[9].buffer_length = sizeof(v.blob[0]);
+ params[4].buffer = v.blob;
+ params[4].length = blob_len;
+ params[4].is_null = is_null;
+ params[4].num = 10;
+
+ params[5].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ // params[9].buffer_length = sizeof(v.blob[0]);
+ params[5].buffer = v.blob5;
+  params[5].length = blob_len5;
+ params[5].is_null = is_null;
+ params[5].num = 10;
+
+ sql = "insert into ? (ts, blob2, blob, blob3, blob4, blob5) values(?,?,?,?,?,?)";
+ start = clock();
+ code = taos_stmt2_prepare(stmt, sql, 0);
+ end = clock();
+ printf("prepare time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+  if (code != 0) {
+    printf("\033[31mfailed to execute taos_stmt2_prepare. error:%s\033[0m\n", taos_stmt_errstr(stmt));
+    taos_stmt2_close(stmt);
+    return;
+  }
+ /*
+ code = taos_stmt_set_tbname(stmt, "m1");
+ if (code != 0) {
+ printf("\033[31mfailed to execute taos_stmt_prepare. error:%s\033[0m\n", taos_stmt_errstr(stmt));
+ taos_stmt_close(stmt);
+ return;
+ }
+ */
+
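+  // Fill 10 rows: consecutive timestamps and one printable character per NCHAR column.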
+ int64_t ts = 1591060628000;
+ for (int i = 0; i < 10; ++i) {
+ is_null[i] = 0;
+
+ v.ts[i] = ts++;
+
+ v.blob[i][0] = 'a' + i;
+ v.blob2[i][0] = 'f' + i;
+ v.blob3[i][0] = 't' + i;
+ v.blob4[i][0] = 'A' + i;
+ v.blob5[i][0] = 'G' + i;
+
+ // v.blob2[i] = malloc(strlen("一二三四五六七十九八"));
+ // v.blob[i] = malloc(strlen("十九八七六五四三二一"));
+
+ // strcpy(v.blob2[i], "一二三四五六七十九八");
+ // strcpy(v.blob[i], "十九八七六五四三二一");
+
+ blob_len[i] = sizeof(char);
+ blob_len2[i] = sizeof(char);
+ blob_len3[i] = sizeof(char);
+ blob_len4[i] = sizeof(char);
+ blob_len5[i] = sizeof(char);
+ }
+
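+  // Bind to a single table "m1" (no tags); bind_cols points at all six column bindings.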
+ char* tbname = "m1";
+  TAOS_STMT2_BIND* bind_cols[1] = {&params[0]};
+ TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]};
+ start = clock();
+ // taos_stmt2_bind_param(stmt, "m1", NULL, params, -1);
+ taos_stmt2_bind_param(stmt, &bindv, -1);
+ end = clock();
+ printf("bind time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+ // taos_stmt_bind_param_batch(stmt, params);
+ // taos_stmt_add_batch(stmt);
+ /*
+ int param_count = -1;
+  code = taos_stmt2_param_count(stmt, &param_count);
+ if (code != 0) {
+ printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt));
+ taos_stmt_close(stmt);
+ return;
+ }
+ printf("param_count: %d\n", param_count);
+ */
+ TAOS_FIELD_E* fields = NULL;
+ int field_count = -1;
+ start = clock();
+ code = taos_stmt2_get_fields(stmt, TAOS_FIELD_COL, &field_count, NULL);
+ end = clock();
+ printf("get fields time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+  if (code != 0) {
+    printf("\033[31mfailed to execute taos_stmt2_get_fields. error:%s\033[0m\n", taos_stmt_errstr(stmt));
+    taos_stmt2_close(stmt);
+    return;
+  }
+ printf("col field_count: %d\n", field_count);
+ start = clock();
+ taos_stmt2_free_fields(stmt, fields);
+ end = clock();
+ printf("free time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+ /*
+ code = taos_stmt2_get_fields(stmt, TAOS_FIELD_TAG, &field_count, &fields);
+ if (code != 0) {
+ printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt));
+ taos_stmt_close(stmt);
+ return;
+ }
+ printf("tag field_count: %d\n", field_count);
+ taos_stmt2_free_fields(stmt, fields);
+ */
+ // if (taos_stmt_execute(stmt) != 0) {
+ start = clock();
+ // if (taos_stmt2_exec(stmt, NULL, stmtAsyncQueryCb, NULL) != 0) {
+ if (taos_stmt2_exec(stmt, NULL) != 0) {
+ printf("\033[31mfailed to execute insert statement.error:%s\033[0m\n", taos_stmt_errstr(stmt));
+ taos_stmt2_close(stmt);
+ return;
+ }
+ end = clock();
+ printf("exec time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+
+ taos_stmt2_close(stmt);
+
+  free(t64_len);
+  free(blob_len);
+  free(blob_len2);
+  free(blob_len3);
+  free(blob_len4);
+  free(blob_len5);
+}
+
+int main(int argc, char* argv[]) {
+ const char* host = "127.0.0.1";
+ const char* user = "root";
+ const char* passwd = "taosdata";
+
+ taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
+ TAOS* taos = taos_connect(host, user, passwd, "", 0);
+ if (taos == NULL) {
+ printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos));
+ exit(1);
+ }
+
+  printf("********* verify stmt2 insert **********\n");
+ veriry_stmt(taos);
+
+ printf("done\n");
+ taos_close(taos);
+ taos_cleanup();
+}
diff --git a/tests/script/api/stmt2.c b/tests/script/api/stmt2.c
index 82537905dc..5b9f41baff 100644
--- a/tests/script/api/stmt2.c
+++ b/tests/script/api/stmt2.c
@@ -81,8 +81,8 @@ void veriry_stmt(TAOS* taos) {
float f4[10];
double f8[10];
char bin[10][40];
- char blob[10][80];
- char blob2[10][80];
+ char blob[10][1];
+ char blob2[10][1];
} v;
int32_t* t8_len = malloc(sizeof(int32_t) * 10);
@@ -218,8 +218,14 @@ void veriry_stmt(TAOS* taos) {
for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
- strcpy(v.blob2[i], "一二三四五六七十九八");
- strcpy(v.blob[i], "一二三四五六七八九十");
+ v.blob[i][0] = 'a' + i;
+ v.blob2[i][0] = 'A' + i;
+
+ // v.blob2[i] = malloc(strlen("一二三四五六七十九八"));
+ // v.blob[i] = malloc(strlen("十九八七六五四三二一"));
+
+ // strcpy(v.blob2[i], "一二三四五六七十九八");
+ // strcpy(v.blob[i], "十九八七六五四三二一");
t8_len[i] = sizeof(int8_t);
t16_len[i] = sizeof(int16_t);
@@ -228,10 +234,9 @@ void veriry_stmt(TAOS* taos) {
float_len[i] = sizeof(float);
double_len[i] = sizeof(double);
bin_len[i] = sizeof(v.bin[0]);
- blob_len[i] = (int32_t)strlen(v.blob[i]);
- blob_len2[i] = (int32_t)strlen(v.blob2[i]);
+ blob_len[i] = sizeof(char);
+ blob_len2[i] = sizeof(char);
}
-
char* tbname = "m1";
   TAOS_STMT2_BIND* bind_cols[1] = {&params[0]};
TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]};
diff --git a/tests/script/tsim/compress/compress2.sim b/tests/script/tsim/compress/compress2.sim
index 0af6f87de4..179317dfbb 100644
--- a/tests/script/tsim/compress/compress2.sim
+++ b/tests/script/tsim/compress/compress2.sim
@@ -38,7 +38,7 @@ sql alter table $tb modify column b level 'm'
sql_error alter table $tb modify column b level 'l' # already exist
-sql_error alter table $tb modify column b compress 'lz4'
+sql alter table $tb modify column b compress 'lz4'
sql alter table $tb modify column b compress 'xz'
sql alter table $tb modify column b compress 'zstd'
sql_error alter table $tb modify column b compress 'tsz'
@@ -147,7 +147,7 @@ sql alter table $stb modify column b level 'm'
sql_error alter table $stb modify column b level 'l' # already exist
sql desc $stb
-sql_error alter table $stb modify column b compress 'lz4'
+sql alter table $stb modify column b compress 'lz4'
sql alter table $stb modify column b compress 'xz'
sql alter table $stb modify column b compress 'zstd'
sql_error alter table $stb modify column b compress 'tsz'
diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim
index 8eb6dce759..f3239957d3 100644
--- a/tests/script/tsim/db/basic1.sim
+++ b/tests/script/tsim/db/basic1.sim
@@ -53,6 +53,8 @@ if $rows != 5 then
return -1
endi
+sleep 500
+
print =============== show vgroups2
sql show d2.vgroups
if $rows != 2 then
@@ -126,13 +128,14 @@ if $data12 != d2 then
endi
if $data13 != leader then
+  print expect leader , actual $data13
return -1
endi
-print $data14
-print $data15
+print $data14 , $data15
if $data16 != 1 then
+  print expect 1, actual $data16
return -1
endi
diff --git a/tests/script/tsim/mnode/basic6.sim b/tests/script/tsim/mnode/basic6.sim
new file mode 100644
index 0000000000..4ee56ff555
--- /dev/null
+++ b/tests/script/tsim/mnode/basic6.sim
@@ -0,0 +1,413 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+system sh/cfg.sh -n dnode1 -c compressMsgSize -v 0
+system sh/cfg.sh -n dnode2 -c compressMsgSize -v 0
+system sh/cfg.sh -n dnode3 -c compressMsgSize -v 0
+system sh/cfg.sh -n dnode4 -c compressMsgSize -v 0
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print =============== step1: create dnodes
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
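+# wait until dnode1 reports ready in ins_dnodes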
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+if $data(1)[4] != ready then
+ goto step1
+endi
+
+print =============== step2: create dnodes - with error
+sql_error create mnode on dnode 1;
+sql_error create mnode on dnode 2;
+sql_error create mnode on dnode 3;
+sql_error create mnode on dnode 4;
+sql_error create mnode on dnode 5;
+sql_error create mnode on dnode 6;
+
+print =============== step3: create mnode 2 and 3
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+if $data(2)[4] != ready then
+ goto step3
+endi
+if $data(3)[4] != ready then
+ goto step3
+endi
+if $data(4)[4] != ready then
+ goto step3
+endi
+
+sql create mnode on dnode 2
+sql create mnode on dnode 3
+
+$x = 0
+step31:
+ $x = $x + 1
+ sleep 1000
+ if $x == 50 then
+ return -1
+ endi
+sql select * from information_schema.ins_mnodes
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum == 0 then
+ goto step31
+endi
+
+print =============== step4: create dnodes - with error
+sql_error create mnode on dnode 1
+sql_error create mnode on dnode 2;
+sql_error create mnode on dnode 3;
+sql_error create mnode on dnode 4;
+sql_error create mnode on dnode 5;
+sql_error create mnode on dnode 6;
+
+print =============== step5: drop mnodes - with error
+sql_error drop mnode on dnode 1
+sql_error drop mnode on dnode 4
+sql_error drop mnode on dnode 5
+sql_error drop mnode on dnode 6
+
+system sh/exec.sh -n dnode2 -s stop
+$x = 0
+step5:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $data(1)[4] != ready then
+ goto step5
+endi
+if $data(2)[4] != offline then
+ goto step5
+endi
+if $data(3)[4] != ready then
+ goto step5
+endi
+if $data(4)[4] != ready then
+ goto step5
+endi
+
+sql_error drop mnode on dnode 2
+
+system sh/exec.sh -n dnode2 -s start
+$x = 0
+step51:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $data(1)[4] != ready then
+ goto step51
+endi
+if $data(2)[4] != ready then
+ goto step51
+endi
+if $data(3)[4] != ready then
+ goto step51
+endi
+if $data(4)[4] != ready then
+ goto step51
+endi
+
+print =============== step6: stop mnode1
+system sh/exec.sh -n dnode1 -s stop
+# sql_error drop mnode on dnode 1
+
+$x = 0
+step61:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql select * from information_schema.ins_mnodes -x step61
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step61
+endi
+
+print =============== step7: start mnode1 and wait until it is online
+system sh/exec.sh -n dnode1 -s start
+
+$x = 0
+step71:
+ $x = $x + 1
+ sleep 1000
+ if $x == 50 then
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $data(1)[4] != ready then
+ goto step71
+endi
+if $data(2)[4] != ready then
+ goto step71
+endi
+if $data(3)[4] != ready then
+ goto step71
+endi
+if $data(4)[4] != ready then
+ goto step71
+endi
+
+print =============== step8: stop mnode1 and drop it
+system sh/exec.sh -n dnode1 -s stop
+
+$x = 0
+step81:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step81
+endi
+
+print =============== step9: start mnode1, then drop it and wait until it is gone
+print check mnode has leader step9a
+$x = 0
+step9a:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+print check mnode leader
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step9a
+endi
+
+print start dnode1 step9b
+system sh/exec.sh -n dnode1 -s start
+$x = 0
+step9b:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+print check dnode1 ready
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $data(1)[4] != ready then
+ goto step9b
+endi
+if $data(2)[4] != ready then
+ goto step9b
+endi
+if $data(3)[4] != ready then
+ goto step9b
+endi
+if $data(4)[4] != ready then
+ goto step9b
+endi
+
+sleep 4000
+print check mnode has leader step9c
+$x = 0
+step9c:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+print check mnode leader
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step9c
+endi
+
+print drop mnode step9d
+sql drop mnode on dnode 1
+
+$x = 0
+step9d:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
+print check mnode leader
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step9d
+endi
+if $rows != 2 then
+ goto step9d
+endi
+
+print =============== stepa: create mnode1 again
+sql create mnode on dnode 1
+$x = 0
+stepa:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum == 0 then
+ goto stepa
+endi
+if $leaderNum != 1 then
+ return -1
+endi
+
+$x = 0
+stepb:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $rows != 4 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto stepb
+endi
+if $data(2)[4] != ready then
+ goto stepb
+endi
+if $data(3)[4] != ready then
+ goto stepb
+endi
+if $data(4)[4] != ready then
+ goto stepb
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode3 -s stop
+system sh/exec.sh -n dnode4 -s stop
diff --git a/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim b/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim
new file mode 100644
index 0000000000..ed72d87e9a
--- /dev/null
+++ b/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim
@@ -0,0 +1,67 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+
+system sh/cfg.sh -n dnode1 -c checkpointInterval -v 60
+
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print step1
+print =============== create database
+sql create database test vgroups 4;
+sql use test;
+
+sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+
+sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt1 as select _wstart, count(a) from st partition by tbname interval(2s);
+sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart, count(a) from st interval(2s);
+
+run tsim/stream/checkTaskStatus.sim
+
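+# sleep past the 60s checkpointInterval so at least one checkpoint is taken before restarting taosd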
+sleep 70000
+
+
+print restart taosd 01 ......
+
+system sh/stop_dnodes.sh
+
+system sh/exec.sh -n dnode1 -s start
+
+run tsim/stream/checkTaskStatus.sim
+
+sql insert into t1 values(now + 3000a,1,1,1);
+
+$loop_count = 0
+loop0:
+
+sleep 2000
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+print select * from streamt1;
+sql select * from streamt1;
+
+print $data00 $data01 $data02
+
+if $rows == 0 then
+ goto loop0
+endi
+
+print select * from streamt2;
+sql select * from streamt2;
+
+print $data00 $data01 $data02
+
+if $rows == 0 then
+ goto loop0
+endi
+
+print end
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/system-test/0-others/grant.py b/tests/system-test/0-others/grant.py
new file mode 100644
index 0000000000..9e54d9ca37
--- /dev/null
+++ b/tests/system-test/0-others/grant.py
@@ -0,0 +1,222 @@
+from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.dnodes import TDDnodes
+from util.dnodes import TDDnode
+import time
+import socket
+import subprocess
+
+class MyDnodes(TDDnodes):
+ def __init__(self ,dnodes_lists):
+ super(MyDnodes,self).__init__()
+ self.dnodes = dnodes_lists # dnode must be TDDnode instance
+ if platform.system().lower() == 'windows':
+ self.simDeployed = True
+ else:
+ self.simDeployed = False
+
+class TDTestCase:
+ noConn = True
+ def getTDinternalPath():
+ path_parts = os.getcwd().split(os.sep)
+ try:
+ tdinternal_index = path_parts.index("TDinternal")
+ except ValueError:
+ raise ValueError("The specified directory 'TDinternal' was not found in the path.")
+ return os.sep.join(path_parts[:tdinternal_index + 1])
+
+ def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug(f"start to execute {__file__}")
+ self.TDDnodes = None
+ self.depoly_cluster(5)
+ self.master_dnode = self.TDDnodes.dnodes[0]
+ self.host=self.master_dnode.cfgDict["fqdn"]
+ conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
+ tdSql.init(conn1.cursor(), True)
+ self.TDinternal = TDTestCase.getTDinternalPath()
+ self.workPath = os.path.join(self.TDinternal, "debug", "build", "bin")
+ tdLog.info(self.workPath)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def depoly_cluster(self ,dnodes_nums):
+
+ testCluster = False
+ valgrind = 0
+ hostname = socket.gethostname()
+ dnodes = []
+ start_port = 6030
+ for num in range(1, dnodes_nums+1):
+ dnode = TDDnode(num)
+ dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
+ dnode.addExtraCfg("fqdn", f"{hostname}")
+ dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
+ dnode.addExtraCfg("monitorFqdn", hostname)
+ dnode.addExtraCfg("monitorPort", 7043)
+ dnodes.append(dnode)
+
+ self.TDDnodes = MyDnodes(dnodes)
+ self.TDDnodes.init("")
+ self.TDDnodes.setTestCluster(testCluster)
+ self.TDDnodes.setValgrind(valgrind)
+
+ self.TDDnodes.setAsan(tdDnodes.getAsan())
+ self.TDDnodes.stopAll()
+ for dnode in self.TDDnodes.dnodes:
+ self.TDDnodes.deploy(dnode.index,{})
+
+ for dnode in self.TDDnodes.dnodes:
+ self.TDDnodes.starttaosd(dnode.index)
+
+ # create cluster
+ for dnode in self.TDDnodes.dnodes[1:]:
+ # print(dnode.cfgDict)
+ dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
+ dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
+ dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
+ cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
+ print(cmd)
+ os.system(cmd)
+
+ time.sleep(2)
+ tdLog.info(" create cluster done! ")
+
+ def s0_five_dnode_one_mnode(self):
+ tdSql.query("select * from information_schema.ins_dnodes;")
+ tdSql.checkData(0,1,'%s:6030'%self.host)
+ tdSql.checkData(4,1,'%s:6430'%self.host)
+ tdSql.checkData(0,4,'ready')
+ tdSql.checkData(4,4,'ready')
+ tdSql.query("select * from information_schema.ins_mnodes;")
+ tdSql.checkData(0,1,'%s:6030'%self.host)
+ tdSql.checkData(0,2,'leader')
+ tdSql.checkData(0,3,'ready')
+ tdSql.error("create mnode on dnode 1;")
+ tdSql.error("drop mnode on dnode 1;")
+ tdSql.execute("create database if not exists audit");
+ tdSql.execute("use audit");
+ tdSql.execute("create table operations(ts timestamp, c0 int primary key,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)");
+ tdSql.execute("create table t_operations_abc using operations tags(1)");
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db replica 1")
+ tdSql.execute("use db")
+ tdSql.execute("create table stb0(ts timestamp, c0 int primary key,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)");
+ tdSql.execute("create table ctb0 using stb0 tags(0)");
+ tdSql.execute("create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart, count(*) c1, count(c2) c2 , sum(c3) c3 , max(c4) c4 from stb0 interval(10s)");
+ tdSql.execute("create topic topic_stb_column as select ts, c3 from stb0");
+ tdSql.execute("create topic topic_stb_all as select ts, c1, c2, c3 from stb0");
+ tdSql.execute("create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb0");
+ tdSql.execute("create view view1 as select * from stb0");
+
+ def getConnection(self, dnode):
+ host = dnode.cfgDict["fqdn"]
+ port = dnode.cfgDict["serverPort"]
+ config_dir = dnode.cfgDir
+ return taos.connect(host=host, port=int(port), config=config_dir)
+
+ def s1_check_alive(self):
+ # check cluster alive
+ tdLog.printNoPrefix("======== test cluster alive: ")
+ tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5)
+
+ tdSql.query("show db.alive;")
+ tdSql.checkData(0, 0, 1)
+
+ def s2_check_show_grants_ungranted(self):
+ tdLog.printNoPrefix("======== test show grants ungranted: ")
+ self.infoPath = os.path.join(self.workPath, ".clusterInfo")
+ infoFile = open(self.infoPath, "w")
+ try:
+ tdSql.query(f'select create_time,expire_time,version from information_schema.ins_cluster;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str, tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show cluster machines;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str,tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show grants;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str,tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show grants full;')
+ tdSql.checkEqual(len(tdSql.queryResult), 31)
+
+ if infoFile:
+ infoFile.flush()
+
+ files_and_dirs = os.listdir(f'{self.workPath}')
+ print(f"files_and_dirs: {files_and_dirs}")
+
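+            # run the grantTest helper from the build bin dir; it is assumed to consume the .clusterInfo file written above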
+ process = subprocess.Popen(f'{self.workPath}{os.sep}grantTest', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ output = output.decode(encoding="utf-8")
+ error = error.decode(encoding="utf-8")
+ print(f"code: {process.returncode}")
+ print(f"error:\n{error}")
+ tdSql.checkEqual(process.returncode, 0)
+ tdSql.checkEqual(error, "")
+ lines = output.splitlines()
+ for line in lines:
+ if line.startswith("code:"):
+ fields = line.split(":")
+ tdSql.error(f"{fields[2]}", int(fields[1]), fields[3])
+ except Exception as e:
+ if os.path.exists(self.infoPath):
+ os.remove(self.infoPath)
+ raise Exception(repr(e))
+ finally:
+ if infoFile:
+ infoFile.close()
+
+ def s3_check_show_grants_granted(self):
+ tdLog.printNoPrefix("======== test show grants granted: ")
+ try:
+ process = subprocess.Popen(f'{self.workPath}{os.sep}grantTest 1', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ output = output.decode(encoding="utf-8")
+ error = error.decode(encoding="utf-8")
+ print(f"code: {process.returncode}")
+ print(f"error:\n{error}")
+ print(f"output:\n{output}")
+ tdSql.checkEqual(process.returncode, 0)
+ except Exception as e:
+ raise Exception(repr(e))
+ finally:
+ if os.path.exists(self.infoPath):
+ os.remove(self.infoPath)
+
+ def run(self):
+ # print(self.master_dnode.cfgDict)
+ # keep the order of following steps
+ self.s0_five_dnode_one_mnode()
+ self.s1_check_alive()
+ self.s2_check_show_grants_ungranted()
+ self.s3_check_show_grants_granted()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py
index aa548d4e59..538aa1ad63 100644
--- a/tests/system-test/0-others/information_schema.py
+++ b/tests/system-test/0-others/information_schema.py
@@ -222,7 +222,7 @@ class TDTestCase:
tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'")
tdLog.info(len(tdSql.queryResult))
- tdSql.checkEqual(True, len(tdSql.queryResult) in range(281, 282))
+ tdSql.checkEqual(True, len(tdSql.queryResult) in range(282, 283))
tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'")
tdSql.checkEqual(56, len(tdSql.queryResult))
@@ -299,6 +299,7 @@ class TDTestCase:
'oracle':'Oracle',
'mssql':'SqlServer',
'mongodb':'MongoDB',
+ 'csv':'CSV',
}
tdSql.execute('drop database if exists db2')
diff --git a/tests/system-test/0-others/multilevel_createdb.py b/tests/system-test/0-others/multilevel_createdb.py
new file mode 100644
index 0000000000..70131a760b
--- /dev/null
+++ b/tests/system-test/0-others/multilevel_createdb.py
@@ -0,0 +1,89 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+import glob
+
+def scanFiles(pattern):
+ res = []
+ for f in glob.iglob(pattern):
+ res += [f]
+ return res
+
+def checkFiles(pattern, state):
+ res = scanFiles(pattern)
+ tdLog.info(res)
+ num = len(res)
+ if num:
+ if state:
+ tdLog.info("%s: %d files exist. expect: files exist" % (pattern, num))
+ else:
+ tdLog.exit("%s: %d files exist. expect: files not exist." % (pattern, num))
+ else:
+ if state:
+ tdLog.exit("%s: %d files exist. expect: files exist" % (pattern, num))
+ else:
+ tdLog.info("%s: %d files exist. expect: files not exist." % (pattern, num))
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+
+ def basic(self):
+ tdLog.info("============== basic test ===============")
+ cfg={
+ '/mnt/data1 0 1 0' : 'dataDir',
+ '/mnt/data2 0 0 0' : 'dataDir',
+ '/mnt/data3 0 0 0' : 'dataDir',
+ '/mnt/data4 0 0 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+ tdSql.createDir('/mnt/data2')
+ tdSql.createDir('/mnt/data3')
+ tdSql.createDir('/mnt/data4')
+
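+        # redeploy dnode1 with four same-level dataDir mount points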
+ tdDnodes.stop(1)
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+
+ checkFiles(r'/mnt/data1/*/*',1)
+ checkFiles(r'/mnt/data2/*/*',0)
+
+ tdSql.execute('create database nws vgroups 20 stt_trigger 1 wal_level 1 wal_retention_period 0')
+
+ checkFiles(r'/mnt/data1/vnode/*/wal',5)
+ checkFiles(r'/mnt/data2/vnode/*/wal',5)
+ checkFiles(r'/mnt/data3/vnode/*/wal',5)
+ checkFiles(r'/mnt/data4/vnode/*/wal',5)
+
+ def run(self):
+ self.basic()
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/0-others/wal_level_skip.py b/tests/system-test/0-others/wal_level_skip.py
new file mode 100644
index 0000000000..2105ad0ada
--- /dev/null
+++ b/tests/system-test/0-others/wal_level_skip.py
@@ -0,0 +1,170 @@
+import sys
+import taos
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+
+ def init(self, conn, logSql,replicaVar=1):
+        tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor())
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
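+    # preData: recreate db0 with wal_level 0 (WAL disabled) and seed a few rows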
+ def preData(self):
+ tdSql.execute("drop database if exists db0;")
+ tdSql.execute("create database db0 KEEP 30 vgroups 1 buffer 3 wal_level 0;")
+ tdSql.execute("create table if not exists db0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);")
+ tdSql.execute("create table db0.ct1 using db0.stb tags(1000);")
+ tdSql.execute("create table db0.ct2 using db0.stb tags(2000);")
+ tdSql.execute("create table if not exists db0.ntb (ts timestamp, c1 int, c2 float, c3 double) ;")
+ tdSql.query("show db0.stables;")
+ tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);")
+ tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);")
+ tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);")
+
+ def insertData(self):
+ tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);")
+ tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);")
+ tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);")
+
+ def createSubTableAndInsertData(self):
+ tdSql.execute("create table db0.ct1 using db0.stb tags(1000);")
+ tdSql.execute("create table db0.ct2 using db0.stb tags(2000);")
+ tdSql.execute("create table if not exists db0.ntb (ts timestamp, c1 int, c2 float, c3 double) ;")
+ tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);")
+ tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);")
+ tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);")
+
+
+ def alterWalLevel(self,level):
+ tdSql.execute("alter database db0 wal_level %d;"%level)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdLog.info("-----------test for stop taosd before alter wal level-----------")
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdDnodes.stop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+ tdLog.info(" alter wal level from 0 to 1")
+ self.alterWalLevel(1)
+ self.insertData()
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdDnodes.stop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+ tdLog.info(" alter wal level from 0 to 2")
+ self.alterWalLevel(2)
+ self.insertData()
+ tdDnodes.forcestop(1)
+ tdDnodes.start(1)
+
+
+ tdLog.info("-----------test for kill taosd before alter wal level-----------")
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdDnodes.forcestop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+ tdLog.info(" alter wal level from 0 to 1")
+ self.alterWalLevel(1)
+ tdDnodes.forcestop(1)
+ tdDnodes.start(1)
+
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdDnodes.forcestop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+ tdLog.info(" alter wal level from 0 to 2")
+ self.alterWalLevel(2)
+ tdDnodes.forcestop(1)
+ tdDnodes.start(1)
+
+ tdLog.info("-----------test for stop taosd after alter wal level-----------")
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdLog.info(" alter wal level from 0 to 1")
+ self.alterWalLevel(1)
+ time.sleep(1)
+ self.insertData()
+ tdDnodes.stop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdLog.info(" alter wal level from 0 to 2")
+ self.alterWalLevel(2)
+ time.sleep(1)
+ self.insertData()
+ tdDnodes.stop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+ tdLog.info("-----------test for kill taosd after alter wal level-----------")
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdLog.info(" alter wal level from 0 to 1")
+ self.alterWalLevel(1)
+ time.sleep(1)
+ self.insertData()
+ tdDnodes.forcestop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+
+ tdLog.info("create database wal_level = 0 and insert data")
+ self.preData()
+ tdLog.info(" alter wal level from 0 to 2")
+ self.alterWalLevel(2)
+ time.sleep(1)
+ self.insertData()
+ tdDnodes.forcestop(1)
+ time.sleep(2)
+ tdLog.info("restart taosd")
+ tdDnodes.start(1)
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/test_multi_insert.py b/tests/system-test/1-insert/test_multi_insert.py
new file mode 100644
index 0000000000..d1b6d28ffd
--- /dev/null
+++ b/tests/system-test/1-insert/test_multi_insert.py
@@ -0,0 +1,32 @@
+from util.sql import *
+from util.common import *
+import taos
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar = 1):
+ self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+ self.conn = conn
+ tdSql.init(conn.cursor(), logSql)
+ def initdb(self):
+ tdSql.execute("drop database if exists d0")
+ tdSql.execute("create database d0")
+ tdSql.execute("use d0")
+ tdSql.execute("create stable stb0 (ts timestamp, w_ts timestamp, opc nchar(100), quality int) tags(t0 int)")
+ tdSql.execute("create table t0 using stb0 tags(1)")
+ tdSql.execute("create table t1 using stb0 tags(2)")
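+    # each insert below mixes full-row values and partial column lists for the same table in a single statement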
+ def multi_insert(self):
+ for i in range(5):
+ tdSql.execute(f"insert into t1 values(1721265436000, now() + {i + 1}s, '0', 12) t1(opc, quality, ts) values ('opc2', 192, now()+ {i + 2}s) t1(ts, opc, quality) values(now() + {i + 3}s, 'opc4', 10) t1 values(1721265436000, now() + {i + 4}s, '1', 191) t1(opc, quality, ts) values('opc5', 192, now() + {i + 5}s) t1 values(now(), now() + {i + 6}s, '2', 192)")
+ tdSql.execute("insert into t0 values(1721265436000,now(),'0',192) t0(quality,w_ts,ts) values(192,now(),1721265326000) t0(quality,w_t\
+s,ts) values(190,now()+1s,1721265326000) t0 values(1721265436000,now()+2s,'1',191) t0(quality,w_ts,ts) values(192,now()+3s,\
+1721265326002) t0(ts,w_ts,opc,quality) values(1721265436003,now()+4s,'3',193) t0 values(now(), now() + 4s , '2', 192)")
+ def run(self):
+ self.initdb()
+ self.multi_insert()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/agg_group_NotReturnValue.py b/tests/system-test/2-query/agg_group_NotReturnValue.py
index 83f0acd362..059a5c5f2e 100755
--- a/tests/system-test/2-query/agg_group_NotReturnValue.py
+++ b/tests/system-test/2-query/agg_group_NotReturnValue.py
@@ -1590,11 +1590,9 @@ class TDTestCase(TDTestCase):
self.modify_tables()
tdSql.execute('alter local "countAlwaysReturnValue" "0"')
- for i in range(2):
- self.tag_count_all()
- self.tbname_count_all()
- self.tbname_agg_all()
-
+ self.tag_count_all()
+ self.tbname_count_all()
+ self.tbname_agg_all()
endTime = time.time()
print("total time %ds" % (endTime - startTime))
diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py
index 7ee528841c..74f5e86267 100644
--- a/tests/system-test/2-query/group_partition.py
+++ b/tests/system-test/2-query/group_partition.py
@@ -422,21 +422,36 @@ class TDTestCase:
def test_TS5567(self):
tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t group by const_col")
- tdSql.checkRows(50)
+ tdSql.checkRows(1)
tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by const_col")
tdSql.checkRows(50)
tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by const_col")
- tdSql.checkRows(10)
+ tdSql.checkRows(1)
tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by const_col")
tdSql.checkRows(10)
tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t group by c_c")
- tdSql.checkRows(50)
+ tdSql.checkRows(1)
tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by c_c")
tdSql.checkRows(50)
tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by 1")
- tdSql.checkRows(10)
+ tdSql.checkRows(1)
tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by 1")
tdSql.checkRows(10)
+
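+    # TD-32883: duplicate group-by/partition-by expressions should be deduplicated and still return one row per child table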
+ def test_TD_32883(self):
+ sql = "select avg(c1), t9 from db.stb group by t9,t9, tbname"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkRows(5)
+ sql = "select avg(c1), t10 from db.stb group by t10,t10, tbname"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkRows(5)
+ sql = "select avg(c1), t10 from db.stb partition by t10,t10, tbname"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkRows(5)
+ sql = "select avg(c1), concat(t9,t10) from db.stb group by concat(t9,t10), concat(t9,t10),tbname"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkRows(5)
+
def run(self):
tdSql.prepare()
self.prepare_db()
@@ -470,6 +485,7 @@ class TDTestCase:
self.test_event_window(nonempty_tb_num)
self.test_TS5567()
+ self.test_TD_32883()
## test old version before changed
# self.test_groupby('group', 0, 0)
diff --git a/tests/system-test/2-query/partition_by_col.py b/tests/system-test/2-query/partition_by_col.py
index ef88e88cbd..da7fe78124 100644
--- a/tests/system-test/2-query/partition_by_col.py
+++ b/tests/system-test/2-query/partition_by_col.py
@@ -313,7 +313,21 @@ class TDTestCase:
order_by_list = 'ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,t1,t2,t3,t4,t5,t6'
self.prepare_and_query_and_compare(sqls, order_by_list, compare_what=COMPARE_LEN)
+
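+    # fill a full data block (4096 rows), overlap it with later inserts across flushes, then check that first()/last() still return a single row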
+ def test_tsdb_read(self):
+ tdSql.execute('delete from t0')
+ tdSql.execute('flush database test')
+ for i in range(0, 4096):
+ tdSql.execute(f"insert into test.t0 values({1537146000000 + i}, 1,1,1,1,1,1,1,'a','1')")
+ tdSql.execute("flush database test")
+ tdSql.execute(f"insert into t0 values({1537146000000 + 4095}, 1,1,1,1,1,1,1,'a','1')")
+ for i in range(4095, 4096*2 + 100):
+ tdSql.execute(f"insert into test.t0 values({1537146000000 + i}, 1,1,1,1,1,1,1,'a','1')")
+ tdSql.execute("flush database test")
+ time.sleep(5)
+ tdSql.query('select first(ts), last(ts) from t0', queryTimes=1)
+ tdSql.checkRows(1)
def run(self):
self.prepareTestEnv()
@@ -323,6 +337,8 @@ class TDTestCase:
self.test_sort_for_partition_res()
self.test_sort_for_partition_interval()
self.test_sort_for_partition_no_agg_limit()
+ self.test_tsdb_read()
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/project_group.py b/tests/system-test/2-query/project_group.py
index 19fe8b1cf0..a251854213 100644
--- a/tests/system-test/2-query/project_group.py
+++ b/tests/system-test/2-query/project_group.py
@@ -15,6 +15,30 @@ class TDTestCase:
self.batchNum = 5
self.ts = 1537146000000
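+    # grouping by a constant expression (now, "1") should collapse all rows into a single group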
+ def groupby_value(self):
+ tdSql.query('select 1 from stb group by now')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+ tdSql.query('select 1 from stb group by "1"')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+ tdSql.query('select count(*) from stb group by now')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 12)
+ tdSql.query('select count(*) from stb group by now+1')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 12)
+ tdSql.query('select 1, count(*) from stb group by now, "1"')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(0, 1, 12)
+ tdSql.query('select count(*) as cc from sta1 as a join sta2 as b on a.ts = b.ts group by now')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 3)
+ tdSql.query('select a.tbname, count(*) as cc from sta1 as a join sta2 as b on a.ts = b.ts group by a.tbname, "1"')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 3)
+
def run(self):
dbname = "db"
tdSql.prepare()
@@ -59,6 +83,9 @@ class TDTestCase:
tdSql.checkRows(2)
tdSql.query('select col1 > 0 and col2 > 0 from stb')
tdSql.checkRows(12)
+
+ self.groupby_value()
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/stddev_test.py b/tests/system-test/2-query/stddev_test.py
new file mode 100644
index 0000000000..c0cb51fe57
--- /dev/null
+++ b/tests/system-test/2-query/stddev_test.py
@@ -0,0 +1,54 @@
+import numpy as np
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+
+'''
+Test case for TS-5150
+'''
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.ts = 1537146000000
+ def initdabase(self):
+ tdSql.execute('create database if not exists db_test vgroups 2 buffer 10')
+ tdSql.execute('use db_test')
+ tdSql.execute('create stable stb(ts timestamp, delay int) tags(groupid int)')
+ tdSql.execute('create table t1 using stb tags(1)')
+ tdSql.execute('create table t2 using stb tags(2)')
+ tdSql.execute('create table t3 using stb tags(3)')
+ tdSql.execute('create table t4 using stb tags(4)')
+ tdSql.execute('create table t5 using stb tags(5)')
+ tdSql.execute('create table t6 using stb tags(6)')
+ def insert_data(self):
+ for i in range(5000):
+ tdSql.execute(f"insert into t1 values({self.ts + i * 1000}, {i%5})")
+ tdSql.execute(f"insert into t2 values({self.ts + i * 1000}, {i%5})")
+ tdSql.execute(f"insert into t3 values({self.ts + i * 1000}, {i%5})")
+
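+    # delay cycles through 0..4, so the stddev of the non-zero delays in each window should exceed the 0.8 threshold asserted below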
+ def verify_stddev(self):
+ for i in range(20):
+ tdSql.query(f'SELECT MAX(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS maxDelay,\
+ MIN(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS minDelay,\
+ AVG(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS avgDelay,\
+ STDDEV(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS jitter,\
+ COUNT(CASE WHEN delay = 0 THEN 1 ELSE NULL END) AS timeoutCount,\
+ COUNT(*) AS totalCount from stb where ts between {1537146000000 + i * 1000} and {1537146000000 + (i+10) * 1000}')
+ res = tdSql.queryResult[0][3]
+ assert res > 0.8
+ def run(self):
+ self.initdabase()
+ self.insert_data()
+ self.verify_stddev()
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
+
diff --git a/tests/system-test/2-query/tsma.py b/tests/system-test/2-query/tsma.py
index a1638ae4cb..1c688d568c 100644
--- a/tests/system-test/2-query/tsma.py
+++ b/tests/system-test/2-query/tsma.py
@@ -604,7 +604,7 @@ class TSMATestSQLGenerator:
class TDTestCase:
- updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4, 'maxTsmaNum': 3}
+ updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4, 'maxTsmaNum': 3, 'debugFlag': 143}
def __init__(self):
self.vgroups = 4
@@ -804,8 +804,8 @@ class TDTestCase:
self.tsma_tester.check_sql(ctx.sql, ctx)
def test_query_with_tsma(self):
- self.create_tsma('tsma1', 'test', 'meters', ['avg(c1)', 'avg(c2)'], '5m')
- self.create_tsma('tsma2', 'test', 'meters', ['avg(c1)', 'avg(c2)'], '30m')
+ self.create_tsma('tsma1', 'test', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'], '5m')
+ self.create_tsma('tsma2', 'test', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'], '30m')
self.create_tsma('tsma5', 'test', 'norm_tb', ['avg(c1)', 'avg(c2)'], '10m')
self.test_query_with_tsma_interval()
@@ -1237,7 +1237,41 @@ class TDTestCase:
clust_dnode_nums = len(cluster_dnode_list)
if clust_dnode_nums > 1:
self.test_redistribute_vgroups()
-
+ tdSql.execute("drop tsma test.tsma5")
+ for _ in range(4):
+ self.test_td_32519()
+
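+    # TD-32519: the backtick-quoted hash names below are the auto-generated TSMA output child tables for t1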
+ def test_td_32519(self):
+ self.create_recursive_tsma('tsma1', 'tsma_r', 'test', '1h', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'])
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:45:00", 1,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:55:00", 2,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('DROP TABLE test.t1', queryTimes=1)
+ self.wait_query_err('desc test.`404e15422d96c8b5de9603c2296681b1`', 10, -2147473917)
+ self.wait_query_err('desc test.`82b56f091c4346369da0af777c3e580d`', 10, -2147473917)
+ self.wait_query_err('desc test.`163b7c69922cf6d83a98bfa44e52dade`', 10, -2147473917)
+ tdSql.execute('CREATE TABLE test.t1 USING test.meters TAGS(1, "a", "b", 1,1,1)')
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:59:00", 3,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 12:10:00", 4,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 12:20:00", 5,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('FLUSH DATABASE test', queryTimes=1)
+ tdSql.query('SELECT * FROM test.t1', queryTimes=1)
+ tdSql.checkRows(3)
+ sql = 'SELECT * FROM test.`404e15422d96c8b5de9603c2296681b1`'
+ self.wait_query(sql, 3, 20) ## tsma1 output ctb for t1
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkData(0,1, 1)
+ tdSql.checkData(1,1, 1)
+ tdSql.checkData(2,1, 1)
+ #sql = 'select * from test.`82b56f091c4346369da0af777c3e580d`'
+ #self.wait_query(sql, 2, 10) ## tsma2 output ctb for t1
+ #tdSql.query(sql, queryTimes=1)
+ #tdSql.checkData(0, 1, 1)
+ #tdSql.checkData(1, 1, 2)
+ sql = 'select * from test.`163b7c69922cf6d83a98bfa44e52dade`'
+ self.wait_query(sql, 2, 20) ## tsma_r output ctb for t1
+ tdSql.checkData(0, 1, 1)
+ self.drop_tsma('tsma_r', 'test')
+
def test_create_tsma(self):
function_name = sys._getframe().f_code.co_name
tdLog.debug(f'-----{function_name}------')
diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py
index 2aa01f2c02..5104489592 100644
--- a/tests/system-test/2-query/union.py
+++ b/tests/system-test/2-query/union.py
@@ -369,8 +369,46 @@ class TDTestCase:
'''
)
+ def test_TS_5630(self):
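+    # TS-5630: UNION ALL of six last() subqueries with constant columns must keep the selected column count and return one row per branch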
+ sql = "CREATE DATABASE `ep_iot` BUFFER 256 CACHESIZE 20 CACHEMODEL 'both' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 3 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0"
+ tdSql.execute(sql, queryTimes=1)
+ tdLog.info("database ep_iot created")
+ sql = "CREATE STABLE `ep_iot`.`sldc_dp` (`ts` TIMESTAMP, `data_write_time` TIMESTAMP, `jz1fdgl` DOUBLE, `jz1ssfdfh` DOUBLE, `jz1fdmh` DOUBLE, `jz1gdmh` DOUBLE, `jz1qjrhl` DOUBLE, `jz1zhcydl` DOUBLE, `jz1zkby` DOUBLE, `jz1zzqyl` DOUBLE, `jz1zzqwda` DOUBLE, `jz1zzqwdb` DOUBLE, `jz1zzqll` DOUBLE, `jz1gswd` DOUBLE, `jz1gsll` DOUBLE, `jz1glxl` DOUBLE, `jz1qjrh` DOUBLE, `jz1zhrxl` DOUBLE, `jz1gmjassllfk` DOUBLE, `jz1gmjasslllj` DOUBLE, `jz1gmjbssllfk` DOUBLE, `jz1gmjbsslllj` DOUBLE, `jz1gmjcssllfk` DOUBLE, `jz1gmjcsslllj` DOUBLE, `jz1gmjdssllfk` DOUBLE, `jz1gmjdsslllj` DOUBLE, `jz1gmjessllfk` DOUBLE, `jz1gmjesslllj` DOUBLE, `jz1gmjfssllfk` DOUBLE, `jz1gmjfsslllj` DOUBLE, `jz1zrqwda` DOUBLE, `jz1zrqwdb` DOUBLE, `jz1zrzqyl` DOUBLE, `jz1mmjadl` DOUBLE, `jz1mmjbdl` DOUBLE, `jz1mmjcdl` DOUBLE, `jz1mmjddl` DOUBLE, `jz1mmjedl` DOUBLE, `jz1mmjfdl` DOUBLE, `jz1cyqckwda` DOUBLE, `jz1cyqckwdb` DOUBLE, `jz1njswd` DOUBLE, `jz1nqqxhsckawd` DOUBLE, `jz1nqqxhsckbwd` DOUBLE, `jz1nqqxhsrkawd` DOUBLE, `jz1nqqxhsrkbwd` DOUBLE, `jz1kyqackyqwdsel` DOUBLE, `jz1kyqbckyqwdsel` DOUBLE, `jz1yfjackyqwd` DOUBLE, `jz1yfjbckyqwd` DOUBLE, `jz1trkyqwd` DOUBLE, `jz1trkyqwd1` DOUBLE, `jz1trkyqwd2` DOUBLE, `jz1trkyqwd3` DOUBLE, `jz1tckjyqwd1` DOUBLE, `jz1tckjyqwd2` DOUBLE, `jz1tckyqwd1` DOUBLE, `jz1bya` DOUBLE, `jz1byb` DOUBLE, `jz1pqwda` DOUBLE, `jz1pqwdb` DOUBLE, `jz1gmjadl` DOUBLE, `jz1gmjbdl` DOUBLE, `jz1gmjcdl` DOUBLE, `jz1gmjddl` DOUBLE, `jz1gmjedl` DOUBLE, `jz1gmjfdl` DOUBLE, `jz1yfjadl` DOUBLE, `jz1yfjbdl` DOUBLE, `jz1ycfjadl` DOUBLE, `jz1ycfjbdl` DOUBLE, `jz1sfjadl` DOUBLE, `jz1sfjbdl` DOUBLE, `jz1fdjyggl` DOUBLE, `jz1fdjwggl` DOUBLE, `jz1sjzs` DOUBLE, `jz1zfl` DOUBLE, `jz1ltyl` DOUBLE, `jz1smb` DOUBLE, `jz1rll` DOUBLE, `jz1grd` DOUBLE, `jz1zjwd` DOUBLE, `jz1yl` DOUBLE, `jz1kyqckwd` DOUBLE, `jz1abmfsybrkcy` DOUBLE, `jz1bbmfsybrkcy` DOUBLE, `jz1abjcsdmfytwdzdz` DOUBLE, `jz1bbjcsdmfytwdzdz` DOUBLE, `jz2fdgl` DOUBLE, `jz2ssfdfh` DOUBLE, `jz2fdmh` DOUBLE, `jz2gdmh` DOUBLE, `jz2qjrhl` DOUBLE, `jz2zhcydl` DOUBLE, `jz2zkby` DOUBLE, `jz2zzqyl` DOUBLE, `jz2zzqwda` DOUBLE, `jz2zzqwdb` DOUBLE, `jz2zzqll` DOUBLE, `jz2gswd` DOUBLE, `jz2gsll` DOUBLE, `jz2glxl` DOUBLE, `jz2qjrh` DOUBLE, `jz2zhrxl` DOUBLE, `jz2gmjassllfk` DOUBLE, `jz2gmjasslllj` DOUBLE, `jz2gmjbssllfk` DOUBLE, `jz2gmjbsslllj` DOUBLE, `jz2gmjcssllfk` DOUBLE, `jz2gmjcsslllj` DOUBLE, `jz2gmjdssllfk` DOUBLE, `jz2gmjdsslllj` DOUBLE, `jz2gmjessllfk` DOUBLE, `jz2gmjesslllj` DOUBLE, `jz2gmjfssllfk` DOUBLE, `jz2gmjfsslllj` DOUBLE, `jz2zrqwda` DOUBLE, `jz2zrqwdb` DOUBLE, `jz2zrzqyl` DOUBLE, `jz2mmjadl` DOUBLE, `jz2mmjbdl` DOUBLE, `jz2mmjcdl` DOUBLE, `jz2mmjddl` DOUBLE, `jz2mmjedl` DOUBLE, `jz2mmjfdl` DOUBLE, `jz2cyqckwda` DOUBLE, `jz2cyqckwdb` DOUBLE, `jz2njswd` DOUBLE, `jz2nqqxhsckawd` DOUBLE, `jz2nqqxhsckbwd` DOUBLE, `jz2nqqxhsrkawd` DOUBLE, `jz2nqqxhsrkbwd` DOUBLE, `jz2kyqackyqwdsel` DOUBLE, `jz2kyqbckyqwdsel` DOUBLE, `jz2yfjackyqwd` DOUBLE, `jz2yfjbckyqwd` DOUBLE, `jz2trkyqwd` DOUBLE, `jz2trkyqwd1` DOUBLE, `jz2trkyqwd2` DOUBLE, `jz2trkyqwd3` DOUBLE, `jz2tckjyqwd1` DOUBLE, `jz2tckjyqwd2` DOUBLE, `jz2tckyqwd1` DOUBLE, `jz2bya` DOUBLE, `jz2byb` DOUBLE, `jz2pqwda` DOUBLE, `jz2pqwdb` DOUBLE, `jz2gmjadl` DOUBLE, `jz2gmjbdl` DOUBLE, `jz2gmjcdl` DOUBLE, `jz2gmjddl` DOUBLE, `jz2gmjedl` DOUBLE, `jz2gmjfdl` DOUBLE, `jz2yfjadl` DOUBLE, `jz2yfjbdl` DOUBLE, `jz2ycfjadl` DOUBLE, `jz2ycfjbdl` DOUBLE, `jz2sfjadl` DOUBLE, `jz2sfjbdl` DOUBLE, `jz2fdjyggl` DOUBLE, `jz2fdjwggl` DOUBLE, `jz2sjzs` DOUBLE, `jz2zfl` DOUBLE, `jz2ltyl` DOUBLE, `jz2smb` DOUBLE, `jz2rll` DOUBLE, `jz2grd` DOUBLE, 
`jz2zjwd` DOUBLE, `jz2yl` DOUBLE, `jz2kyqckwd` DOUBLE, `jz2abmfsybrkcy` DOUBLE, `jz2bbmfsybrkcy` DOUBLE, `jz2abjcsdmfytwdzdz` DOUBLE, `jz2bbjcsdmfytwdzdz` DOUBLE) TAGS (`iot_hub_id` VARCHAR(100), `device_group_code` VARCHAR(100), `device_code` VARCHAR(100))"
+ tdLog.info("stable ep_iot.sldc_dp created")
+ tdSql.execute(sql, queryTimes=1)
+ sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('a','a','a') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
+ tdSql.execute(sql, queryTimes=1)
+ sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('b','b','b') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
+ tdSql.execute(sql, queryTimes=1)
+ sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('c','c','c') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
+ tdSql.execute(sql, queryTimes=1)
+ sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('d','d','d') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
+ tdSql.execute(sql, queryTimes=1)
+ sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('e','e','e') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
+ tdSql.execute(sql, queryTimes=1)
+ sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkCols(5)
+ tdSql.checkRows(6)
+
+ sql = "select scdw_name, scdw_code, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkCols(5)
+ tdSql.checkRows(6)
+ sql = "select scdw_name, scdw_code, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkRows(6)
+ tdSql.checkCols(3)
+
+ sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt,ts from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
+ tdSql.query(sql, queryTimes=1)
+ tdSql.checkCols(6)
+ tdSql.checkRows(6)
+ ##tdSql.execute("drop database ep_iot")
+
def run(self):
tdSql.prepare()
+ self.test_TS_5630()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
diff --git a/tests/system-test/7-tmq/tmq_offset.py b/tests/system-test/7-tmq/tmq_offset.py
index 07d1a4bc04..7eabb50be2 100644
--- a/tests/system-test/7-tmq/tmq_offset.py
+++ b/tests/system-test/7-tmq/tmq_offset.py
@@ -45,6 +45,11 @@ class TDTestCase:
tdLog.exit("tmq_offset_test error!")
else:
buildPath = tdCom.getBuildPath()
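+                # run the TS-5679 committed-offset case first: tmq_offset_test executes only
+                # test_ts5679() when it is launched with an extra command-line argument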
+ cmdStr0 = '%s/build/bin/tmq_offset_test 5679'%(buildPath)
+ tdLog.info(cmdStr0)
+ if os.system(cmdStr0) != 0:
+ tdLog.exit(cmdStr0)
+
cmdStr1 = '%s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y &'%(buildPath)
tdLog.info(cmdStr1)
os.system(cmdStr1)
diff --git a/tests/system-test/8-stream/checkpoint_info.py b/tests/system-test/8-stream/checkpoint_info.py
new file mode 100644
index 0000000000..522017a702
--- /dev/null
+++ b/tests/system-test/8-stream/checkpoint_info.py
@@ -0,0 +1,140 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+from util.cluster import *
+import threading
+# this case should be run with the -N option
+class TDTestCase:
+
+ #updatecfgDict = {'checkpointInterval': 60 ,}
+ def init(self, conn, logSql, replicaVar=1):
+ print("========init========")
+
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ def find_checkpoint_info_file(self, dirpath, checkpointid, task_id):
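+        # walk the vnode data directory for a checkpoint<checkpointid>/info file whose
+        # path contains the given task_id; return its path, or None if it does not exist yet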
+ for root, dirs, files in os.walk(dirpath):
+ if f'checkpoint{checkpointid}' in dirs:
+ info_path = os.path.join(root, f'checkpoint{checkpointid}', 'info')
+ if os.path.exists(info_path):
+ if task_id in info_path:
+ return info_path
+ else:
+ continue
+ else:
+ return None
+ def get_dnode_info(self):
+ '''
+        build a dict mapping each vgroup id to the dnode that hosts it
+ '''
+ self.vnode_dict = {}
+ sql = 'select dnode_id, vgroup_id from information_schema.ins_vnodes'
+ result = tdSql.getResult(sql)
+ for (dnode,vnode) in result:
+ self.vnode_dict[vnode] = dnode
+ def print_time_info(self):
+ '''
+        poll check_info() until it passes; the checkpoint info file may not exist immediately (for example, right after a vgroup redistribute), so retry every 10 seconds before timing out
+ '''
+        times = 0
+ while(True):
+ if(self.check_info()):
+ tdLog.success(f'Time to finish is {times}')
+ return
+ else:
+ if times > 200:
+ tdLog.exit("time out")
+ times += 10
+ time.sleep(10)
+ def check_info(self):
+ '''
+        wait until every vnode is restored, then check that each stream task's on-disk checkpoint info file matches the checkpoint_id and checkpoint_ver reported by ins_stream_tasks
+ '''
+ while(True):
+ if(self.check_vnodestate()):
+ break
+ sql = 'select task_id, node_id, checkpoint_id, checkpoint_ver from information_schema.ins_stream_tasks where `level` = "source" or `level` = "agg" and node_type == "vnode"'
+ for task_id, vnode, checkpoint_id, checkpoint_ver in tdSql.getResult(sql):
+ dirpath = f"{cluster.dnodes[self.vnode_dict[vnode]-1].dataDir}/vnode/vnode{vnode}/"
+ info_path = self.find_checkpoint_info_file(dirpath, checkpoint_id, task_id)
+ if info_path is None:
+ return False
+ with open(info_path, 'r') as f:
+ info_id, info_ver = f.read().split()
+ if int(info_id) != int(checkpoint_id) or int(info_ver) != int(checkpoint_ver):
+ return False
+ return True
+
+ def restart_stream(self):
+ tdLog.debug("========restart stream========")
+ time.sleep(10)
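+        # pause and resume stream s1 a few times to exercise checkpoint handling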
+ for i in range(5):
+ tdSql.execute("pause stream s1")
+ time.sleep(2)
+ tdSql.execute("resume stream s1")
+ def initstream(self):
+ tdLog.debug("========case1 start========")
+ os.system("nohup taosBenchmark -y -B 1 -t 4 -S 500 -n 1000 -v 3 > /dev/null 2>&1 &")
+ time.sleep(5)
+ tdSql.execute("create snode on dnode 1")
+ tdSql.execute("use test")
+ tdSql.execute("create stream if not exists s1 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into st1 as select _wstart,sum(voltage),groupid from meters partition by groupid interval(1s)")
+ tdLog.debug("========create stream using snode and insert data ok========")
+ self.get_dnode_info()
+ def redistribute_vnode(self):
+ tdLog.debug("========redistribute vnode========")
+ tdSql.redistribute_db_all_vgroups()
+ self.get_dnode_info()
+ def replicate_db(self):
+ tdLog.debug("========replicate db========")
+ while True:
+ res = tdSql.getResult("SHOW TRANSACTIONS")
+ if res == []:
+ tdLog.debug("========== no transaction, begin to replicate db =========")
+ tdSql.execute("alter database test replica 3")
+ return
+ else:
+ time.sleep(5)
+ continue
+ def check_vnodestate(self):
+ sql = 'select distinct restored from information_schema.ins_vnodes'
+ if tdSql.getResult(sql) != [(True,)]:
+ tdLog.debug(f"vnode not restored, wait 5s")
+ time.sleep(5)
+ return False
+ else:
+ return True
+ def run(self):
+ print("========run========")
+ self.initstream()
+ self.restart_stream()
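+        # give the stream time to produce a new checkpoint before verifying the on-disk info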
+ time.sleep(60)
+ self.print_time_info()
+ self.redistribute_vnode()
+ self.restart_stream()
+ time.sleep(60)
+ self.print_time_info()
+
+ def stop(self):
+ print("========stop========")
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/8-stream/checkpoint_info2.py b/tests/system-test/8-stream/checkpoint_info2.py
new file mode 100644
index 0000000000..3dc57477f7
--- /dev/null
+++ b/tests/system-test/8-stream/checkpoint_info2.py
@@ -0,0 +1,141 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+from util.cluster import *
+
+# this case should be run with the -N option
+class TDTestCase:
+    updatecfgDict = {'checkpointInterval': 60}
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ def find_checkpoint_info_file(self, dirpath, checkpointid, task_id):
+ for root, dirs, files in os.walk(dirpath):
+ if f'checkpoint{checkpointid}' in dirs:
+ info_path = os.path.join(root, f'checkpoint{checkpointid}', 'info')
+ if os.path.exists(info_path):
+ if task_id in info_path:
+ tdLog.info(f"info file found in {info_path}")
+ return info_path
+ else:
+ continue
+ else:
+ tdLog.info(f"info file not found in {info_path}")
+ return None
+ else:
+ tdLog.info(f"no checkpoint{checkpointid} in {dirpath}")
+ def get_dnode_info(self):
+ '''
+        build a dict mapping each vgroup id to the dnode hosting its leader vnode
+ '''
+ self.vnode_dict = {}
+ sql = 'select dnode_id, vgroup_id from information_schema.ins_vnodes where status = "leader"'
+ result = tdSql.getResult(sql)
+ for (dnode,vnode) in result:
+ self.vnode_dict[vnode] = dnode
+ def print_time_info(self):
+ '''
+        poll check_info() until it passes; the checkpoint info file may not exist immediately (for example, right after a vgroup redistribute), so retry every 10 seconds before timing out
+ '''
+        times = 0
+ while(True):
+ if(self.check_info()):
+ tdLog.success(f'Time to finish is {times}')
+ return
+ else:
+ if times > 400:
+ tdLog.exit("time out")
+ times += 10
+ time.sleep(10)
+ def check_info(self):
+ '''
+        wait until every vnode is restored, then check that each stream task's on-disk checkpoint info file matches the checkpoint_id and checkpoint_ver reported by ins_stream_tasks
+ '''
+ while(True):
+ if(self.check_vnodestate()):
+ break
+ self.get_dnode_info()
+ sql = 'select task_id, node_id, checkpoint_id, checkpoint_ver from information_schema.ins_stream_tasks where `level` = "source" or `level` = "agg" and node_type == "vnode"'
+ for task_id, vnode, checkpoint_id, checkpoint_ver in tdSql.getResult(sql):
+ dirpath = f"{cluster.dnodes[self.vnode_dict[vnode]-1].dataDir}/vnode/vnode{vnode}/"
+ info_path = self.find_checkpoint_info_file(dirpath, checkpoint_id, task_id)
+ if info_path is None:
+ tdLog.info(f"info path: {dirpath} is null")
+ return False
+ with open(info_path, 'r') as f:
+ info_id, info_ver = f.read().split()
+ if int(info_id) != int(checkpoint_id) or int(info_ver) != int(checkpoint_ver):
+ tdLog.info(f"infoId: {info_id}, checkpointId: {checkpoint_id}, infoVer: {info_ver}, checkpointVer: {checkpoint_ver}")
+ return False
+ return True
+
+ def restart_stream(self):
+ tdLog.debug("========restart stream========")
+ for i in range(5):
+ tdSql.execute("pause stream s1")
+ time.sleep(2)
+ tdSql.execute("resume stream s1")
+ def initstream(self):
+ tdLog.debug("========case1 start========")
+ os.system("nohup taosBenchmark -y -B 1 -t 4 -S 500 -n 1000 -v 3 > /dev/null 2>&1 &")
+ time.sleep(5)
+ tdSql.execute("create snode on dnode 1")
+ tdSql.execute("use test")
+ tdSql.execute("create stream if not exists s1 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into st1 as select _wstart,sum(voltage),groupid from meters partition by groupid interval(1s)")
+ tdLog.debug("========create stream using snode and insert data ok========")
+ self.get_dnode_info()
+ def redistribute_vnode(self):
+ tdLog.debug("========redistribute vnode========")
+ tdSql.redistribute_db_all_vgroups()
+ self.get_dnode_info()
+ def replicate_db(self):
+ tdLog.debug("========replicate db========")
+ while True:
+ res = tdSql.getResult("SHOW TRANSACTIONS")
+ if res == []:
+ tdLog.debug("========== no transaction, begin to replicate db =========")
+ tdSql.execute("alter database test replica 3")
+ return
+ else:
+ time.sleep(5)
+ continue
+ def check_vnodestate(self):
+ sql = 'select distinct restored from information_schema.ins_vnodes'
+ if tdSql.getResult(sql) != [(True,)]:
+ tdLog.debug(f"vnode not restored, wait 5s")
+ time.sleep(5)
+ return False
+ else:
+ return True
+ def run(self):
+ self.initstream()
+ self.replicate_db()
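+        # after raising the replica count to 3, wait until the on-disk checkpoint info matches ins_stream_tasks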
+ self.print_time_info()
+ self.restart_stream()
+ time.sleep(60)
+ self.print_time_info()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/99-TDcase/TS-5580.py b/tests/system-test/99-TDcase/TS-5580.py
new file mode 100644
index 0000000000..84dd44c3b0
--- /dev/null
+++ b/tests/system-test/99-TDcase/TS-5580.py
@@ -0,0 +1,118 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+
+class TDTestCase:
+ updatecfgDict = {'qDebugFlag':135 , 'mDebugFlag':135}
+
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ self.setsql = TDSetSql()
+ self.dbname = 'db'
+ self.stbname = 'stb'
+ self.binary_length = 20 # the length of binary for column_dict
+ self.nchar_length = 20 # the length of nchar for column_dict
+ self.ts = 1537146000000
+ self.column_dict = {
+ 'ts' : 'timestamp',
+ 'col1': 'tinyint',
+ 'col2': 'smallint',
+ 'col3': 'int',
+ 'col4': 'bigint',
+ 'col5': 'float',
+ 'col6': 'double',
+ 'col7': 'double',
+ 'col8': 'double',
+ 'col9': 'double',
+ 'col10': 'double',
+ 'col11': 'double',
+ 'col12': 'double',
+ 'col13': 'double',
+ 'col14': 'double',
+ 'col15': 'double',
+ 'col16': 'double',
+ 'col17': 'double',
+ 'col18': 'double',
+ 'col19': 'double'
+ }
+ self.tbnum = 500
+ self.rowNum = 10
+ self.tag_dict = {
+ 't0':'int',
+ 't1':'bigint',
+ 't2':'float',
+ 't3':'double',
+ 't4':'bool',
+ 't5':'bool',
+ 't6':'bool',
+ 't7':'bool',
+ 't8':'bool',
+ 't9':'bool',
+ 't10':'bool',
+ 't11':'bool',
+ 't12':'bool',
+ 't13':'bool',
+ 't14':'bool',
+ 't15':'bool',
+ 't16':'bool',
+ 't17':'bool',
+ 't18':'bool',
+ 't19':'bool',
+ }
+ self.tag_values = [
+            '1','1','1','1','true','true','true','true','true','true','true','true','true','true','true','true','true',
+ 'true','true','true'
+ ]
+ def prepare_data(self):
+ tdSql.execute(f"create database if not exists {self.dbname} vgroups 2")
+ tdSql.execute(f'use {self.dbname}')
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
+ for i in range(self.tbnum):
+ tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]}, {self.tag_values[1]}, "
+ f"{self.tag_values[2]}, {self.tag_values[3]}, {self.tag_values[4]}, {self.tag_values[5]}, "
+ f"{self.tag_values[6]}, {self.tag_values[7]}, {self.tag_values[8]}, {self.tag_values[9]}, "
+ f"{self.tag_values[10]}, {self.tag_values[11]}, {self.tag_values[12]}, {self.tag_values[13]}, "
+ f"{self.tag_values[14]}, {self.tag_values[15]}, {self.tag_values[16]}, {self.tag_values[17]}, "
+ f"{self.tag_values[18]}, {self.tag_values[19]})")
+
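+    # the super table defines 20 columns and each subtable carries 20 tags, so both
+    # ins_tags and ins_columns are expected to return 20 rows per subtable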
+ def test_query_ins_tags(self):
+ for i in range(self.tbnum):
+ sql = f'select tag_name, tag_value from information_schema.ins_tags where table_name = "{self.stbname}_{i}"'
+ tdSql.query(sql)
+ tdSql.checkRows(20)
+
+ def test_query_ins_columns(self):
+ for i in range(self.tbnum):
+ sql = f'select col_name from information_schema.ins_columns where table_name = "{self.stbname}_{i}"'
+ tdSql.query(sql)
+ tdSql.checkRows(20)
+ def run(self):
+ self.prepare_data()
+ self.test_query_ins_tags()
+ self.test_query_ins_columns()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/taosc_test/CMakeLists.txt b/tests/taosc_test/CMakeLists.txt
index c16fe59271..45c14f84bf 100644
--- a/tests/taosc_test/CMakeLists.txt
+++ b/tests/taosc_test/CMakeLists.txt
@@ -16,7 +16,7 @@ aux_source_directory(src OS_SRC)
# taoscTest
add_executable(taoscTest "taoscTest.cpp")
-target_link_libraries(taoscTest taos os gtest_main)
+target_link_libraries(taoscTest ${TAOS_LIB} os gtest_main)
target_include_directories(
taoscTest
PUBLIC "${TD_SOURCE_DIR}/include/os"
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 5e93be695d..d058d7a52f 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -13,7 +13,7 @@ IF(TD_WEBSOCKET)
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
@@ -32,7 +32,7 @@ IF(TD_WEBSOCKET)
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
@@ -52,7 +52,7 @@ IF(TD_WEBSOCKET)
PREFIX "taosws-rs"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"
PATCH_COMMAND
@@ -139,7 +139,7 @@ ELSE()
PREFIX "taosadapter"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
PATCH_COMMAND
@@ -168,7 +168,7 @@ ELSE()
PREFIX "taosadapter"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
PATCH_COMMAND
@@ -193,7 +193,7 @@ ELSE()
PREFIX "taosadapter"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS ${TAOS_LIB}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
PATCH_COMMAND
diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt
index 4a8e0b9d34..2301f33803 100644
--- a/tools/shell/CMakeLists.txt
+++ b/tools/shell/CMakeLists.txt
@@ -33,9 +33,9 @@ ELSE()
ENDIF()
if(TD_WINDOWS)
- target_link_libraries(shell PUBLIC taos_static ${LINK_WEBSOCKET})
+ target_link_libraries(shell PUBLIC ${TAOS_LIB_STATIC} ${LINK_WEBSOCKET})
else()
- target_link_libraries(shell PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP})
+ target_link_libraries(shell PUBLIC ${TAOS_LIB} ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP})
endif()
target_link_libraries(
@@ -63,7 +63,7 @@ IF(TD_LINUX)
IF(TD_WEBSOCKET)
ADD_DEPENDENCIES(shell_ut taosws-rs)
ENDIF()
- target_link_libraries(shell_ut PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP})
+ target_link_libraries(shell_ut PUBLIC ${TAOS_LIB} ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP})
target_link_libraries(shell_ut PRIVATE os common transport geometry util)
# util depends
diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt
index 7589d11840..7054eb218f 100644
--- a/utils/test/c/CMakeLists.txt
+++ b/utils/test/c/CMakeLists.txt
@@ -1,5 +1,5 @@
add_executable(tmq_demo tmqDemo.c)
-add_dependencies(tmq_demo taos)
+add_dependencies(tmq_demo ${TAOS_LIB})
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
add_executable(tmq_taosx_ci tmq_taosx_ci.c)
@@ -22,7 +22,7 @@ endif(${TD_LINUX})
target_link_libraries(
tmq_offset
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -30,7 +30,7 @@ target_link_libraries(
target_link_libraries(
tmq_multi_thread_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -38,56 +38,56 @@ target_link_libraries(
target_link_libraries(
create_table
- PUBLIC taos
- PUBLIC util
- PUBLIC common
- PUBLIC os
-)
-target_link_libraries(
- tmq_demo
- PUBLIC taos
- PUBLIC util
- PUBLIC common
- PUBLIC os
-)
-target_link_libraries(
- tmq_sim
PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
)
+target_link_libraries(
+ tmq_demo
+ PUBLIC ${TAOS_LIB}
+ PUBLIC util
+ PUBLIC common
+ PUBLIC os
+)
+target_link_libraries(
+ tmq_sim
+ PUBLIC ${TAOS_LIB_PLATFORM_SPEC}
+ PUBLIC util
+ PUBLIC common
+ PUBLIC os
+)
target_link_libraries(
tmq_ts5466
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_td32187
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_td32526
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_taosx_ci
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_offset_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -95,7 +95,7 @@ target_link_libraries(
target_link_libraries(
replay_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -103,7 +103,7 @@ target_link_libraries(
target_link_libraries(
write_raw_block_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -111,7 +111,7 @@ target_link_libraries(
target_link_libraries(
tmq_write_raw_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -119,7 +119,7 @@ target_link_libraries(
target_link_libraries(
sml_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -128,7 +128,7 @@ target_link_libraries(
target_link_libraries(
get_db_name_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -136,7 +136,7 @@ target_link_libraries(
target_link_libraries(
varbinary_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
@@ -145,7 +145,7 @@ target_link_libraries(
if(${TD_LINUX})
target_link_libraries(
tsz_test
- PUBLIC taos
+ PUBLIC ${TAOS_LIB}
PUBLIC util
PUBLIC common
PUBLIC os
diff --git a/utils/test/c/tmq_offset_test.c b/utils/test/c/tmq_offset_test.c
index 6be9b38979..25f048bab2 100644
--- a/utils/test/c/tmq_offset_test.c
+++ b/utils/test/c/tmq_offset_test.c
@@ -80,6 +80,77 @@ int buildData(TAOS* pConn){
return 0;
}
+void test_ts5679(TAOS* pConn){
+ TAOS_RES* pRes = taos_query(pConn, "drop topic if exists t_5679");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop database if exists db_ts5679");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create database if not exists db_ts5679 vgroups 1 wal_retention_period 3600");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create topic t_5679 as database db_ts5679");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "use db_ts5679");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn,"CREATE TABLE `t1` (`ts` TIMESTAMP, `voltage` INT)");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into t1 values(now, 1)");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ tmq_conf_t* conf = tmq_conf_new();
+
+ tmq_conf_set(conf, "enable.auto.commit", "false");
+ tmq_conf_set(conf, "auto.commit.interval.ms", "2000");
+ tmq_conf_set(conf, "group.id", "group_id_2");
+ tmq_conf_set(conf, "td.connect.user", "root");
+ tmq_conf_set(conf, "td.connect.pass", "taosdata");
+ tmq_conf_set(conf, "auto.offset.reset", "earliest");
+ tmq_conf_set(conf, "msg.with.table.name", "false");
+
+ tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+ tmq_conf_destroy(conf);
+
+  // build the list of topics to subscribe to
+ tmq_list_t* topicList = tmq_list_new();
+ tmq_list_append(topicList, "t_5679");
+
+  // start the subscription
+ tmq_subscribe(tmq, topicList);
+ tmq_list_destroy(topicList);
+
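+  // drain all messages from the topic; auto commit is disabled and no offset is committed here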
+ while(1){
+ pRes = tmq_consumer_poll(tmq, 1000);
+ if (pRes == NULL){
+ break;
+ }
+ taosSsleep(3);
+ }
+ tmq_topic_assignment* pAssign = NULL;
+ int32_t numOfAssign = 0;
+ int32_t code = tmq_get_topic_assignment(tmq, "t_5679", &pAssign, &numOfAssign);
+ ASSERT (code == 0);
+
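+  // nothing was committed, so every vgroup should report TSDB_CODE_TMQ_NO_COMMITTED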
+ for(int i = 0; i < numOfAssign; i++){
+ int64_t committed = tmq_committed(tmq, "t_5679", pAssign[i].vgId);
+ printf("committed offset:%"PRId64"\n", committed);
+ ASSERT(committed == TSDB_CODE_TMQ_NO_COMMITTED);
+ }
+
+ taos_free_result(pRes);
+}
+
void test_offset(TAOS* pConn){
if(buildData(pConn) != 0){
ASSERT(0);
@@ -304,8 +375,13 @@ void test_ts3756(TAOS* pConn){
int main(int argc, char* argv[]) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- test_offset(pConn);
- test_ts3756(pConn);
+ if (argc == 2) {
+ test_ts5679(pConn);
+  } else {
+ test_offset(pConn);
+ test_ts3756(pConn);
+ }
+
taos_close(pConn);
return 0;
}
diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c
index cd70dd88f5..117f9fa2e1 100644
--- a/utils/test/c/tmq_taosx_ci.c
+++ b/utils/test/c/tmq_taosx_ci.c
@@ -65,12 +65,12 @@ static void msg_process(TAOS_RES* msg) {
if (g_fp && strcmp(result, "") != 0) {
// RES_TYPE__TMQ_BATCH_META
if ((*(int8_t*)msg) == 5) {
- cJSON* pJson = cJSON_Parse(result);
- cJSON* pJsonArray = cJSON_GetObjectItem(pJson, "metas");
+ cJSON* pJson = cJSON_Parse(result);
+ cJSON* pJsonArray = cJSON_GetObjectItem(pJson, "metas");
int32_t num = cJSON_GetArraySize(pJsonArray);
for (int32_t i = 0; i < num; i++) {
cJSON* pJsonItem = cJSON_GetArrayItem(pJsonArray, i);
- char* itemStr = cJSON_PrintUnformatted(pJsonItem);
+ char* itemStr = cJSON_PrintUnformatted(pJsonItem);
taosFprintfFile(g_fp, itemStr);
tmq_free_json_meta(itemStr);
taosFprintfFile(g_fp, "\n");
@@ -489,10 +489,11 @@ int buildStable(TAOS* pConn, TAOS_RES* pRes) {
}
taos_free_result(pRes);
#else
- pRes = taos_query(pConn,
- "create stream meters_summary_s trigger at_once IGNORE EXPIRED 0 fill_history 1 into meters_summary as select "
- "_wstart, max(current) as current, "
- "groupid, location from meters partition by groupid, location interval(10m)");
+ pRes = taos_query(
+ pConn,
+ "create stream meters_summary_s trigger at_once IGNORE EXPIRED 0 fill_history 1 into meters_summary as select "
+ "_wstart, max(current) as current, "
+ "groupid, location from meters partition by groupid, location interval(10m)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -632,8 +633,8 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "msg.consume.excluded", "1");
-// tmq_conf_set(conf, "session.timeout.ms", "1000000");
-// tmq_conf_set(conf, "max.poll.interval.ms", "20000");
+ // tmq_conf_set(conf, "session.timeout.ms", "1000000");
+ // tmq_conf_set(conf, "max.poll.interval.ms", "20000");
if (g_conf.snapShot) {
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
@@ -722,7 +723,7 @@ void initLogFile() {
"\"level\":\"medium\"},{"
"\"name\":\"groupid\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\","
"\"level\":\"medium\"},{\"name\":"
- "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\","
+ "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\","
"\"level\":\"medium\"}],\"tags\":[{\"name\":\"group_id\","
"\"type\":14}"
"]}",
@@ -750,7 +751,7 @@ void initLogFile() {
"\"level\":\"medium\"}"
",{"
"\"name\":\"c3\",\"type\":8,\"length\":64,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"},{"
+ "\"zstd\",\"level\":\"medium\"},{"
"\"name\":\"c4\",\"type\":5,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
"\"medium\"}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":"
"\"t3\","
@@ -772,7 +773,7 @@ void initLogFile() {
"{\"type\":\"create\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"columns\":[{\"name\":\"ts\","
"\"type\":9,"
"\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c2\","
- "\"type\":10,\"length\":8,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":"
+ "\"type\":10,\"length\":8,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":"
"\"medium\"},{\"name\":\"cc3\",\"type\":5,"
"\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}",
"{\"type\":\"create\",\"tableType\":\"super\",\"tableName\":\"jt\",\"columns\":[{\"name\":\"ts\","
@@ -794,7 +795,7 @@ void initLogFile() {
"\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":"
"false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"}],"
+ "\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
"1}]}",
@@ -806,7 +807,7 @@ void initLogFile() {
"\"name\":\"c2\",\"type\":6,"
"\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":"
"\"c3\","
- "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":"
+ "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":"
"\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
@@ -863,7 +864,7 @@ void initLogFile() {
"\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":false,\"encode\":\"delta-d\","
"\"compress\":\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"}],"
+ "\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
"1}]}",
@@ -883,7 +884,8 @@ void initLogFile() {
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\","
"\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}",
- "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\","
+ "\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\","
"\"colType\":8,\"colLength\":64}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\","
@@ -896,7 +898,7 @@ void initLogFile() {
"9,\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":"
"\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
"\"medium\"},{\"name\":\"c2\",\"type\":10,\"length\":4,"
- "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}",
+ "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}],\"tags\":[]}",
"{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":5,\"colName\":\"c3\","
"\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":7,\"colName\":\"c2\","
@@ -921,7 +923,7 @@ void initLogFile() {
"{\"name\":\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
"\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":"
"false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\",\"type\":8,"
- "\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}],"
+ "\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":1}]}",
"{\"type\":\"drop\",\"tableType\":\"super\",\"tableName\":\"st1\"}",
@@ -931,7 +933,7 @@ void initLogFile() {
"\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":"
"false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"}],"
+ "\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
"1}]}",
@@ -941,7 +943,7 @@ void initLogFile() {
"\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
"\"medium\"},{\"name\":\"c2\",\"type\":6,"
"\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\","
- "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":"
+ "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":"
"\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
@@ -985,7 +987,8 @@ void initLogFile() {
"\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"groupid\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\","
"\"level\":\"medium\"},{\"name\":"
- "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\","
+ "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
+ "\"zstd\","
"\"level\":\"medium\"}],\"tags\":[{\"name\":\"group_id\","
"\"type\":"
"14}]}",
@@ -1012,7 +1015,7 @@ void initLogFile() {
"\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":"
"false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"}],"
+ "\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
"1}]}",
@@ -1032,7 +1035,8 @@ void initLogFile() {
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\","
"\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}",
- "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\","
+ "\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\","
"\"colType\":8,\"colLength\":64}",
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\","
@@ -1044,7 +1048,7 @@ void initLogFile() {
"9,\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"}"
",{\"name\":\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\","
"\"level\":\"medium\"},{\"name\":\"c2\",\"type\":10,\"length\":4,"
- "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}",
+ "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}],\"tags\":[]}",
"{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":5,\"colName\":\"c3\","
"\"colType\":5}",
"{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":7,\"colName\":\"c2\","
@@ -1069,7 +1073,7 @@ void initLogFile() {
"\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":"
"false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{"
"\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":"
- "\"lz4\",\"level\":\"medium\"}],"
+ "\"zstd\",\"level\":\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"
"1}]}",
@@ -1079,7 +1083,7 @@ void initLogFile() {
"\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
"\"medium\"},{\"name\":\"c2\",\"type\":6,"
"\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\","
- "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":"
+ "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":"
"\"medium\"}],"
"\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\","
"\"type\":"