Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/TS-3495

wangmm0220 authored on 2023-06-25 14:54:16 +08:00
commit 7a49d67078
156 changed files with 12022 additions and 1219 deletions

.gitignore (vendored), 1 line changed

@ -16,7 +16,6 @@ debug/
release/
target/
debs/
deps/
rpms/
mac/
*.pyc

View File

@ -52,7 +52,7 @@ TDengine also provides a set of auxiliary tools, taosTools, which currently includes taosBench
### Ubuntu 18.04 and above & Debian
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### Install build dependencies for taos-tools
@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig geos
brew install argp-standalone pkgconfig
```
### Set up the golang development environment

View File

@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
### Ubuntu 18.04 and above or Debian
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### Install build dependencies for taosTools
@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig geos
brew install argp-standalone pkgconfig
```
### Setup golang environment

View File

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
@ -115,18 +115,6 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
ENDIF ()
IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
# disable all assert
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
ADD_DEFINITIONS(-DDISABLE_ASSERT)
@ -168,4 +156,20 @@ ELSE ()
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
ENDIF()
# build mode
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
ENDIF ()

View File

@ -172,5 +172,15 @@ ENDIF()
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
set(TD_DEPS_DIR "x86")
if (TD_LINUX)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "arm")
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
endif()
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.5.0")
SET(TD_VER_NUMBER "3.0.6.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -1,5 +1,6 @@
# rocksdb
IF (NOT ${TD_LINUX})
ExternalProject_Add(rocksdb
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
GIT_TAG v8.1.1
@ -9,3 +10,5 @@ ExternalProject_Add(rocksdb
INSTALL_COMMAND ""
TEST_COMMAND ""
)
ENDIF(NOT ${TD_LINUX})

View File

@ -78,10 +78,18 @@ if(${BUILD_WITH_LEVELDB})
endif(${BUILD_WITH_LEVELDB})
# rocksdb
IF (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
ELSE()
if(${BUILD_WITH_ROCKSDB})
#cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
ENDIF(NOT ${TD_LINUX})
# canonical-raft
if(${BUILD_WITH_CRAFT})
@ -227,9 +235,14 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
IF (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
@ -253,7 +266,7 @@ if(${BUILD_WITH_ROCKSDB})
endif(${TD_DARWIN})
if(${TD_WINDOWS})
option(WITH_JNI "" OFF)
option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
@ -265,7 +278,7 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
@ -285,6 +298,7 @@ if(${BUILD_WITH_ROCKSDB})
)
endif(${BUILD_WITH_ROCKSDB})
ENDIF(NOT ${TD_LINUX})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})
@ -485,8 +499,16 @@ endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>

deps/arm/rocksdb_static/librocksdb.a (vendored, new file): binary file not shown

deps/arm/rocksdb_static/rocksdb/c.h (vendored, new file, 2844 lines): file diff suppressed because it is too large

deps/x86/rocksdb_static/librocksdb.a (vendored, new file): binary file not shown

deps/x86/rocksdb_static/rocksdb/c.h (vendored, new file, 2844 lines): file diff suppressed because it is too large

View File

@ -667,6 +667,137 @@ Insert with req_id argument
</TabItem>
</Tabs>
### Parameter Binding
The Python connector provides a parameter binding API for writing data. As with most databases, TDengine currently only supports the question mark `?` as the placeholder for parameters to be bound.
<Tabs>
<TabItem value="native" label="native connection">
#### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
#### Parameter binding
Call the `new_multi_binds` function to create the parameter list for parameter binding.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the `bind_param` method (for a single row) or the `bind_param_batch` method (for multiple rows) to set the values; a single-row sketch follows the batch example below.
```
stmt.bind_param_batch(params)
```
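For the single-row path, a minimal sketch follows, reusing the `stmt` created above. It assumes the connector also exports a `new_bind_params` helper (the single-row counterpart of `new_multi_binds`); verify this against your installed taospy version.
```
# sketch only: new_bind_params is assumed to be the single-row
# counterpart of new_multi_binds; each slot holds one scalar value
from taos import new_bind_params

params = new_bind_params(16)          # one slot per column of the target table
params[0].timestamp(1626861392589)    # ts
params[1].bool(True)
params[2].tinyint(-128)               # -128 is tinyint null
params[3].tinyint(0)
params[4].smallint(3)
params[5].int(3)
params[6].bigint(3)
params[7].tinyint_unsigned(3)
params[8].smallint_unsigned(3)
params[9].int_unsigned(3)
params[10].bigint_unsigned(3)
params[11].float(3.0)
params[12].double(3.2)
params[13].binary("abc")
params[14].nchar("涛思数据")
params[15].timestamp(1626861392591)

stmt.bind_param(params)               # binds exactly one row
```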
#### Execute SQL
Call the `execute` method to execute the SQL statement.
```
stmt.execute()
```
#### Close Stmt
```
stmt.close()
```
#### Example
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
#### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
#### Prepare SQL
Call the `prepare` method of the stmt object to prepare the SQL statement.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
#### Parameter binding
Call the `bind_param` method to bind parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the `add_batch` method to add parameters to the batch.
```
stmt.add_batch()
```
#### Execute SQL
Call the `execute` method to execute the SQL statement.
```
stmt.execute()
```
#### Close Stmt
```
stmt.close()
```
#### Example
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |

View File

@ -79,8 +79,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump table schemas.
-y, --answer-yes Input yes for prompt. It will skip data file
checking!
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
and lzma.
-S, --start-time=START_TIME Start time to dump. Either epoch or

docs/en/14-reference/12-config/index.md (mode changed from normal file to executable file), 30 lines changed

@ -365,6 +365,16 @@ The charset that takes effect is UTF-8.
| Unit | GB |
| Default Value | 2.0 |
### metaCacheMaxSize
| Attribute | Description |
| ------------- | ------------------------------------------------------------------------------------------------- |
| Applicable | Client Only |
| Meaning | Maximum metadata cache size in a single client process |
| Unit | MB |
| Default Value | -1 (No limitation) |
## Cluster Parameters
### supportVnodes
@ -433,6 +443,26 @@ The charset that takes effect is UTF-8.
| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
### slowLogThreshold
| Attribute | Description |
| ------------- | -------------------------------------------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | When an operation's execution time exceeds this threshold, the operation is logged to the slow log file |
| Unit | seconds |
| Default Value | 3 |
| Note | All slow operations are logged to the file `taosSlowLog` in the log directory |
### slowLogScope
| Attribute | Description |
| --------------- | ----------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Slow log type to be logged |
| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE |
| Default Value | ALL |
| Note | All types of slow operations are logged by default; set one of the optional values to narrow the scope |
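Putting the new client options together, a client-side `taos.cfg` excerpt might look like the sketch below; the parameter names come from the tables above, while the values are purely illustrative:
```
# client-side taos.cfg excerpt (illustrative values only)
metaCacheMaxSize   512      # cap the client meta cache at 512 MB
slowLogThreshold   10       # log operations that take 10 seconds or longer
slowLogScope       QUERY    # record only slow queries
```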
### debugFlag
| Attribute | Description |

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />

View File

@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.5.2
<Release type="tools" version="2.5.2" />
## 2.5.1
<Release type="tools" version="2.5.1" />

View File

@ -0,0 +1,82 @@
#!
import taosws
import taos
db_name = 'test_ws_stmt'
def before():
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def stmt_insert():
before()
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break
def stmt_insert_into_stable():
before()
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break

View File

@ -0,0 +1,78 @@
#!
import time
import taosws
import taos
def before_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def after_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.close()
def stmt_insert():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
def stmt_insert_into_stable():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
if __name__ == '__main__':
stmt_insert()
stmt_insert_into_stable()

View File

@ -299,7 +299,7 @@ SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
For table `d10`, aggregate the average, maximum, and minimum by 10-second intervals:
For table `d10`, aggregate the average, maximum, and minimum in 10-second intervals:
```sql
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);

View File

@ -672,6 +672,141 @@ consumer.close()
</TabItem>
</Tabs>
### Writing Data via Parameter Binding
The TDengine Python connector supports writing data through a prepared-statement style parameter binding API. As with most databases, TDengine currently only supports the question mark `?` as the placeholder for parameters to be bound.
<Tabs>
<TabItem value="native" label="native connection">
#### Create stmt
The `Connection` class of the Python connector provides the `statement` method for creating the parameter-binding object stmt. The method takes an SQL string as its argument; the SQL string currently only supports `?` as the placeholder for bound parameters.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
#### Parameter binding
Call the `new_multi_binds` function to create the params list used for parameter binding.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the stmt's `bind_param` method to bind values one row at a time, or its `bind_param_batch` method to bind values for multiple rows at once.
```
stmt.bind_param_batch(params)
```
#### Execute SQL
Call the stmt's `execute` method to execute the SQL statement.
```
stmt.execute()
```
#### Close stmt
Finally, close the stmt.
```
stmt.close()
```
#### Sample code
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
#### 创建 stmt
Python WebSocket 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt该方法接收 sql 字符串作为参数sql 字符串目前仅支持用 `?` 来代表绑定的参数。
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
#### Prepare SQL
Call the stmt's `prepare` method to parse the insert statement.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
#### Parameter binding
Call the stmt's `bind_param` method to bind parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the stmt's `add_batch` method to add the bound parameters to the batch.
```
stmt.add_batch()
```
#### Execute SQL
Call the stmt's `execute` method to execute the SQL statement.
```
stmt.execute()
```
#### Close stmt
Finally, close the stmt.
```
stmt.close()
```
#### Sample code
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |

View File

@ -82,8 +82,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump tables' schema.
-y, --answer-yes Input yes for prompt. It will skip data file
checking!
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
and lzma.
-S, --start-time=START_TIME Start time to dump. Either epoch or

docs/zh/14-reference/12-config/index.md (mode changed from normal file to executable file), 29 lines changed

@ -384,6 +384,15 @@ The charset that takes effect is UTF-8.
| Unit | GB |
| Default Value | 2.0 |
### metaCacheMaxSize
| Attribute | Description |
| -------- | ---------------------------------------------- |
| Applicable | Client only |
| Meaning | Maximum metadata cache size in a single client process |
| Unit | MB |
| Default Value | -1 (no limit) |
## Cluster Parameters
### supportVnodes
@ -452,6 +461,26 @@ The charset that takes effect is UTF-8.
| Default Value | 0 |
| Note | When greater than 0, the log file is renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the file. |
### slowLogThreshold
| Attribute | Description |
| -------- | ------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Threshold for slow operations; an operation whose execution time is greater than or equal to this value is treated as slow |
| Unit | seconds |
| Default Value | 3 |
| Note | All slow operations of a client are recorded in the file `taosSlowLog` in the log directory |
### slowLogScope
| Attribute | Description |
| -------- | --------------------------------------------------------------|
| Applicable | Client only |
| Meaning | Types of slow operations to be logged |
| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE |
| Default Value | ALL |
| Note | All types of slow operations are logged by default; configure one value to log only a specific type |
### debugFlag
| Attribute | Description |

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages of each version, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />

View File

@ -10,6 +10,10 @@ Download links for all versions of taosTools packages are as follows:
import Release from "/components/ReleaseV3";
## 2.5.2
<Release type="tools" version="2.5.2" />
## 2.5.1
<Release type="tools" version="2.5.1" />

View File

@ -51,27 +51,27 @@ public class JdbcDemo {
private void createDatabase() {
String sql = "create database if not exists " + dbName;
exuete(sql);
execute(sql);
}
private void useDatabase() {
String sql = "use " + dbName;
exuete(sql);
execute(sql);
}
private void dropTable() {
final String sql = "drop table if exists " + dbName + "." + tbName + "";
exuete(sql);
execute(sql);
}
private void createTable() {
final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)";
exuete(sql);
execute(sql);
}
private void insert() {
final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
exuete(sql);
execute(sql);
}
private void select() {
@ -120,7 +120,7 @@ public class JdbcDemo {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
private void exuete(String sql) {
private void execute(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
boolean execute = statement.execute(sql);

View File

@ -22,7 +22,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.1.1-jre</version>
<version>32.0.0-jre</version>
</dependency>
</dependencies>

View File

@ -198,6 +198,7 @@ typedef struct SDataBlockInfo {
SBlockID id;
int16_t hasVarCol;
int16_t dataLoad; // denote if the data is loaded or not
uint8_t scanFlag;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization

View File

@ -82,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
// mnode
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
extern int8_t tsGrant;
extern bool tsMndSkipGrant;
// monitor
@ -119,6 +120,7 @@ extern bool tsQueryUseNodeAllocator;
extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern bool tsTtlChangeOnWrite;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
@ -198,6 +200,7 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
int8_t taosGranted();
#ifdef __cplusplus
}

View File

@ -945,7 +945,7 @@ int32_t tSerializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
typedef struct {
int32_t timestamp;
int32_t timestampSec;
} SVDropTtlTableReq;
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
@ -2278,7 +2278,7 @@ typedef struct SVCreateTbReq {
int32_t flags;
char* name;
tb_uid_t uid;
int64_t ctime;
int64_t btime;
int32_t ttl;
int32_t commentLen;
char* comment;
@ -2415,10 +2415,12 @@ typedef struct {
int32_t newTTL;
int32_t newCommentLen;
char* newComment;
int64_t ctimeMs; // fill by vnode
} SVAlterTbReq;
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs);
typedef struct {
int32_t code;
@ -3436,6 +3438,7 @@ typedef struct SDeleteRes {
int64_t affectedRows;
char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
int64_t ctimeMs; // fill by vnode
} SDeleteRes;
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
@ -3454,10 +3457,12 @@ int32_t tDecodeSSingleDeleteReq(SDecoder* pCoder, SSingleDeleteReq* pReq);
typedef struct {
int64_t suid;
SArray* deleteReqs; // SArray<SSingleDeleteReq>
int64_t ctimeMs; // fill by vnode
} SBatchDeleteReq;
int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq);
int32_t tDecodeSBatchDeleteReq(SDecoder* pCoder, SBatchDeleteReq* pReq);
int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder* pDecoder, SBatchDeleteReq* pReq, int64_t ctimeMs);
typedef struct {
int32_t msgIdx;
@ -3525,6 +3530,7 @@ typedef struct {
SArray* aRowP;
SArray* aCol;
};
int64_t ctimeMs;
} SSubmitTbData;
typedef struct {

View File

@ -310,6 +310,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_ADD_CHECKINFO, "vnode-tmq-add-checkinfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_DEL_CHECKINFO, "vnode-del-checkinfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME, "vnode-tmq-consume", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME_PUSH, "vnode-tmq-consume-push", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL)

View File

@ -54,7 +54,7 @@ typedef struct SMetaEntry {
SRSmaParam rsmaParam;
} stbEntry;
struct {
int64_t ctime;
int64_t btime;
int32_t ttlDays;
int32_t commentLen;
char* comment;
@ -62,7 +62,7 @@ typedef struct SMetaEntry {
uint8_t* pTags;
} ctbEntry;
struct {
int64_t ctime;
int64_t btime;
int32_t ttlDays;
int32_t commentLen;
char* comment;

View File

@ -53,6 +53,8 @@ typedef struct SLogicNode {
EDataOrderLevel requireDataOrder; // requirements for input data
EDataOrderLevel resultDataOrder; // properties of the output data
EGroupAction groupAction;
EOrder inputTsOrder;
EOrder outputTsOrder;
} SLogicNode;
typedef enum EScanType {
@ -111,7 +113,6 @@ typedef struct SJoinLogicNode {
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SJoinLogicNode;
@ -229,8 +230,6 @@ typedef struct SWindowLogicNode {
int8_t igExpired;
int8_t igCheckUpdate;
EWindowAlgorithm windowAlgo;
EOrder inputTsOrder;
EOrder outputTsOrder;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@ -241,7 +240,6 @@ typedef struct SFillLogicNode {
SNode* pWStartTs;
SNode* pValues; // SNodeListNode
STimeWindow timeRange;
EOrder inputTsOrder;
} SFillLogicNode;
typedef struct SSortLogicNode {
@ -310,6 +308,8 @@ typedef struct SDataBlockDescNode {
typedef struct SPhysiNode {
ENodeType type;
EOrder inputTsOrder;
EOrder outputTsOrder;
SDataBlockDescNode* pOutputDataBlockDesc;
SNode* pConditions;
SNodeList* pChildren;
@ -406,7 +406,6 @@ typedef struct SSortMergeJoinPhysiNode {
SNode* pMergeCondition;
SNode* pOnConditions;
SNodeList* pTargets;
EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
@ -460,8 +459,6 @@ typedef struct SWindowPhysiNode {
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
EOrder inputTsOrder;
EOrder outputTsOrder;
bool mergeDataBlock;
} SWindowPhysiNode;
@ -488,7 +485,6 @@ typedef struct SFillPhysiNode {
SNode* pWStartTs; // SColumnNode
SNode* pValues; // SNodeListNode
STimeWindow timeRange;
EOrder inputTsOrder;
} SFillPhysiNode;
typedef SFillPhysiNode SStreamFillPhysiNode;

View File

@ -69,6 +69,7 @@ typedef struct SColumnNode {
uint64_t tableId;
int8_t tableType;
col_id_t colId;
uint16_t projIdx; // the idx in project list, start from 1
EColumnType colType; // column or tag
bool hasIndex;
char dbName[TSDB_DB_NAME_LEN];

View File

@ -281,7 +281,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER)
#define NEED_CLIENT_HANDLE_ERROR(_code) \
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))

View File

@ -42,8 +42,8 @@ else:
# os.system("rm -rf /var/lib/taos/*")
# os.system("systemctl restart taosd ")
# wait a moment ,at least 5 seconds
time.sleep(5)
# wait a moment ,at least 10 seconds
time.sleep(10)
# prepare data by taosBenchmark

View File

@ -124,12 +124,12 @@ if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
# if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
# cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
# fi
# if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
# cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
# fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi

View File

@ -123,12 +123,12 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib
ln -sf libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
fi
# if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
# cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
# fi
# if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
# cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
# fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig
fi

View File

@ -315,13 +315,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
# fi
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
# fi
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi

View File

@ -214,13 +214,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
# fi
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
# fi
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi

View File

@ -241,10 +241,10 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so > /dev/null 2>&1
${csudo}/usr/bin/install -c -d /usr/local/lib
[ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
[ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
# [ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
# ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
# [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
# ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc \

View File

@ -118,12 +118,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
# if [ -f ${build_dir}/lib/libjemalloc.a ]; then
# cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
# fi
# if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
# cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
# fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi

View File

@ -217,12 +217,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
# if [ -f ${build_dir}/lib/libjemalloc.a ]; then
# cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
# fi
# if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
# cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
# fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi

View File

@ -169,13 +169,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
# fi
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
# fi
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi

View File

@ -1120,6 +1120,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
refreshMeta(pRequest->pTscObj, pRequest);
pRequest->prevCode = code;
doAsyncQuery(pRequest, true);
return;

View File

@ -77,6 +77,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;

View File

@ -1553,17 +1553,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
}
}
char cTmp = 0; // for print tmp if is raw
if (info->isRawLine) {
cTmp = tmp[len];
tmp[len] = '\0';
}
uDebug("SML:0x%" PRIx64 " smlParseLine israw:%d, numLines:%d, protocol:%d, len:%d, sql:%s", info->id,
info->isRawLine, numLines, info->protocol, len, tmp);
if (info->isRawLine) {
tmp[len] = cTmp;
}
info->isRawLine, numLines, info->protocol, len, info->isRawLine ? "rawdata" : tmp);
if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
if (info->dataFormat) {
@ -1584,8 +1575,7 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE;
}
if (code != TSDB_CODE_SUCCESS) {
tmp[len] = '\0';
uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp);
uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, info->isRawLine ? "rawdata" : tmp);
return code;
}
if (info->reRun) {

View File

@ -280,7 +280,7 @@ static const SSysDbTableSchema topicSchema[] = {
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "schema", .bytes = TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "schema", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "meta", .bytes = 4 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};

View File

@ -791,8 +791,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
// | version | total length | total rows | total columns | flag seg| block group id | column schema
// | each column length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
@ -1590,18 +1590,35 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int
int32_t nRows = payloadSize / rowSize;
ASSERT(nRows >= 1);
// the true value must be less than the value of nRows
int32_t additional = 0;
int32_t numVarCols = 0;
int32_t numFixCols = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
additional += nRows * sizeof(int32_t);
++numVarCols;
} else {
additional += BitmapLen(nRows);
++numFixCols;
}
}
int32_t newRows = (payloadSize - additional) / rowSize;
// find the data payload whose size is greater than payloadSize
int result = -1;
int start = 1;
int end = nRows;
while (start <= end) {
int mid = start + (end - start) / 2;
//data size + var data type columns offset + fixed data type columns bitmap len
int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
if (midSize > payloadSize) {
result = mid;
end = mid - 1;
} else {
start = mid + 1;
}
}
int32_t newRows = (result != -1) ? result - 1 : nRows;
// the true value must be less than the value of nRows
ASSERT(newRows <= nRows && newRows >= 1);
return newRows;

View File

@ -14,6 +14,7 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tglobal.h"
#include "tconfig.h"
#include "tgrant.h"
@ -73,6 +74,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
// mnode
int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000;
int8_t tsGrant = 1;
bool tsMndSkipGrant = false;
// monitor
@ -108,6 +110,7 @@ int32_t tsQueryRspPolicy = 0;
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
bool tsEnableQueryHb = false;
bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true
bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
bool tsQueryPlannerTrace = false;
@ -511,6 +514,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, 0) != 0) return -1;
@ -871,6 +875,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32;
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
@ -976,6 +981,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
taosSetCoreDump(enableCore);
} else if (strcasecmp("enableQueryHb", name) == 0) {
tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
} else if (strcasecmp("ttlChangeOnWrite", name) == 0) {
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
}
break;
}
@ -1525,3 +1532,5 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
int8_t taosGranted() { return atomic_load_8(&tsGrant); }

View File

@ -30,6 +30,9 @@
#include "tlog.h"
static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq);
static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq);
int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
@ -3161,7 +3164,7 @@ int32_t tSerializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestamp) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestampSec) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -3174,7 +3177,7 @@ int32_t tDeserializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableR
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestamp) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestampSec) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -6409,7 +6412,7 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeI64(pCoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1;
if (tEncodeI64(pCoder, pReq->btime) < 0) return -1;
if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1;
if (tEncodeI8(pCoder, pReq->type) < 0) return -1;
if (tEncodeI32(pCoder, pReq->commentLen) < 0) return -1;
@ -6444,7 +6447,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->btime) < 0) return -1;
if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->type) < 0) return -1;
if (tDecodeI32(pCoder, &pReq->commentLen) < 0) return -1;
@ -6909,14 +6912,13 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
default:
break;
}
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
tEndEncode(pEncoder);
return 0;
}
int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->colId) < 0) return -1;
@ -6960,6 +6962,28 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
default:
break;
}
return 0;
}
int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
}
int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1;
*(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs;
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
tEndDecode(pDecoder);
return 0;
@ -7238,6 +7262,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
if (tEncodeI64(pCoder, pRes->ctimeMs) < 0) return -1;
return 0;
}
@ -7257,6 +7282,11 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeCStrTo(pCoder, pRes->tableFName) < 0) return -1;
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
pRes->ctimeMs = 0;
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI64(pCoder, &pRes->ctimeMs) < 0) return -1;
}
return 0;
}
@ -7480,10 +7510,11 @@ int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq)
SSingleDeleteReq *pOneReq = taosArrayGet(pReq->deleteReqs, i);
if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1;
}
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
return 0;
}
int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (tDecodeI64(pDecoder, &pReq->suid) < 0) return -1;
int32_t sz;
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
@ -7497,6 +7528,24 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
return 0;
}
int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
return 0;
}
int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) {
if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1;
*(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs;
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
return 0;
}
static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubmitTbData) {
if (tStartEncode(pCoder) < 0) return -1;
@ -7531,6 +7580,7 @@ static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubm
pCoder->pos += rows[iRow]->len;
}
}
if (tEncodeI64(pCoder, pSubmitTbData->ctimeMs) < 0) return -1;
tEndEncode(pCoder);
return 0;
@ -7611,6 +7661,14 @@ static int32_t tDecodeSSubmitTbData(SDecoder *pCoder, SSubmitTbData *pSubmitTbDa
}
}
pSubmitTbData->ctimeMs = 0;
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI64(pCoder, &pSubmitTbData->ctimeMs) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
}
tEndDecode(pCoder);
_exit:

View File

@ -46,6 +46,7 @@ typedef struct {
int32_t vgId;
int32_t vgVersion;
int8_t dropped;
int32_t toVgId;
char path[PATH_MAX + 20];
} SWrapperCfg;
@ -55,6 +56,7 @@ typedef struct {
int32_t refCount;
int8_t dropped;
int8_t disable;
int32_t toVgId;
char *path;
SVnode *pImpl;
SMultiWorker pWriteW;
@ -70,6 +72,7 @@ typedef struct {
int32_t vnodeNum;
int32_t opened;
int32_t failed;
bool updateVnodesList;
int32_t threadIndex;
TdThread thread;
SVnodeMgmt *pMgmt;

View File

@ -71,6 +71,8 @@ static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg **
if (code < 0) goto _OVER;
tjsonGetInt32ValueFromDouble(vnode, "vgVersion", pCfg->vgVersion, code);
if (code < 0) goto _OVER;
tjsonGetInt32ValueFromDouble(vnode, "toVgId", pCfg->toVgId, code);
if (code < 0) goto _OVER;
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId);
}
@ -165,6 +167,7 @@ static int32_t vmEncodeVnodeList(SJson *pJson, SVnodeObj **ppVnodes, int32_t num
if (tjsonAddDoubleToObject(vnode, "vgId", pVnode->vgId) < 0) return -1;
if (tjsonAddDoubleToObject(vnode, "dropped", pVnode->dropped) < 0) return -1;
if (tjsonAddDoubleToObject(vnode, "vgVersion", pVnode->vgVersion) < 0) return -1;
if (pVnode->toVgId && tjsonAddDoubleToObject(vnode, "toVgId", pVnode->toVgId) < 0) return -1;
if (tjsonAddItemToArray(vnodes, vnode) < 0) return -1;
}
@ -179,7 +182,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
SVnodeObj **ppVnodes = NULL;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);
int32_t numOfVnodes = 0;

View File

@ -265,6 +265,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
#if 0
if (pMgmt->pTfs) {
if (tfsDirExistAt(pMgmt->pTfs, path, (SDiskID){0})) {
terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST;
@ -278,8 +279,9 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
}
#endif
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&req);
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
code = terrno;
@ -482,10 +484,18 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
int32_t srcVgId = req.srcVgId;
int32_t dstVgId = req.dstVgId;
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, dstVgId);
if (pVnode != NULL) {
dError("vgId:%d, vnode already exist", dstVgId);
vmReleaseVnode(pMgmt, pVnode);
terrno = TSDB_CODE_VND_ALREADY_EXIST;
return -1;
}
dInfo("vgId:%d, start to alter vnode hashrange:[%u, %u], dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd,
req.dstVgId);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, srcVgId);
pVnode = vmAcquireVnode(pMgmt, srcVgId);
if (pVnode == NULL) {
dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
@ -499,6 +509,13 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
};
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
// prepare alter
pVnode->toVgId = dstVgId;
if (vmWriteVnodeListToFile(pMgmt) != 0) {
dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr());
return -1;
}
dInfo("vgId:%d, close vnode", srcVgId);
vmCloseVnode(pMgmt, pVnode, true);
@ -530,6 +547,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
// complete alter
if (vmWriteVnodeListToFile(pMgmt) != 0) {
dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr());
return -1;
@ -712,6 +730,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_ADD_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_DEL_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME_PUSH, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_VG_WALINFO, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -158,6 +158,28 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
taosMemoryFree(pVnode);
}
static int32_t vmRestoreVgroupId(SWrapperCfg *pCfg, STfs *pTfs) {
int32_t srcVgId = pCfg->vgId;
int32_t dstVgId = pCfg->toVgId;
if (dstVgId == 0) return 0;
char srcPath[TSDB_FILENAME_LEN];
char dstPath[TSDB_FILENAME_LEN];
snprintf(srcPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, srcVgId);
snprintf(dstPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, dstVgId);
int32_t vgId = vnodeRestoreVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs);
if (vgId <= 0) {
dError("vgId:%d, failed to restore vgroup id. srcPath: %s", pCfg->vgId, srcPath);
return -1;
}
pCfg->vgId = vgId;
pCfg->toVgId = 0;
return 0;
}
static void *vmOpenVnodeInThread(void *param) {
SVnodeThread *pThread = param;
SVnodeMgmt *pMgmt = pThread->pMgmt;
@ -174,17 +196,33 @@ static void *vmOpenVnodeInThread(void *param) {
pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
tmsgReportStartup("vnode-open", stepDesc);
if (pCfg->toVgId) {
if (vmRestoreVgroupId(pCfg, pMgmt->pTfs) != 0) {
dError("vgId:%d, failed to restore vgroup id by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->failed++;
continue;
}
pThread->updateVnodesList = true;
}
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId);
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->failed++;
} else {
vmOpenVnode(pMgmt, pCfg, pImpl);
dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->opened++;
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
continue;
}
if (vmOpenVnode(pMgmt, pCfg, pImpl) != 0) {
dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->failed++;
continue;
}
dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex);
pThread->opened++;
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
}
dInfo("thread:%d, numOfVnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
@ -242,6 +280,8 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
taosThreadAttrDestroy(&thAttr);
}
bool updateVnodesList = false;
for (int32_t t = 0; t < threadNum; ++t) {
SVnodeThread *pThread = &threads[t];
if (pThread->vnodeNum > 0 && taosCheckPthreadValid(pThread->thread)) {
@ -249,6 +289,7 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
taosThreadClear(&pThread->thread);
}
taosMemoryFree(pThread->pCfgs);
if (pThread->updateVnodesList) updateVnodesList = true;
}
taosMemoryFree(threads);
taosMemoryFree(pCfgs);
@ -256,10 +297,15 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
if (pMgmt->state.openVnodes != pMgmt->state.totalVnodes) {
dError("there are total vnodes:%d, opened:%d", pMgmt->state.totalVnodes, pMgmt->state.openVnodes);
return -1;
} else {
dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes);
return 0;
}
if (updateVnodesList && vmWriteVnodeListToFile(pMgmt) != 0) {
dError("failed to write vnode list since %s", terrstr());
return -1;
}
dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes);
return 0;
}
static void *vmCloseVnodeInThread(void *param) {

View File

@ -218,6 +218,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since its disable", pVnode->vgId, pMsg);
terrno = TSDB_CODE_VND_STOPPED;
code = terrno;
break;
}
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);

View File

@ -23,10 +23,6 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
SEpSet epSet = {0};
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
if (epSet.numOfEps == 1) {
return;
}
const int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->pCont = rpcMallocCont(contLen);
if (pMsg->pCont == NULL) {

View File

@ -105,6 +105,7 @@ typedef struct {
SHashObj *dnodeHash;
TdThreadRwlock lock;
SMsgCb msgCb;
bool validMnodeEps;
} SDnodeData;
typedef struct {

View File

@ -288,6 +288,8 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
taosHashPut(pData->dnodeHash, &pDnodeEp->id, sizeof(int32_t), pDnodeEp, sizeof(SDnodeEp));
}
pData->validMnodeEps = true;
dmPrintEps(pData);
}
@ -348,6 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) {
}
void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) {
if(!pData->validMnodeEps) return;
dmGetMnodeEpSet(pData, pEpSet);
dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse);
for (int32_t i = 0; i < pEpSet->numOfEps; ++i) {

View File

@ -227,6 +227,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
}
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) {
mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version);
terrno = code;
goto _OVER;
}

View File

@ -888,7 +888,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
SVDropTtlTableReq ttlReq = {.timestamp = taosGetTimestampSec()};
SVDropTtlTableReq ttlReq = {.timestampSec = taosGetTimestampSec()};
int32_t reqLen = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq);
int32_t contLen = reqLen + sizeof(SMsgHead);
@ -914,7 +914,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
if (code != 0) {
mError("vgId:%d, failed to send drop ttl table request to vnode since 0x%x", pVgroup->vgId, code);
} else {
mInfo("vgId:%d, send drop ttl table request to vnode, time:%d", pVgroup->vgId, ttlReq.timestamp);
mInfo("vgId:%d, send drop ttl table request to vnode, time:%" PRId32, pVgroup->vgId, ttlReq.timestampSec);
}
sdbRelease(pSdb, pVgroup);
}
@ -3159,8 +3159,14 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
SSdb *pSdb = pMnode->pSdb;
SStbObj *pStb = NULL;
int32_t numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);
int32_t numOfRows = 0;
if (!pShow->sysDbRsp) {
numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);
pShow->sysDbRsp = true;
}
SDbObj *pDb = NULL;
if (strlen(pShow->db) > 0) {
pDb = mndAcquireDb(pMnode, pShow->db);

View File

@ -912,12 +912,14 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTopic->createTime, false);
char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
char *sql = taosMemoryMalloc(strlen(pTopic->sql) + VARSTR_HEADER_SIZE);
STR_TO_VARSTR(sql, pTopic->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
taosMemoryFree(sql);
char *schemaJson = taosMemoryMalloc(TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE);
if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){
schemaToJson(pTopic->schema.pSchema, pTopic->schema.nCols, schemaJson);

View File

@ -1217,6 +1217,7 @@ static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, i
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_VND_ALTER_HASHRANGE;
action.acceptableCode = TSDB_CODE_VND_ALREADY_EXIST;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);

View File

@ -27,6 +27,7 @@ target_sources(
"src/meta/metaEntry.c"
"src/meta/metaSnapshot.c"
"src/meta/metaCache.c"
"src/meta/metaTtl.c"
# sma
"src/sma/smaEnv.c"
@ -80,6 +81,7 @@ IF (TD_VNODE_PLUGINS)
)
ENDIF ()
IF (NOT ${TD_LINUX})
target_include_directories(
vnode
PUBLIC "inc"
@ -87,7 +89,25 @@ target_include_directories(
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
ELSE()
target_include_directories(
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
)
ENDIF (NOT ${TD_LINUX})
IF (TD_LINUX)
target_include_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
target_link_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
target_link_libraries(
vnode
PUBLIC os

View File

@ -54,6 +54,7 @@ void vnodeCleanup();
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs);
int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnodeHashRangeReq *pReq, STfs *pTfs);
int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs);
void vnodeDestroy(const char *path, STfs *pTfs);
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
void vnodePreClose(SVnode *pVnode);
@ -114,6 +115,7 @@ int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName);
int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid);
int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType);
int metaGetTableTtlByUid(void *meta, uint64_t uid, int64_t *ttlDays);
bool metaIsTableExist(void* pVnode, tb_uid_t uid);
int32_t metaGetCachedTableUidList(void *pVnode, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
bool *acquired);

View File

@ -17,6 +17,7 @@
#define _TD_VNODE_META_H_
#include "index.h"
#include "metaTtl.h"
#include "vnodeInt.h"
#ifdef __cplusplus
@ -89,10 +90,10 @@ struct SMeta {
// ivt idx and idx
void* pTagIvtIdx;
TTB* pTagIdx;
TTB* pTtlIdx;
TTB* pTagIdx;
STtlManger* pTtlMgr;
TTB* pCtimeIdx; // table created time idx
TTB* pBtimeIdx; // table created time idx
TTB* pNcolIdx; // ncol of table idx, normal table only
TTB* pSmaIdx;
@ -138,20 +139,15 @@ typedef struct {
} STagIdxKey;
#pragma pack(pop)
typedef struct {
int64_t dtime;
tb_uid_t uid;
} STtlIdxKey;
typedef struct {
tb_uid_t uid;
int64_t smaUid;
} SSmaIdxKey;
typedef struct {
int64_t ctime;
int64_t btime;
tb_uid_t uid;
} SCtimeIdxKey;
} SBtimeIdxKey;
typedef struct {
int64_t ncol;

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_TTL_H_
#define _TD_VNODE_TTL_H_
#include "taosdef.h"
#include "thash.h"
#include "tdb.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum DirtyEntryType {
ENTRY_TYPE_DEL = 1,
ENTRY_TYPE_UPSERT = 2,
} DirtyEntryType;
typedef struct STtlManger {
TdThreadRwlock lock;
TTB* pOldTtlIdx; // btree<{deleteTime, tuid}, NULL>
SHashObj* pTtlCache; // key: tuid, value: {ttl, ctime}
SHashObj* pDirtyUids; // dirty tuid
TTB* pTtlIdx; // btree<{deleteTime, tuid}, ttl>
} STtlManger;
typedef struct {
int64_t ttlDays;
int64_t changeTimeMs;
} STtlCacheEntry;
typedef struct {
DirtyEntryType type;
} STtlDirtyEntry;
typedef struct {
int64_t deleteTimeSec;
tb_uid_t uid;
} STtlIdxKey;
typedef struct {
int64_t deleteTimeMs;
tb_uid_t uid;
} STtlIdxKeyV1;
typedef struct {
int64_t ttlDays;
} STtlIdxValue;
typedef struct {
tb_uid_t uid;
int64_t changeTimeMs;
} STtlUpdCtimeCtx;
typedef struct {
tb_uid_t uid;
int64_t changeTimeMs;
int64_t ttlDays;
} STtlUpdTtlCtx;
typedef struct {
tb_uid_t uid;
TXN* pTxn;
} STtlDelTtlCtx;
int ttlMgrOpen(STtlManger** ppTtlMgr, TDB* pEnv, int8_t rollback);
int ttlMgrClose(STtlManger* pTtlMgr);
int ttlMgrBegin(STtlManger* pTtlMgr, void* pMeta);
int ttlMgrConvert(TTB* pOldTtlIdx, TTB* pNewTtlIdx, void* pMeta);
int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn);
int ttlMgrInsertTtl(STtlManger* pTtlMgr, const STtlUpdTtlCtx* pUpdCtx);
int ttlMgrDeleteTtl(STtlManger* pTtlMgr, const STtlDelTtlCtx* pDelCtx);
int ttlMgrUpdateChangeTime(STtlManger* pTtlMgr, const STtlUpdCtimeCtx* pUpdCtimeCtx);
int ttlMgrFindExpired(STtlManger* pTtlMgr, int64_t timePointMs, SArray* pTbUids);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_TTL_H_*/
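For reference, STtlIdxKeyV1 orders entries by absolute expiry time in milliseconds, so expired tables can be collected with a single range scan up to the current time. Below is a minimal sketch of how such a key is derived from a table's change time and its TTL in days, assuming tsTtlUnit is 86400 (seconds per day); the uid and timestamps are made-up values.

```c
/* Minimal sketch, assuming tsTtlUnit == 86400 (seconds per day). */
#include <inttypes.h>
#include <stdio.h>

typedef struct {
  int64_t deleteTimeMs; /* absolute expiry time, primary sort key */
  int64_t uid;          /* table uid, tie breaker */
} DemoTtlKey;

static DemoTtlKey demoBuildKey(int64_t uid, int64_t changeTimeMs, int64_t ttlDays) {
  const int64_t secondsPerDay = 86400; /* assumed value of tsTtlUnit */
  DemoTtlKey key = {.deleteTimeMs = changeTimeMs + ttlDays * secondsPerDay * 1000, .uid = uid};
  return key;
}

int main(void) {
  /* a table last changed at 2023-06-25 00:00:00 UTC with a 7-day TTL */
  DemoTtlKey key = demoBuildKey(1001, 1687651200000LL, 7);
  printf("uid %" PRId64 " expires at %" PRId64 " ms\n", key.uid, key.deleteTimeMs);
  return 0;
}
```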

View File

@ -149,8 +149,9 @@ int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid);
int32_t metaTrimTables(SMeta* pMeta);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaTtlDropTable(SMeta* pMeta, int64_t timePointMs, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
int metaUpdateChangeTime(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
int32_t metaGetTbTSchemaEx(SMeta* pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sver, STSchema** ppTSchema);
@ -176,7 +177,6 @@ SArray* metaGetSmaIdsByTable(SMeta* pMeta, tb_uid_t uid);
SArray* metaGetSmaTbUids(SMeta* pMeta);
void* metaGetIdx(SMeta* pMeta);
void* metaGetIvtIdx(SMeta* pMeta);
int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList);
void metaReaderInit(SMetaReader* pReader, SMeta* pMeta, int32_t flags);
@ -229,6 +229,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg);
// tq-stream

View File

@ -40,6 +40,12 @@ int metaBegin(SMeta *pMeta, int8_t heap) {
return -1;
}
if (ttlMgrBegin(pMeta->pTtlMgr, pMeta) < 0) {
return -1;
}
tdbCommit(pMeta->pEnv, pMeta->txn);
return 0;
}
@ -50,6 +56,7 @@ int metaFinishCommit(SMeta *pMeta, TXN *txn) { return tdbPostCommit(pMeta->pEnv
int metaPrepareAsyncCommit(SMeta *pMeta) {
// return tdbPrepareAsyncCommit(pMeta->pEnv, pMeta->txn);
int code = 0;
code = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn);
code = tdbCommit(pMeta->pEnv, pMeta->txn);
return code;

View File

@ -31,7 +31,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
if (tEncodeSRSmaParam(pCoder, &pME->stbEntry.rsmaParam) < 0) return -1;
}
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tEncodeI64(pCoder, pME->ctbEntry.ctime) < 0) return -1;
if (tEncodeI64(pCoder, pME->ctbEntry.btime) < 0) return -1;
if (tEncodeI32(pCoder, pME->ctbEntry.ttlDays) < 0) return -1;
if (tEncodeI32v(pCoder, pME->ctbEntry.commentLen) < 0) return -1;
if (pME->ctbEntry.commentLen > 0) {
@ -40,7 +40,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
if (tEncodeI64(pCoder, pME->ctbEntry.suid) < 0) return -1;
if (tEncodeTag(pCoder, (const STag *)pME->ctbEntry.pTags) < 0) return -1;
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1;
if (tEncodeI64(pCoder, pME->ntbEntry.btime) < 0) return -1;
if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1;
if (tEncodeI32v(pCoder, pME->ntbEntry.commentLen) < 0) return -1;
if (pME->ntbEntry.commentLen > 0) {
@ -76,7 +76,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeSRSmaParam(pCoder, &pME->stbEntry.rsmaParam) < 0) return -1;
}
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1;
if (tDecodeI64(pCoder, &pME->ctbEntry.btime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1;
if (tDecodeI32v(pCoder, &pME->ctbEntry.commentLen) < 0) return -1;
if (pME->ctbEntry.commentLen > 0) {
@ -85,7 +85,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeI64(pCoder, &pME->ctbEntry.suid) < 0) return -1;
if (tDecodeTag(pCoder, (STag **)&pME->ctbEntry.pTags) < 0) return -1; // (TODO)
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1;
if (tDecodeI64(pCoder, &pME->ntbEntry.btime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1;
if (tDecodeI32v(pCoder, &pME->ntbEntry.commentLen) < 0) return -1;
if (pME->ntbEntry.commentLen > 0) {

View File

@ -19,12 +19,11 @@ static int tbDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen
static int skmDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ctbIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int uidIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int taskIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ctimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int btimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ncolIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); }
@ -128,8 +127,8 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
goto _err;
}
// open pTtlIdx
ret = tdbTbOpen("ttl.idx", sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pMeta->pEnv, &pMeta->pTtlIdx, 0);
// open pTtlMgr ("ttlv1.idx")
ret = ttlMgrOpen(&pMeta->pTtlMgr, pMeta->pEnv, 0);
if (ret < 0) {
metaError("vgId:%d, failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@ -143,7 +142,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
}
// idx table create time
ret = tdbTbOpen("ctime.idx", sizeof(SCtimeIdxKey), 0, ctimeIdxCmpr, pMeta->pEnv, &pMeta->pCtimeIdx, 0);
ret = tdbTbOpen("ctime.idx", sizeof(SBtimeIdxKey), 0, btimeIdxCmpr, pMeta->pEnv, &pMeta->pBtimeIdx, 0);
if (ret < 0) {
metaError("vgId:%d, failed to open meta ctime index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@ -184,9 +183,9 @@ _err:
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
if (pMeta->pCtimeIdx) tdbTbClose(pMeta->pCtimeIdx);
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx);
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
@ -209,9 +208,9 @@ int metaClose(SMeta **ppMeta) {
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
if (pMeta->pCtimeIdx) tdbTbClose(pMeta->pCtimeIdx);
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx);
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
@ -399,37 +398,18 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL
return 0;
}
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
STtlIdxKey *pTtlIdxKey1 = (STtlIdxKey *)pKey1;
STtlIdxKey *pTtlIdxKey2 = (STtlIdxKey *)pKey2;
if (pTtlIdxKey1->dtime > pTtlIdxKey2->dtime) {
static int btimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
SBtimeIdxKey *pBtimeIdxKey1 = (SBtimeIdxKey *)pKey1;
SBtimeIdxKey *pBtimeIdxKey2 = (SBtimeIdxKey *)pKey2;
if (pBtimeIdxKey1->btime > pBtimeIdxKey2->btime) {
return 1;
} else if (pTtlIdxKey1->dtime < pTtlIdxKey2->dtime) {
} else if (pBtimeIdxKey1->btime < pBtimeIdxKey2->btime) {
return -1;
}
if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) {
if (pBtimeIdxKey1->uid > pBtimeIdxKey2->uid) {
return 1;
} else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) {
return -1;
}
return 0;
}
static int ctimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
SCtimeIdxKey *pCtimeIdxKey1 = (SCtimeIdxKey *)pKey1;
SCtimeIdxKey *pCtimeIdxKey2 = (SCtimeIdxKey *)pKey2;
if (pCtimeIdxKey1->ctime > pCtimeIdxKey2->ctime) {
return 1;
} else if (pCtimeIdxKey1->ctime < pCtimeIdxKey2->ctime) {
return -1;
}
if (pCtimeIdxKey1->uid > pCtimeIdxKey2->uid) {
return 1;
} else if (pCtimeIdxKey1->uid < pCtimeIdxKey2->uid) {
} else if (pBtimeIdxKey1->uid < pBtimeIdxKey2->uid) {
return -1;
}

View File

@ -212,6 +212,29 @@ int metaReadNext(SMetaReader *pReader) {
return 0;
}
int metaGetTableTtlByUid(void *meta, uint64_t uid, int64_t *ttlDays) {
int code = -1;
SMetaReader mr = {0};
metaReaderInit(&mr, (SMeta *)meta, 0);
code = metaReaderGetTableEntryByUid(&mr, uid);
if (code < 0) {
goto _exit;
}
if (mr.me.type == TSDB_CHILD_TABLE) {
*ttlDays = mr.me.ctbEntry.ttlDays;
} else if (mr.me.type == TSDB_NORMAL_TABLE) {
*ttlDays = mr.me.ntbEntry.ttlDays;
} else {
goto _exit;
}
code = 0;
_exit:
metaReaderClear(&mr);
return code;
}
#if 1 // ===================================================
SMTbCursor *metaOpenTbCursor(void *pVnode) {
SMTbCursor *pTbCur = NULL;
@ -387,37 +410,6 @@ _err:
return NULL;
}
int metaTtlSmaller(SMeta *pMeta, uint64_t ttl, SArray *uidList) {
TBC *pCur;
int ret = tdbTbcOpen(pMeta->pTtlIdx, &pCur, NULL);
if (ret < 0) {
return ret;
}
STtlIdxKey ttlKey = {0};
ttlKey.dtime = ttl;
ttlKey.uid = INT64_MAX;
int c = 0;
tdbTbcMoveTo(pCur, &ttlKey, sizeof(ttlKey), &c);
if (c < 0) {
tdbTbcMoveToPrev(pCur);
}
void *pKey = NULL;
int kLen = 0;
while (1) {
ret = tdbTbcPrev(pCur, &pKey, &kLen, NULL, NULL);
if (ret < 0) {
break;
}
ttlKey = *(STtlIdxKey *)pKey;
taosArrayPush(uidList, &ttlKey.uid);
}
tdbFree(pKey);
tdbTbcClose(pCur);
return 0;
}
struct SMCtbCursor {
SMeta *pMeta;
TBC *pCur;
@ -1018,17 +1010,17 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
pCursor->type = param->type;
metaRLock(pMeta);
ret = tdbTbcOpen(pMeta->pCtimeIdx, &pCursor->pCur, NULL);
ret = tdbTbcOpen(pMeta->pBtimeIdx, &pCursor->pCur, NULL);
if (ret != 0) {
goto END;
}
int64_t uidLimit = param->reverse ? INT64_MAX : 0;
SCtimeIdxKey ctimeKey = {.ctime = *(int64_t *)(param->val), .uid = uidLimit};
SCtimeIdxKey *pCtimeKey = &ctimeKey;
SBtimeIdxKey btimeKey = {.btime = *(int64_t *)(param->val), .uid = uidLimit};
SBtimeIdxKey *pBtimeKey = &btimeKey;
int cmp = 0;
if (tdbTbcMoveTo(pCursor->pCur, &ctimeKey, sizeof(ctimeKey), &cmp) < 0) {
if (tdbTbcMoveTo(pCursor->pCur, &btimeKey, sizeof(btimeKey), &cmp) < 0) {
goto END;
}
@ -1042,10 +1034,10 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
valid = tdbTbcGet(pCursor->pCur, (const void **)&entryKey, &nEntryKey, NULL, NULL);
if (valid < 0) break;
SCtimeIdxKey *p = entryKey;
SBtimeIdxKey *p = entryKey;
if (count > TRY_ERROR_LIMIT) break;
int32_t cmp = (*param->filterFunc)((void *)&p->ctime, (void *)&pCtimeKey->ctime, param->type);
int32_t cmp = (*param->filterFunc)((void *)&p->btime, (void *)&pBtimeKey->btime, param->type);
if (cmp == 0)
taosArrayPush(pUids, &p->uid);
else {
@ -1149,7 +1141,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
pCursor->type = param->type;
metaRLock(pMeta);
ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL);
//ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL);
END:
if (pCursor->pMeta) metaULock(pCursor->pMeta);

View File

@ -20,7 +20,7 @@ static int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, con
static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME);
static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME);
@ -28,8 +28,8 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);
static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey);
// opt ins_tables query
static int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaDeleteCtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateBtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaDeleteNcolIdx(SMeta *pMeta, const SMetaEntry *pME);
@ -734,7 +734,7 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
me.uid = pReq->uid;
me.name = pReq->name;
if (me.type == TSDB_CHILD_TABLE) {
me.ctbEntry.ctime = pReq->ctime;
me.ctbEntry.btime = pReq->btime;
me.ctbEntry.ttlDays = pReq->ttl;
me.ctbEntry.commentLen = pReq->commentLen;
me.ctbEntry.comment = pReq->comment;
@ -770,7 +770,7 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
metaTbGroupCacheClear(pMeta, me.ctbEntry.suid);
metaULock(pMeta);
} else {
me.ntbEntry.ctime = pReq->ctime;
me.ntbEntry.btime = pReq->btime;
me.ntbEntry.ttlDays = pReq->ttl;
me.ntbEntry.commentLen = pReq->commentLen;
me.ntbEntry.comment = pReq->comment;
@ -923,50 +923,40 @@ end:
return code;
}
int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
int ret = metaTtlSmaller(pMeta, ttl, tbUids);
int metaTtlDropTable(SMeta *pMeta, int64_t timePointMs, SArray *tbUids) {
int ret = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn);
if (ret != 0) {
metaError("ttl failed to flush, ret:%d", ret);
return ret;
}
ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids);
if (ret != 0) {
metaError("ttl failed to find expired table, ret:%d", ret);
return ret;
}
if (TARRAY_SIZE(tbUids) == 0) {
return 0;
}
metaInfo("ttl find expired table count: %zu" , TARRAY_SIZE(tbUids));
metaDropTables(pMeta, tbUids);
return 0;
}
static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME) {
int64_t ttlDays = 0;
int64_t ctime = 0;
static int metaBuildBtimeIdxKey(SBtimeIdxKey *btimeKey, const SMetaEntry *pME) {
int64_t btime;
if (pME->type == TSDB_CHILD_TABLE) {
ctime = pME->ctbEntry.ctime;
ttlDays = pME->ctbEntry.ttlDays;
btime = pME->ctbEntry.btime;
} else if (pME->type == TSDB_NORMAL_TABLE) {
ctime = pME->ntbEntry.ctime;
ttlDays = pME->ntbEntry.ttlDays;
} else {
metaError("meta/table: invalide table type: %" PRId8 " build ttl idx key failed.", pME->type);
return;
}
if (ttlDays <= 0) return;
ttlKey->dtime = ctime / 1000 + ttlDays * tsTtlUnit;
ttlKey->uid = pME->uid;
}
static int metaBuildCtimeIdxKey(SCtimeIdxKey *ctimeKey, const SMetaEntry *pME) {
int64_t ctime;
if (pME->type == TSDB_CHILD_TABLE) {
ctime = pME->ctbEntry.ctime;
} else if (pME->type == TSDB_NORMAL_TABLE) {
ctime = pME->ntbEntry.ctime;
btime = pME->ntbEntry.btime;
} else {
return -1;
}
ctimeKey->ctime = ctime;
ctimeKey->uid = pME->uid;
btimeKey->btime = btime;
btimeKey->uid = pME->uid;
return 0;
}
@ -980,11 +970,9 @@ static int metaBuildNColIdxKey(SNcolIdxKey *ncolKey, const SMetaEntry *pME) {
return 0;
}
static int metaDeleteTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
STtlIdxKey ttlKey = {0};
metaBuildTtlIdxKey(&ttlKey, pME);
if (ttlKey.dtime == 0) return 0;
return tdbTbDelete(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), pMeta->txn);
static int metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) {
STtlDelTtlCtx ctx = {.uid = pME->uid, .pTxn = pMeta->txn};
return ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx);
}
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
@ -1066,10 +1054,10 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
tdbTbDelete(pMeta->pNameIdx, e.name, strlen(e.name) + 1, pMeta->txn);
tdbTbDelete(pMeta->pUidIdx, &uid, sizeof(uid), pMeta->txn);
if (e.type == TSDB_CHILD_TABLE || e.type == TSDB_NORMAL_TABLE) metaDeleteCtimeIdx(pMeta, &e);
if (e.type == TSDB_CHILD_TABLE || e.type == TSDB_NORMAL_TABLE) metaDeleteBtimeIdx(pMeta, &e);
if (e.type == TSDB_NORMAL_TABLE) metaDeleteNcolIdx(pMeta, &e);
if (e.type != TSDB_SUPER_TABLE) metaDeleteTtlIdx(pMeta, &e);
if (e.type != TSDB_SUPER_TABLE) metaDeleteTtl(pMeta, &e);
if (e.type == TSDB_CHILD_TABLE) {
tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), pMeta->txn);
@ -1102,23 +1090,23 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
return 0;
}
// opt ins_tables
int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
SCtimeIdxKey ctimeKey = {0};
if (metaBuildCtimeIdxKey(&ctimeKey, pME) < 0) {
int metaUpdateBtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
SBtimeIdxKey btimeKey = {0};
if (metaBuildBtimeIdxKey(&btimeKey, pME) < 0) {
return 0;
}
metaTrace("vgId:%d, start to save version:%" PRId64 " uid:%" PRId64 " ctime:%" PRId64, TD_VID(pMeta->pVnode),
pME->version, pME->uid, ctimeKey.ctime);
metaTrace("vgId:%d, start to save version:%" PRId64 " uid:%" PRId64 " btime:%" PRId64, TD_VID(pMeta->pVnode),
pME->version, pME->uid, btimeKey.btime);
return tdbTbUpsert(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), NULL, 0, pMeta->txn);
return tdbTbUpsert(pMeta->pBtimeIdx, &btimeKey, sizeof(btimeKey), NULL, 0, pMeta->txn);
}
int metaDeleteCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
SCtimeIdxKey ctimeKey = {0};
if (metaBuildCtimeIdxKey(&ctimeKey, pME) < 0) {
int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
SBtimeIdxKey btimeKey = {0};
if (metaBuildBtimeIdxKey(&btimeKey, pME) < 0) {
return 0;
}
return tdbTbDelete(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), pMeta->txn);
return tdbTbDelete(pMeta->pBtimeIdx, &btimeKey, sizeof(btimeKey), pMeta->txn);
}
int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME) {
SNcolIdxKey ncolKey = {0};
@ -1328,6 +1316,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
metaULock(pMeta);
metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs);
metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp);
if (entry.pBuf) taosMemoryFree(entry.pBuf);
@ -1515,6 +1505,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaULock(pMeta);
metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs);
tDecoderClear(&dc1);
tDecoderClear(&dc2);
taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
@ -1603,9 +1595,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
// build SMetaEntry
if (entry.type == TSDB_CHILD_TABLE) {
if (pAlterTbReq->updateTTL) {
metaDeleteTtlIdx(pMeta, &entry);
metaDeleteTtl(pMeta, &entry);
entry.ctbEntry.ttlDays = pAlterTbReq->newTTL;
metaUpdateTtlIdx(pMeta, &entry);
metaUpdateTtl(pMeta, &entry);
}
if (pAlterTbReq->newCommentLen >= 0) {
entry.ctbEntry.commentLen = pAlterTbReq->newCommentLen;
@ -1613,9 +1605,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
}
} else {
if (pAlterTbReq->updateTTL) {
metaDeleteTtlIdx(pMeta, &entry);
metaDeleteTtl(pMeta, &entry);
entry.ntbEntry.ttlDays = pAlterTbReq->newTTL;
metaUpdateTtlIdx(pMeta, &entry);
metaUpdateTtl(pMeta, &entry);
}
if (pAlterTbReq->newCommentLen >= 0) {
entry.ntbEntry.commentLen = pAlterTbReq->newCommentLen;
@ -1628,6 +1620,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
metaUpdateUidIdx(pMeta, &entry);
metaULock(pMeta);
metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs);
tdbTbcClose(pTbDbc);
tdbTbcClose(pUidIdxc);
tDecoderClear(&dc);
@ -1967,11 +1961,28 @@ static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) {
return tdbTbUpsert(pMeta->pNameIdx, pME->name, strlen(pME->name) + 1, &pME->uid, sizeof(tb_uid_t), pMeta->txn);
}
static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
STtlIdxKey ttlKey = {0};
metaBuildTtlIdxKey(&ttlKey, pME);
if (ttlKey.dtime == 0) return 0;
return tdbTbUpsert(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), NULL, 0, pMeta->txn);
static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) {
if (pME->type != TSDB_CHILD_TABLE && pME->type != TSDB_NORMAL_TABLE) return 0;
STtlUpdTtlCtx ctx = {.uid = pME->uid};
if (pME->type == TSDB_CHILD_TABLE) {
ctx.ttlDays = pME->ctbEntry.ttlDays;
ctx.changeTimeMs = pME->ctbEntry.btime;
} else {
ctx.ttlDays = pME->ntbEntry.ttlDays;
ctx.changeTimeMs = pME->ntbEntry.btime;
}
return ttlMgrInsertTtl(pMeta->pTtlMgr, &ctx);
}
int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) {
if (!tsTtlChangeOnWrite) return 0;
STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs};
return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx);
}
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
@ -2182,7 +2193,7 @@ int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) {
}
}
code = metaUpdateCtimeIdx(pMeta, pME);
code = metaUpdateBtimeIdx(pMeta, pME);
VND_CHECK_CODE(code, line, _err);
if (pME->type == TSDB_NORMAL_TABLE) {
@ -2191,7 +2202,7 @@ int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) {
}
if (pME->type != TSDB_SUPER_TABLE) {
code = metaUpdateTtlIdx(pMeta, pME);
code = metaUpdateTtl(pMeta, pME);
VND_CHECK_CODE(code, line, _err);
}

View File

@ -0,0 +1,434 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "metaTtl.h"
#include "meta.h"
typedef struct {
TTB *pNewTtlIdx;
SMeta *pMeta;
} SConvertData;
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid);
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ttlMgrFillCache(STtlManger *pTtlMgr);
static int32_t ttlMgrFillCacheOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pTtlCache);
static int32_t ttlMgrConvertOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pConvertData);
static int32_t ttlMgrWLock(STtlManger *pTtlMgr);
static int32_t ttlMgrRLock(STtlManger *pTtlMgr);
static int32_t ttlMgrULock(STtlManger *pTtlMgr);
const char *ttlTbname = "ttl.idx";
const char *ttlV1Tbname = "ttlv1.idx";
int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
int ret;
*ppTtlMgr = NULL;
STtlManger *pTtlMgr = (STtlManger *)tdbOsCalloc(1, sizeof(*pTtlMgr));
if (pTtlMgr == NULL) {
return -1;
}
if (tdbTbExist(ttlTbname, pEnv)) {
ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pEnv, &pTtlMgr->pOldTtlIdx, rollback);
if (ret < 0) {
metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno));
tdbOsFree(pTtlMgr);
return ret;
}
}
ret = tdbTbOpen(ttlV1Tbname, TDB_VARIANT_LEN, TDB_VARIANT_LEN, ttlIdxKeyV1Cmpr, pEnv, &pTtlMgr->pTtlIdx, rollback);
if (ret < 0) {
metaError("failed to open %s since %s", ttlV1Tbname, tstrerror(terrno));
tdbOsFree(pTtlMgr);
return ret;
}
pTtlMgr->pTtlCache = taosHashInit(8192, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
pTtlMgr->pDirtyUids = taosHashInit(8192, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
taosThreadRwlockInit(&pTtlMgr->lock, NULL);
*ppTtlMgr = pTtlMgr;
return 0;
}
int ttlMgrClose(STtlManger *pTtlMgr) {
taosHashCleanup(pTtlMgr->pTtlCache);
taosHashCleanup(pTtlMgr->pDirtyUids);
tdbTbClose(pTtlMgr->pTtlIdx);
taosThreadRwlockDestroy(&pTtlMgr->lock);
tdbOsFree(pTtlMgr);
return 0;
}
int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
metaInfo("ttl mgr start open");
int ret;
int64_t startNs = taosGetTimestampNs();
SMeta *meta = (SMeta *)pMeta;
if (pTtlMgr->pOldTtlIdx) {
ret = ttlMgrConvert(pTtlMgr->pOldTtlIdx, pTtlMgr->pTtlIdx, pMeta);
if (ret < 0) {
metaError("failed to convert ttl index since %s", tstrerror(terrno));
goto _out;
}
ret = tdbTbDropByName(ttlTbname, meta->pEnv, meta->txn);
if (ret < 0) {
metaError("failed to drop old ttl index since %s", tstrerror(terrno));
goto _out;
}
tdbTbClose(pTtlMgr->pOldTtlIdx);
pTtlMgr->pOldTtlIdx = NULL;
}
ret = ttlMgrFillCache(pTtlMgr);
if (ret < 0) {
metaError("failed to fill hash since %s", tstrerror(terrno));
goto _out;
}
int64_t endNs = taosGetTimestampNs();
metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
endNs - startNs);
_out:
return ret;
}
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid) {
if (ttlDays <= 0) return;
pTtlKey->deleteTimeMs = changeTimeMs + ttlDays * tsTtlUnit * 1000;
pTtlKey->uid = uid;
}
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
STtlIdxKey *pTtlIdxKey1 = (STtlIdxKey *)pKey1;
STtlIdxKey *pTtlIdxKey2 = (STtlIdxKey *)pKey2;
if (pTtlIdxKey1->deleteTimeSec > pTtlIdxKey2->deleteTimeSec) {
return 1;
} else if (pTtlIdxKey1->deleteTimeSec < pTtlIdxKey2->deleteTimeSec) {
return -1;
}
if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) {
return 1;
} else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) {
return -1;
}
return 0;
}
static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
STtlIdxKeyV1 *pTtlIdxKey1 = (STtlIdxKeyV1 *)pKey1;
STtlIdxKeyV1 *pTtlIdxKey2 = (STtlIdxKeyV1 *)pKey2;
if (pTtlIdxKey1->deleteTimeMs > pTtlIdxKey2->deleteTimeMs) {
return 1;
} else if (pTtlIdxKey1->deleteTimeMs < pTtlIdxKey2->deleteTimeMs) {
return -1;
}
if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) {
return 1;
} else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) {
return -1;
}
return 0;
}
static int ttlMgrFillCache(STtlManger *pTtlMgr) {
return tdbTbTraversal(pTtlMgr->pTtlIdx, pTtlMgr->pTtlCache, ttlMgrFillCacheOneEntry);
}
static int32_t ttlMgrFillCacheOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pTtlCache) {
SHashObj *pCache = (SHashObj *)pTtlCache;
STtlIdxKeyV1 *ttlKey = (STtlIdxKeyV1 *)pKey;
tb_uid_t uid = ttlKey->uid;
int64_t ttlDays = *(int64_t *)pVal;
int64_t changeTimeMs = ttlKey->deleteTimeMs - ttlDays * tsTtlUnit * 1000;
STtlCacheEntry data = {.ttlDays = ttlDays, .changeTimeMs = changeTimeMs};
return taosHashPut(pCache, &uid, sizeof(uid), &data, sizeof(data));
}
static int ttlMgrConvertOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pConvertData) {
SConvertData *pData = (SConvertData *)pConvertData;
STtlIdxKey *ttlKey = (STtlIdxKey *)pKey;
tb_uid_t uid = ttlKey->uid;
int64_t ttlDays = 0;
int ret = metaGetTableTtlByUid(pData->pMeta, uid, &ttlDays);
if (ret < 0) {
metaError("ttlMgr convert failed to get ttl since %s", tstrerror(terrno));
goto _out;
}
STtlIdxKeyV1 ttlKeyV1 = {.deleteTimeMs = ttlKey->deleteTimeSec * 1000, .uid = uid};
ret = tdbTbUpsert(pData->pNewTtlIdx, &ttlKeyV1, sizeof(ttlKeyV1), &ttlDays, sizeof(ttlDays), pData->pMeta->txn);
if (ret < 0) {
metaError("ttlMgr convert failed to upsert since %s", tstrerror(terrno));
goto _out;
}
ret = 0;
_out:
return ret;
}
int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) {
SMeta *meta = pMeta;
metaInfo("ttlMgr convert ttl start.");
SConvertData cvData = {.pNewTtlIdx = pNewTtlIdx, .pMeta = meta};
int ret = tdbTbTraversal(pOldTtlIdx, &cvData, ttlMgrConvertOneEntry);
if (ret < 0) {
metaError("failed to convert ttl since %s", tstrerror(terrno));
}
metaInfo("ttlMgr convert ttl end.");
return ret;
}
int ttlMgrInsertTtl(STtlManger *pTtlMgr, const STtlUpdTtlCtx *updCtx) {
if (updCtx->ttlDays == 0) return 0;
STtlCacheEntry cacheEntry = {.ttlDays = updCtx->ttlDays, .changeTimeMs = updCtx->changeTimeMs};
STtlDirtyEntry dirtyEntry = {.type = ENTRY_TYPE_UPSERT};
ttlMgrWLock(pTtlMgr);
int ret = taosHashPut(pTtlMgr->pTtlCache, &updCtx->uid, sizeof(updCtx->uid), &cacheEntry, sizeof(cacheEntry));
if (ret < 0) {
metaError("ttlMgr insert failed to update ttl cache since %s", tstrerror(terrno));
goto _out;
}
ret = taosHashPut(pTtlMgr->pDirtyUids, &updCtx->uid, sizeof(updCtx->uid), &dirtryEntry, sizeof(dirtryEntry));
if (ret < 0) {
metaError("ttlMgr insert failed to update ttl dirty uids since %s", tstrerror(terrno));
goto _out;
}
ret = 0;
_out:
ttlMgrULock(pTtlMgr);
metaDebug("ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, updCtx->uid,
updCtx->changeTimeMs, updCtx->ttlDays);
return ret;
}
int ttlMgrDeleteTtl(STtlManger *pTtlMgr, const STtlDelTtlCtx *delCtx) {
ttlMgrWLock(pTtlMgr);
STtlDirtyEntry dirtyEntry = {.type = ENTRY_TYPE_DEL};
int ret = taosHashPut(pTtlMgr->pDirtyUids, &delCtx->uid, sizeof(delCtx->uid), &dirtryEntry, sizeof(dirtryEntry));
if (ret < 0) {
metaError("ttlMgr del failed to update ttl dirty uids since %s", tstrerror(terrno));
goto _out;
}
ret = 0;
_out:
ttlMgrULock(pTtlMgr);
metaDebug("ttl mgr delete ttl, uid: %" PRId64, delCtx->uid);
return ret;
}
int ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdCtimeCtx) {
ttlMgrWLock(pTtlMgr);
STtlCacheEntry *oldData = taosHashGet(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid));
if (oldData == NULL) {
goto _out;
}
STtlCacheEntry cacheEntry = {.ttlDays = oldData->ttlDays, .changeTimeMs = pUpdCtimeCtx->changeTimeMs};
STtlDirtyEntry dirtyEntry = {.type = ENTRY_TYPE_UPSERT};
int ret =
taosHashPut(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &cacheEntry, sizeof(cacheEntry));
if (ret < 0) {
metaError("ttlMgr update ctime failed to update ttl cache since %s", tstrerror(terrno));
goto _out;
}
ret = taosHashPut(pTtlMgr->pDirtyUids, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &dirtryEntry,
sizeof(dirtryEntry));
if (ret < 0) {
metaError("ttlMgr update ctime failed to update ttl dirty uids since %s", tstrerror(terrno));
goto _out;
}
ret = 0;
_out:
ttlMgrULock(pTtlMgr);
metaDebug("ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pUpdCtimeCtx->uid, pUpdCtimeCtx->changeTimeMs);
return ret;
}
int ttlMgrFindExpired(STtlManger *pTtlMgr, int64_t timePointMs, SArray *pTbUids) {
ttlMgrRLock(pTtlMgr);
TBC *pCur;
int ret = tdbTbcOpen(pTtlMgr->pTtlIdx, &pCur, NULL);
if (ret < 0) {
goto _out;
}
STtlIdxKeyV1 ttlKey = {0};
ttlKey.deleteTimeMs = timePointMs;
ttlKey.uid = INT64_MAX;
int c = 0;
tdbTbcMoveTo(pCur, &ttlKey, sizeof(ttlKey), &c);
if (c < 0) {
tdbTbcMoveToPrev(pCur);
}
void *pKey = NULL;
int kLen = 0;
while (1) {
ret = tdbTbcPrev(pCur, &pKey, &kLen, NULL, NULL);
if (ret < 0) {
ret = 0;
break;
}
ttlKey = *(STtlIdxKeyV1 *)pKey;
taosArrayPush(pTbUids, &ttlKey.uid);
}
tdbFree(pKey);
tdbTbcClose(pCur);
ret = 0;
_out:
ttlMgrULock(pTtlMgr);
return ret;
}
int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) {
ttlMgrWLock(pTtlMgr);
metaInfo("ttl mgr flush start.");
int ret = -1;
void *pIter = taosHashIterate(pTtlMgr->pDirtyUids, NULL);
while (pIter != NULL) {
STtlDirtyEntry *pEntry = (STtlDirtyEntry *)pIter;
tb_uid_t *pUid = taosHashGetKey(pIter, NULL);
STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid));
if (cacheEntry == NULL) {
metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno));
goto _out;
}
STtlIdxKeyV1 ttlKey;
ttlMgrBuildKey(&ttlKey, cacheEntry->ttlDays, cacheEntry->changeTimeMs, *pUid);
if (pEntry->type == ENTRY_TYPE_UPSERT) {
ret = tdbTbUpsert(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), &cacheEntry->ttlDays, sizeof(cacheEntry->ttlDays),
pTxn);
if (ret < 0) {
metaError("ttlMgr flush failed to flush ttl cache upsert since %s", tstrerror(terrno));
goto _out;
}
} else if (pEntry->type == ENTRY_TYPE_DEL) {
ret = tdbTbDelete(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), pTxn);
if (ret < 0) {
metaError("ttlMgr flush failed to flush ttl cache del since %s", tstrerror(terrno));
goto _out;
}
ret = taosHashRemove(pTtlMgr->pTtlCache, pUid, sizeof(*pUid));
if (ret < 0) {
metaError("ttlMgr flush failed to delete ttl cache since %s", tstrerror(terrno));
goto _out;
}
} else {
metaError("ttlMgr flush failed to flush ttl cache, unknown type: %d", pEntry->type);
goto _out;
}
pIter = taosHashIterate(pTtlMgr->pDirtyUids, pIter);
}
taosHashClear(pTtlMgr->pDirtyUids);
ret = 0;
_out:
ttlMgrULock(pTtlMgr);
metaInfo("ttl mgr flush end.");
return ret;
}
static int32_t ttlMgrRLock(STtlManger *pTtlMgr) {
int32_t ret = 0;
metaTrace("ttlMgr rlock %p", &pTtlMgr->lock);
ret = taosThreadRwlockRdlock(&pTtlMgr->lock);
return ret;
}
static int32_t ttlMgrWLock(STtlManger *pTtlMgr) {
int32_t ret = 0;
metaTrace("ttlMgr wlock %p", &pTtlMgr->lock);
ret = taosThreadRwlockWrlock(&pTtlMgr->lock);
return ret;
}
static int32_t ttlMgrULock(STtlManger *pTtlMgr) {
int32_t ret = 0;
metaTrace("ttlMgr ulock %p", &pTtlMgr->lock);
ret = taosThreadRwlockUnlock(&pTtlMgr->lock);
return ret;
}
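For reference, the manager keeps TTL state in an in-memory cache plus a dirty-uid set; inserts, deletes, and change-time updates only touch those two hashes, and ttlMgrFlush later replays the dirty set against the ttlv1.idx table inside the meta transaction. The toy program below is a minimal sketch of that write-behind pattern, with plain arrays standing in for taosHash and TDB.

```c
/* Minimal sketch of the cache + dirty-set flush pattern (simplified stand-ins). */
#include <inttypes.h>
#include <stdio.h>

#define MAX_TABLES 8

typedef enum { DEMO_CLEAN = 0, DEMO_UPSERT = 1, DEMO_DEL = 2 } DemoDirtyType;

typedef struct {
  int64_t uid;
  int64_t ttlDays;
  int64_t changeTimeMs;
  DemoDirtyType dirty;
} DemoEntry;

static DemoEntry cache[MAX_TABLES];
static int nCache = 0;

static void demoUpsert(int64_t uid, int64_t ttlDays, int64_t changeTimeMs) {
  for (int i = 0; i < nCache; i++) {
    if (cache[i].uid == uid) {
      cache[i].ttlDays = ttlDays;
      cache[i].changeTimeMs = changeTimeMs;
      cache[i].dirty = DEMO_UPSERT;
      return;
    }
  }
  cache[nCache++] = (DemoEntry){.uid = uid, .ttlDays = ttlDays, .changeTimeMs = changeTimeMs, .dirty = DEMO_UPSERT};
}

static void demoDelete(int64_t uid) {
  for (int i = 0; i < nCache; i++) {
    if (cache[i].uid == uid) cache[i].dirty = DEMO_DEL;
  }
}

static void demoFlush(void) {
  /* stands in for tdbTbUpsert/tdbTbDelete against ttlv1.idx inside a TXN */
  for (int i = 0; i < nCache; i++) {
    if (cache[i].dirty == DEMO_UPSERT) {
      printf("upsert uid %" PRId64 ", ttlDays %" PRId64 "\n", cache[i].uid, cache[i].ttlDays);
    } else if (cache[i].dirty == DEMO_DEL) {
      printf("delete uid %" PRId64 "\n", cache[i].uid);
    }
    cache[i].dirty = DEMO_CLEAN; /* like clearing pDirtyUids after a successful flush */
  }
}

int main(void) {
  demoUpsert(1001, 7, 1687651200000LL);  /* create table with ttl */
  demoUpsert(1002, 30, 1687651200000LL);
  demoDelete(1001);                      /* drop table: only the dirty mark changes now */
  demoFlush();                           /* applied later, e.g. at commit time */
  return 0;
}
```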

View File

@ -29,7 +29,7 @@ int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) {
smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
smaError("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
return code;
@ -346,6 +346,43 @@ _end:
return TSDB_CODE_SUCCESS;
}
static int32_t tsmaProcessDelReq(SSma *pSma, int64_t indexUid, SBatchDeleteReq *pDelReq) {
int32_t code = 0;
int32_t lino = 0;
if (taosArrayGetSize(pDelReq->deleteReqs) > 0) {
int32_t len = 0;
tEncodeSize(tEncodeSBatchDeleteReq, pDelReq, len, code);
TSDB_CHECK_CODE(code, lino, _exit);
void *pBuf = rpcMallocCont(len + sizeof(SMsgHead));
if (!pBuf) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
SEncoder encoder;
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), len);
tEncodeSBatchDeleteReq(&encoder, pDelReq);
tEncoderClear(&encoder);
((SMsgHead *)pBuf)->vgId = TD_VID(pSma->pVnode);
SRpcMsg delMsg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = pBuf, .contLen = len + sizeof(SMsgHead)};
code = tmsgPutToQueue(&pSma->pVnode->msgCb, WRITE_QUEUE, &delMsg);
TSDB_CHECK_CODE(code, lino, _exit);
}
_exit:
taosArrayDestroy(pDelReq->deleteReqs);
if (code) {
smaError("vgId:%d, failed at line %d to process delete req for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), lino,
indexUid, tstrerror(code));
}
return code;
}
/**
* @brief Insert/Update Time-range-wise SMA data.
*
@ -355,7 +392,6 @@ _end:
*/
static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
const SArray *pDataBlocks = (const SArray *)msg;
// TODO: destroy SSDataBlocks(msg)
if (!pDataBlocks) {
terrno = TSDB_CODE_TSMA_INVALID_PTR;
smaWarn("vgId:%d, insert tsma data failed since pDataBlocks is NULL", SMA_VID(pSma));
@ -419,8 +455,10 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char
goto _err;
}
// TODO deleteReq
taosArrayDestroy(deleteReq.deleteReqs);
if ((terrno = tsmaProcessDelReq(pSma, indexUid, &deleteReq)) != 0) {
goto _err;
}
#if 0
if (!strncasecmp("td.tsma.rst.tb", pTsmaStat->pTSma->dstTbName, 14)) {
terrno = TSDB_CODE_APP_ERROR;

View File

@ -264,7 +264,7 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
tFormatOffset(buf1, TSDB_OFFSET_LEN, &pRsp->reqOffset);
tFormatOffset(buf2, TSDB_OFFSET_LEN, &pRsp->rspOffset);
tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
return 0;
@ -421,6 +421,35 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
return 0;
}
int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) {
int32_t vgId = TD_VID(pTq->pVnode);
taosWLockLatch(&pTq->lock);
if (taosHashGetSize(pTq->pPushMgr) > 0) {
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
if (ASSERT(pHandle->msg != NULL)) {
tqError("pHandle->msg should not be null");
break;
}else{
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
taosMemoryFree(pHandle->msg);
pHandle->msg = NULL;
}
pIter = taosHashIterate(pTq->pPushMgr, pIter);
}
taosHashClear(pTq->pPushMgr);
}
taosWUnLockLatch(&pTq->lock);
return 0;
}
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
int code = 0;

View File

@ -17,35 +17,16 @@
#include "vnd.h"
int32_t tqProcessSubmitReqForSubscribe(STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
taosWLockLatch(&pTq->lock);
if (taosHashGetSize(pTq->pPushMgr) > 0) {
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
if (ASSERT(pHandle->msg != NULL)) {
tqError("pHandle->msg should not be null");
break;
}else{
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
taosMemoryFree(pHandle->msg);
pHandle->msg = NULL;
}
pIter = taosHashIterate(pTq->pPushMgr, pIter);
}
taosHashClear(pTq->pPushMgr);
if (taosHashGetSize(pTq->pPushMgr) <= 0) {
return 0;
}
// unlock
taosWUnLockLatch(&pTq->lock);
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME_PUSH};
msg.pCont = rpcMallocCont(sizeof(SMsgHead));
msg.contLen = sizeof(SMsgHead);
SMsgHead *pHead = msg.pCont;
pHead->vgId = TD_VID(pTq->pVnode);
pHead->contLen = msg.contLen;
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
return 0;
}

View File

@ -168,7 +168,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if(code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
if (code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
goto end;
}
@ -176,13 +176,17 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST && dataRsp.blockNum == 0) {
// lock
taosWLockLatch(&pTq->lock);
code = tqRegisterPushHandle(pTq, pHandle, pMsg);
taosWUnLockLatch(&pTq->lock);
tDeleteMqDataRsp(&dataRsp);
return code;
int64_t ver = walGetCommittedVer(pTq->pVnode->pWal);
if (pOffset->version >= ver ||
dataRsp.rspOffset.version >= ver) { // check if there are data again to avoid lost data
code = tqRegisterPushHandle(pTq, pHandle, pMsg);
taosWUnLockLatch(&pTq->lock);
goto end;
} else {
taosWUnLockLatch(&pTq->lock);
}
}
// NOTE: this pHandle->consumerId may have been changed already.
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId);
end : {
@ -192,9 +196,8 @@ end : {
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
tDeleteMqDataRsp(&dataRsp);
}
return code;
}
}
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,

View File

@ -691,6 +691,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
.colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)};
if (!pLastCol) {
pLastCol = &noneCol;
reallocVarData(&pLastCol->colVal);
}
taosArraySet(pLastArray, idxKey->idx, pLastCol);
@ -2848,14 +2849,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}
if (!COL_VAL_IS_VALUE(pColVal)) {
@ -3016,14 +3019,16 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}
/*if (COL_VAL_IS_NONE(pColVal)) {

View File

@ -129,15 +129,20 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p
return 0;
}
static int32_t vnodeVgroupIdLen(int32_t vgId) {
char tmp[TSDB_FILENAME_LEN];
sprintf(tmp, "%d", vgId);
return strlen(tmp);
}
int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
int32_t ret = tfsRename(pTfs, srcPath, dstPath);
if (ret != 0) return ret;
int32_t ret = 0;
char oldRname[TSDB_FILENAME_LEN] = {0};
char newRname[TSDB_FILENAME_LEN] = {0};
char tsdbPath[TSDB_FILENAME_LEN] = {0};
char tsdbFilePrefix[TSDB_FILENAME_LEN] = {0};
snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", dstPath, TD_DIRSEP);
snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", srcPath, TD_DIRSEP);
snprintf(tsdbFilePrefix, TSDB_FILENAME_LEN, "tsdb%sv", TD_DIRSEP);
STfsDir *tsdbDir = tfsOpendir(pTfs, tsdbPath);
@ -154,8 +159,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
int32_t tsdbFileVgId = atoi(tsdbFilePrefixPos + 6);
if (tsdbFileVgId == srcVgId) {
char *tsdbFileSurfixPos = strstr(tsdbFilePrefixPos, "f");
if (tsdbFileSurfixPos == NULL) continue;
char *tsdbFileSurfixPos = tsdbFilePrefixPos + 6 + vnodeVgroupIdLen(srcVgId);
tsdbFilePrefixPos[6] = 0;
snprintf(newRname, TSDB_FILENAME_LEN, "%s%d%s", oldRname, dstVgId, tsdbFileSurfixPos);
@ -163,7 +167,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
ret = tfsRename(pTfs, tsdbFile->rname, newRname);
if (ret != 0) {
vInfo("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr());
vError("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr());
tfsClosedir(tsdbDir);
return ret;
}
@ -171,6 +175,21 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
}
tfsClosedir(tsdbDir);
vInfo("vgId:%d, rename dir from %s to %s", dstVgId, srcPath, dstPath);
ret = tfsRename(pTfs, srcPath, dstPath);
if (ret != 0) {
vError("vgId:%d, failed to rename dir from %s to %s since %s", dstVgId, srcPath, dstPath, terrstr());
}
return ret;
}
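
In the rename path above, the new `vnodeVgroupIdLen` helper replaces the earlier `strstr(tsdbFilePrefixPos, "f")` lookup: the suffix of each tsdb file name is found by skipping exactly the decimal digits of the source vgroup id, and the destination id is spliced in with `snprintf`. A self-contained sketch of that splice under assumed file-name layout; the helper and function names here are illustrative, not the vnode API, and the separator is hard-coded to `/`:

```c
#include <stdio.h>
#include <string.h>

#define NAME_LEN 128

/* Number of decimal digits needed to print vgId (same idea as vnodeVgroupIdLen above). */
static int vgroup_id_len(int vgId) {
  char tmp[32];
  snprintf(tmp, sizeof(tmp), "%d", vgId);
  return (int)strlen(tmp);
}

/* Rewrite "...tsdb/v<srcVgId>f..." into "...tsdb/v<dstVgId>f...". */
static int rename_vg_file(const char *oldName, int srcVgId, int dstVgId, char *newName, size_t cap) {
  const char *prefix = strstr(oldName, "tsdb/v");
  if (prefix == NULL) return -1;
  const char *digits = prefix + strlen("tsdb/v");
  const char *suffix = digits + vgroup_id_len(srcVgId);  /* skip exactly the digits of the old id */
  int headLen = (int)(digits - oldName);
  snprintf(newName, cap, "%.*s%d%s", headLen, oldName, dstVgId, suffix);
  return 0;
}

int main(void) {
  char out[NAME_LEN];
  if (rename_vg_file("vnode/vnode3/tsdb/v3f1734ver2.data", 3, 12, out, sizeof(out)) == 0) {
    printf("%s\n", out);  /* prints vnode/vnode3/tsdb/v12f1734ver2.data */
  }
  return 0;
}
```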
int32_t vnodeGetAbsDir(const char *relPath, STfs *pTfs, char *buf, size_t bufLen) {
if (pTfs) {
snprintf(buf, bufLen, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, relPath);
} else {
snprintf(buf, bufLen, "%s", relPath);
}
return 0;
}
@ -179,13 +198,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
char dir[TSDB_FILENAME_LEN] = {0};
int32_t ret = 0;
if (pTfs) {
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, srcPath);
} else {
snprintf(dir, TSDB_FILENAME_LEN, "%s", srcPath);
}
// todo add stat file to handle exception while vnode open
vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN);
ret = vnodeLoadInfo(dir, &info);
if (ret < 0) {
@ -240,6 +253,42 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
return 0;
}
int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
SVnodeInfo info = {0};
char dir[TSDB_FILENAME_LEN] = {0};
vnodeGetAbsDir(dstPath, pTfs, dir, TSDB_FILENAME_LEN);
if (vnodeLoadInfo(dir, &info) == 0) {
if (info.config.vgId != dstVgId) {
vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId);
return -1;
}
return dstVgId;
}
vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN);
if (vnodeLoadInfo(dir, &info) < 0) {
vError("vgId:%d, failed to read vnode config from %s since %s", srcVgId, srcPath, tstrerror(terrno));
return -1;
}
if (info.config.vgId == srcVgId) {
vInfo("vgId:%d, rollback alter hashrange", srcVgId);
return srcVgId;
} else if (info.config.vgId != dstVgId) {
vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId);
return -1;
}
vInfo("vgId:%d, rename %s to %s", dstVgId, srcPath, dstPath);
if (vnodeRenameVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs) < 0) {
vError("vgId:%d, failed to rename vnode from %s to %s since %s", dstVgId, srcPath, dstPath, tstrerror(terrno));
return -1;
}
return dstVgId;
}
void vnodeDestroy(const char *path, STfs *pTfs) {
vInfo("path:%s is removed while destroy vnode", path);
tfsRmdir(pTfs, path);

View File

@ -37,7 +37,7 @@ static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t ver, void *pRe
static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, int64_t ctime, int64_t *pUid) {
static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, int64_t btime, int64_t *pUid) {
int32_t code = 0;
int32_t lino = 0;
@ -66,8 +66,8 @@ static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, i
}
*(int64_t *)(pCoder->data + pCoder->pos) = uid;
// ctime
*(int64_t *)(pCoder->data + pCoder->pos + 8) = ctime;
// btime
*(int64_t *)(pCoder->data + pCoder->pos + 8) = btime;
tEndDecode(pCoder);
@ -84,7 +84,7 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
int32_t lino = 0;
int64_t ctime = taosGetTimestampMs();
int64_t btime = taosGetTimestampMs();
SDecoder dc = {0};
int32_t nReqs;
@ -99,7 +99,7 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
TSDB_CHECK_CODE(code, lino, _exit);
}
for (int32_t iReq = 0; iReq < nReqs; iReq++) {
code = vnodePreprocessCreateTableReq(pVnode, &dc, ctime, NULL);
code = vnodePreprocessCreateTableReq(pVnode, &dc, btime, NULL);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -109,8 +109,35 @@ _exit:
tDecoderClear(&dc);
return code;
}
static int32_t vnodePreProcessAlterTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = TSDB_CODE_INVALID_MSG;
int32_t lino = 0;
SDecoder dc = {0};
tDecoderInit(&dc, (uint8_t *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead));
SVAlterTbReq vAlterTbReq = {0};
int64_t ctimeMs = taosGetTimestampMs();
if (tDecodeSVAlterTbReqSetCtime(&dc, &vAlterTbReq, ctimeMs) < 0) {
goto _exit;
}
code = 0;
_exit:
tDecoderClear(&dc);
if (code) {
vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
} else {
vTrace("vgId:%d %s done, table:%s ctimeMs generated:%" PRId64, TD_VID(pVnode), __func__, vAlterTbReq.tbName,
ctimeMs);
}
return code;
}
extern int64_t tsMaxKeyByPrecision[];
static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int64_t ctime) {
static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int64_t btimeMs, int64_t ctimeMs) {
int32_t code = 0;
int32_t lino = 0;
@ -127,7 +154,7 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
int64_t uid;
if (submitTbData.flags & SUBMIT_REQ_AUTO_CREATE_TABLE) {
code = vnodePreprocessCreateTableReq(pVnode, pCoder, ctime, &uid);
code = vnodePreprocessCreateTableReq(pVnode, pCoder, btimeMs, &uid);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -153,7 +180,7 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
}
// scan and check
TSKEY now = ctime;
TSKEY now = btimeMs;
if (pVnode->config.tsdbCfg.precision == TSDB_TIME_PRECISION_MICRO) {
now *= 1000;
} else if (pVnode->config.tsdbCfg.precision == TSDB_TIME_PRECISION_NANO) {
@ -170,7 +197,6 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
SColData colData = {0};
pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData);
if (colData.flag != HAS_VALUE) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
@ -182,6 +208,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
goto _exit;
}
}
for (uint64_t i = 1; i < nColData; i++) {
pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData);
}
} else {
uint64_t nRow;
if (tDecodeU64v(pCoder, &nRow) < 0) {
@ -200,6 +230,9 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
}
}
*(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs;
pCoder->pos += sizeof(int64_t);
tEndDecode(pCoder);
_exit:
@ -229,15 +262,20 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
TSDB_CHECK_CODE(code, lino, _exit);
}
int64_t ctime = taosGetTimestampMs();
int64_t btimeMs = taosGetTimestampMs();
int64_t ctimeMs = btimeMs;
for (int32_t i = 0; i < nSubmitTbData; i++) {
code = vnodePreProcessSubmitTbData(pVnode, pCoder, ctime);
code = vnodePreProcessSubmitTbData(pVnode, pCoder, btimeMs, ctimeMs);
TSDB_CHECK_CODE(code, lino, _exit);
}
tEndDecode(pCoder);
_exit:
if (code) {
vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
tDecoderClear(pCoder);
return code;
}
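
In the submit preprocessing above, `btimeMs` is taken in milliseconds but the row timestamps it is checked against are stored in the table's configured precision, so the value is scaled to microseconds or nanoseconds first. A standalone sketch of that scaling; the precision tags are written out here as local constants rather than taken from the TDengine headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative precision tags; TDengine defines equivalent TSDB_TIME_PRECISION_* constants. */
enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1, PRECISION_NANO = 2 };

/* Scale a millisecond timestamp to the unit used by a table's precision. */
static int64_t ms_to_precision(int64_t ms, int precision) {
  switch (precision) {
    case PRECISION_MICRO: return ms * 1000;     /* 1 ms = 1,000 us */
    case PRECISION_NANO:  return ms * 1000000;  /* 1 ms = 1,000,000 ns */
    default:              return ms;            /* already milliseconds */
  }
}

int main(void) {
  int64_t nowMs = 1687669200123;  /* example wall-clock time in ms */
  printf("us: %lld\n", (long long)ms_to_precision(nowMs, PRECISION_MICRO));
  printf("ns: %lld\n", (long long)ms_to_precision(nowMs, PRECISION_NANO));
  return 0;
}
```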
@ -257,6 +295,7 @@ static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
code = qWorkerProcessDeleteMsg(&handle, pVnode->pQuery, pMsg, &res);
if (code) goto _exit;
res.ctimeMs = taosGetTimestampMs();
// malloc and encode
tEncodeSize(tEncodeDeleteRes, &res, size, ret);
pCont = rpcMallocCont(size + sizeof(SMsgHead));
@ -278,6 +317,31 @@ _exit:
return code;
}
static int32_t vnodePreProcessBatchDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
int32_t lino = 0;
int64_t ctimeMs = taosGetTimestampMs();
SBatchDeleteReq pReq = {0};
SDecoder *pCoder = &(SDecoder){0};
tDecoderInit(pCoder, (uint8_t *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead));
if (tDecodeSBatchDeleteReqSetCtime(pCoder, &pReq, ctimeMs) < 0) {
code = TSDB_CODE_INVALID_MSG;
}
tDecoderClear(pCoder);
taosArrayDestroy(pReq.deleteReqs);
if (code) {
vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
} else {
vTrace("vgId:%d %s done, ctimeMs generated:%" PRId64, TD_VID(pVnode), __func__, ctimeMs);
}
return code;
}
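
Several of the preprocess hooks above stamp a server-generated `ctimeMs` directly into the already-encoded request (for example `*(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs` in the submit path), presumably so the value is fixed once at preprocess time and travels with the message rather than being recomputed later. A self-contained sketch of patching a fixed-width field inside a serialized buffer; the message layout is invented for illustration, and `memcpy` is used instead of a pointer cast to avoid alignment assumptions:

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Toy message layout: [4-byte type][8-byte ctimeMs][payload...] */
#define CTIME_OFFSET 4

/* Patch the ctimeMs field in place inside an encoded message. */
static int stamp_ctime(uint8_t *buf, size_t len, int64_t ctimeMs) {
  if (len < CTIME_OFFSET + sizeof(int64_t)) return -1;
  memcpy(buf + CTIME_OFFSET, &ctimeMs, sizeof(ctimeMs));  /* safe regardless of buffer alignment */
  return 0;
}

int main(void) {
  uint8_t msg[16] = {0};
  if (stamp_ctime(msg, sizeof(msg), 1687669200123) == 0) {
    int64_t readBack;
    memcpy(&readBack, msg + CTIME_OFFSET, sizeof(readBack));
    printf("stamped ctimeMs = %lld\n", (long long)readBack);
  }
  return 0;
}
```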
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
@ -285,19 +349,25 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
case TDMT_VND_CREATE_TABLE: {
code = vnodePreProcessCreateTableMsg(pVnode, pMsg);
} break;
case TDMT_VND_ALTER_TABLE: {
code = vnodePreProcessAlterTableMsg(pVnode, pMsg);
} break;
case TDMT_VND_SUBMIT: {
code = vnodePreProcessSubmitMsg(pVnode, pMsg);
} break;
case TDMT_VND_DELETE: {
code = vnodePreProcessDeleteMsg(pVnode, pMsg);
} break;
case TDMT_VND_BATCH_DEL: {
code = vnodePreProcessBatchDeleteMsg(pVnode, pMsg);
} break;
default:
break;
}
_exit:
if (code) {
vError("vgId%d failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
return code;
@ -505,7 +575,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME) && !syncIsReadyForRead(pVnode->sync)) {
if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME || pMsg->msgType == TDMT_VND_TMQ_CONSUME_PUSH) && !syncIsReadyForRead(pVnode->sync)) {
vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
return 0;
}
@ -526,6 +596,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TMQ_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_VND_TMQ_CONSUME_PUSH:
return tqProcessPollPush(pVnode->pTq, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_APP_ERROR;
@ -559,8 +631,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableCfg(pVnode, pMsg, true);
case TDMT_VND_BATCH_META:
return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_TMQ_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
// case TDMT_VND_TMQ_CONSUME:
// return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_VND_TMQ_VG_WALINFO:
return tqProcessVgWalInfoReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
@ -587,9 +659,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
}
}
// TODO: remove the function
void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
// TODO
// blockDebugShowDataBlocks(data, __func__);
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
}
@ -637,8 +707,8 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq,
goto end;
}
vDebug("vgId:%d, drop ttl table req will be processed, time:%d", pVnode->config.vgId, ttlReq.timestamp);
int32_t ret = metaTtlDropTable(pVnode->pMeta, ttlReq.timestamp, tbUids);
vDebug("vgId:%d, drop ttl table req will be processed, time:%" PRId32, pVnode->config.vgId, ttlReq.timestampSec);
int32_t ret = metaTtlDropTable(pVnode->pMeta, (int64_t)ttlReq.timestampSec * 1000, tbUids);
if (ret != 0) {
goto end;
}
@ -646,7 +716,7 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq,
tqUpdateTbUidList(pVnode->pTq, tbUids, false);
}
vnodeAsyncRentention(pVnode, ttlReq.timestamp);
vnodeAsyncRentention(pVnode, ttlReq.timestampSec);
end:
taosArrayDestroy(tbUids);
@ -1382,6 +1452,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
code = tsdbInsertTableData(pVnode->pTsdb, ver, pSubmitTbData, &affectedRows);
if (code) goto _exit;
code = metaUpdateChangeTime(pVnode->pMeta, pSubmitTbData->uid, pSubmitTbData->ctimeMs);
if (code) goto _exit;
pSubmitRsp->affectedRows += affectedRows;
}
@ -1636,6 +1709,14 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t ver, void *pRe
TD_VID(pVnode), terrstr(), deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs);
}
code = metaUpdateChangeTime(pVnode->pMeta, uid, deleteReq.ctimeMs);
if (code < 0) {
terrno = code;
vError("vgId:%d, update change time error since %s, suid:%" PRId64 ", uid:%" PRId64 ", start ts:%" PRId64
", end ts:%" PRId64,
TD_VID(pVnode), terrstr(), deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs);
}
tDecoderClear(&mr.coder);
}
metaReaderClear(&mr);
@ -1664,8 +1745,10 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, in
ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0));
for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) {
code = tsdbDeleteTableData(pVnode->pTsdb, ver, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid),
pRes->skey, pRes->ekey);
uint64_t uid = *(uint64_t *)taosArrayGet(pRes->uidList, iUid);
code = tsdbDeleteTableData(pVnode->pTsdb, ver, pRes->suid, uid, pRes->skey, pRes->ekey);
if (code) goto _err;
code = metaUpdateChangeTime(pVnode->pMeta, uid, pRes->ctimeMs);
if (code) goto _err;
}

View File

@ -950,8 +950,8 @@ int32_t ctgCloneMetaOutput(STableMetaOutput* output, STableMetaOutput** pOutput)
int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList);
void ctgFreeJob(void* job);
void ctgFreeHandleImpl(SCatalog* pCtg);
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update);
int32_t ctgGetVgIdsFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, char* dbFName, const char* pTbs[], int32_t tbNum,
int32_t* vgId);

View File

@ -568,7 +568,7 @@ int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName*
return TSDB_CODE_SUCCESS;
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pConn ? &pConn->mgmtEps : NULL, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
_return:
@ -629,7 +629,7 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf
return TSDB_CODE_SUCCESS;
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, pVgroup));
ctgRUnlockVgInfo(dbCache);

View File

@ -1112,7 +1112,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@ -1132,7 +1132,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@ -1282,7 +1282,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));
ctgTaskDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@ -1302,7 +1302,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgTaskDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@ -1501,7 +1501,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@ -1536,7 +1536,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@ -1799,7 +1799,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq* tReq, int32_t flag, SName* pName, int
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@ -1948,7 +1948,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask* pTask) {
if (NULL == pTask->res) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
@ -1996,7 +1996,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
tReq.pTask = pTask;
tReq.msgIdx = -1;
CTG_ERR_JRET(
ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgGetVgInfosFromHashValue(pCtg, &pConn->mgmtEps, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
@ -2375,7 +2375,7 @@ int32_t ctgGetTbCfgCb(SCtgTask* pTask) {
SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}
CTG_RET(ctgLaunchGetTbCfgTask(pTask));
@ -2395,7 +2395,7 @@ int32_t ctgGetTbTagCb(SCtgTask* pTask) {
if (NULL == pCtx->pVgInfo) {
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}
CTG_RET(ctgLaunchGetTbTagTask(pTask));

View File

@ -2989,7 +2989,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
}
*pVgroup = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, *pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, *pVgroup));
_return:

View File

@ -969,7 +969,7 @@ int32_t ctgHashValueComp(void const* lp, void const* rp) {
return 0;
}
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
int32_t code = 0;
CTG_ERR_RET(ctgMakeVgArray(dbInfo));
@ -977,6 +977,14 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
char db[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, db);
if (IS_SYS_DBNAME(pTableName->dbname)) {
pVgroup->vgId = MNODE_HANDLE;
if (pMgmtEps) {
memcpy(&pVgroup->epSet, pMgmtEps, sizeof(pVgroup->epSet));
}
return TSDB_CODE_SUCCESS;
}
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum);
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);
@ -1020,23 +1028,53 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
CTG_RET(code);
}
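
The new `pMgmtEps` parameter lets lookups on system databases (such as `information_schema`) skip the vgroup hash entirely: the result is a pseudo vgroup whose id is the mnode handle, with the endpoint set copied from the management endpoints when the caller has them. A simplified sketch of that routing decision under assumed stand-in types; the structs, the `MNODE_HANDLE` value, and `route_table` are placeholders for illustration, not the catalog API:

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MNODE_HANDLE (-1)  /* placeholder value for illustration */

/* Simplified stand-ins for the endpoint-set and vgroup structs. */
typedef struct { int numOfEps; char fqdn[3][64]; uint16_t port[3]; int inUse; } ep_set_t;
typedef struct { int32_t vgId; ep_set_t epSet; } vgroup_info_t;

static int is_sys_db(const char *db) {
  return strcmp(db, "information_schema") == 0 || strcmp(db, "performance_schema") == 0;
}

/* Route a table to either the mnode (system db) or a data vgroup (user db). */
static void route_table(const char *db, const ep_set_t *mgmtEps, vgroup_info_t *out) {
  if (is_sys_db(db)) {
    out->vgId = MNODE_HANDLE;
    if (mgmtEps) out->epSet = *mgmtEps;  /* use the management endpoints when available */
    return;
  }
  out->vgId = 2;  /* placeholder: a real lookup hashes the table name into the vgroup list */
}

int main(void) {
  ep_set_t mgmt = {.numOfEps = 1, .fqdn = {"localhost"}, .port = {6030}, .inUse = 0};
  vgroup_info_t vg = {0};
  route_table("information_schema", &mgmt, &vg);
  printf("vgId=%d ep=%s:%u\n", vg.vgId, vg.epSet.fqdn[vg.epSet.inUse],
         (unsigned)vg.epSet.port[vg.epSet.inUse]);
  return 0;
}
```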
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SMetaRes res = {0};
SVgroupInfo* vgInfo = NULL;
CTG_ERR_RET(ctgMakeVgArray(dbInfo));
int32_t tbNum = taosArrayGetSize(pNames);
char* pSep = strchr(dbFName, '.');
if (pSep && IS_SYS_DBNAME(pSep + 1)) {
SVgroupInfo mgmtInfo = {0};
mgmtInfo.vgId = MNODE_HANDLE;
if (pMgmgEpSet) {
memcpy(&mgmtInfo.epSet, pMgmgEpSet, sizeof(mgmtInfo.epSet));
}
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == vgInfo) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(vgInfo, &mgmtInfo, sizeof(mgmtInfo));
ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
if (update) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx);
SMetaRes* pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i);
pRes->pRes = vgInfo;
} else {
res.pRes = vgInfo;
taosArrayPush(pCtx->pResList, &res);
}
}
return TSDB_CODE_SUCCESS;
}
int32_t vgNum = taosArrayGetSize(dbInfo->vgArray);
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
SVgroupInfo* vgInfo = NULL;
int32_t tbNum = taosArrayGetSize(pNames);
if (1 == vgNum) {
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));

View File

@ -145,7 +145,7 @@ typedef struct SExplainCtx {
SHashObj *groupHash; // Hash<SExplainGroup>
} SExplainCtx;
#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : "desc")
#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : ORDER_DESC == _order ? "desc" : "unknown")
#define EXPLAIN_JOIN_STRING(_type) ((JOIN_TYPE_INNER == _type) ? "Inner join" : "Join")
#define INVERAL_TIME_FROM_PRECISION_TO_UNIT(_t, _u, _p) (((_u) == 'n' || (_u) == 'y') ? (_t) : (convertTimeFromPrecisionToUnit(_t, _p, _u)))
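
The revised order macro above prints "unknown" instead of silently reporting "desc" when a node's ts order is neither `ORDER_ASC` nor `ORDER_DESC` (for example, when the input order was never set). A small standalone rendering of the same chained conditional; the enum values here are stand-ins, the real ones live in the query headers:

```c
#include <stdio.h>

/* Stand-in order values for illustration. */
enum { ORDER_ASC = 1, ORDER_DESC = 2 };

#define ORDER_STRING(_order) \
  ((ORDER_ASC == (_order)) ? "asc" : (ORDER_DESC == (_order)) ? "desc" : "unknown")

int main(void) {
  printf("%s %s %s\n", ORDER_STRING(ORDER_ASC), ORDER_STRING(ORDER_DESC), ORDER_STRING(0));
  return 0;
}
```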

View File

@ -499,6 +499,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pPrjNode->pProjections->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPrjNode->node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pPrjNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -544,6 +547,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pJoinNode->pTargets->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pJoinNode->node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pJoinNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -597,6 +603,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_GROUPS_FORMAT, pAggNode->pGroupKeys->length);
}
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pAggNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -716,6 +725,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
case QUERY_NODE_PHYSICAL_PLAN_SORT: {
SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_SORT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
@ -796,9 +810,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -847,6 +862,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -895,6 +914,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pFillNode->mode));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pFillNode->node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pFillNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@ -1080,6 +1102,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.inputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));

View File

@ -399,6 +399,8 @@ typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;
bool mergeResultBlock;
int32_t inputTsOrder;
int32_t outputTsOrder;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@ -411,8 +413,6 @@ typedef struct SIntervalAggOperatorInfo {
STimeWindow win; // query time range
bool timeWindowInterpo; // interpolation needed or not
SArray* pInterpCols; // interpolation columns
int32_t resultTsOrder; // result timestamp order
int32_t inputOrder; // input data ts order
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
STimeWindowAggSupp twAggSup;
SArray* pPrevValues; // SArray<SGroupKeys> used to keep the previous not null value for interpolation.

View File

@ -146,7 +146,7 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
* @return
*/
uint64_t tsortGetGroupId(STupleHandle* pVHandle);
void* tsortGetBlockInfo(STupleHandle* pVHandle);
/**
*
* @param pSortHandle

View File

@ -108,6 +108,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pInfo->groupKeyOptimized = pAggNode->groupKeyOptimized;
pInfo->groupId = UINT64_MAX;
pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder;
setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, true, OP_NOT_OPENED, pInfo,
pTaskInfo);
@ -164,10 +166,8 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
int64_t st = taosGetTimestampUs();
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
int32_t code = TSDB_CODE_SUCCESS;
int32_t order = pAggInfo->binfo.inputTsOrder;
bool hasValidBlock = false;
while (1) {
@ -185,12 +185,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
}
}
hasValidBlock = true;
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
T_LONG_JMP(pTaskInfo->env, code);
}
pAggInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
// there is an scalar expression that needs to be calculated before apply the group aggregation.
if (pAggInfo->scalarExprSup.pExprInfo != NULL && !blockAllocated) {
@ -204,7 +199,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
setInputDataBlock(pSup, pBlock, order, scanFlag, true);
setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true);
code = doAggregateImpl(pOperator, pSup->pCtx);
if (code != 0) {
destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
@ -461,7 +456,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
code = getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (code) {
qError("failed to get buff page size, rowSize:%d", pAggSup->resultRowSize);
return code;
}
if (!osTempSpaceAvailable()) {
code = TSDB_CODE_NO_DISKSPACE;

View File

@ -208,6 +208,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
pRes->info.id.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes);
pRes->info.rows = 1;
pRes->info.scanFlag = MAIN_SCAN;
SExprSupp* pSup = &pInfo->pseudoExprSup;
int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pRes,

View File

@ -120,6 +120,8 @@ SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNo
initBasicInfo(&pInfo->binfo, pResBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->binfo.inputTsOrder = physiNode->inputTsOrder;
pInfo->binfo.outputTsOrder = physiNode->outputTsOrder;
pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pEventWindowNode->window.watermark,
.calTrigger = pEventWindowNode->window.triggerType};
@ -174,6 +176,7 @@ void destroyEWindowOperatorInfo(void* param) {
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupAggSup(&pInfo->aggSup);
cleanupExprSupp(&pInfo->scalarSup);
taosMemoryFreeClear(param);
}
@ -182,7 +185,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SExprSupp* pSup = &pOperator->exprSupp;
int32_t order = TSDB_ORDER_ASC;
int32_t order = pInfo->binfo.inputTsOrder;
SSDataBlock* pRes = pInfo->binfo.pRes;
@ -195,6 +198,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) {
break;
}
pRes->info.scanFlag = pBlock->info.scanFlag;
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);

View File

@ -520,6 +520,7 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pCo
// data from mnode
pRes->info.dataLoad = 1;
pRes->info.rows = pBlock->info.rows;
pRes->info.scanFlag = MAIN_SCAN;
relocateColumnData(pRes, pColList, pBlock->pDataBlock, false);
blockDataDestroy(pBlock);
}

View File

@ -922,8 +922,13 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) {
*defaultPgsz = 4096;
uint32_t last = *defaultPgsz;
while (*defaultPgsz < rowSize * 4) {
*defaultPgsz <<= 1u;
if (*defaultPgsz < last) {
return TSDB_CODE_INVALID_PARA;
}
last = *defaultPgsz;
}
// The default buffer for each operator in query is 10MB.
@ -932,6 +937,9 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
*defaultBufsz = 4096 * 2560;
if ((*defaultBufsz) <= (*defaultPgsz)) {
(*defaultBufsz) = (*defaultPgsz) * 4;
if (*defaultBufsz < ((int64_t)(*defaultPgsz)) * 4) {
return TSDB_CODE_INVALID_PARA;
}
}
return 0;
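
`getBufferPgSize` above now fails instead of looping forever or returning a bogus size when the doubled page size overflows: `last` remembers the previous value so wrap-around can be detected, and the buffer-size multiplication is checked against a 64-bit product the same way. A self-contained sketch of the doubling-with-overflow-guard idea; the error codes here are plain ints rather than TSDB codes:

```c
#include <stdint.h>
#include <stdio.h>

/* Pick the smallest power-of-two page size >= 4 * rowSize, detecting 32-bit overflow. */
static int pick_page_size(uint32_t rowSize, uint32_t *pgSize) {
  uint32_t pg = 4096;
  uint32_t last = pg;
  while (pg < rowSize * 4u) {
    pg <<= 1u;
    if (pg < last) return -1;  /* wrapped past 2^32: rowSize is too large for a 32-bit page */
    last = pg;
  }
  *pgSize = pg;
  return 0;
}

int main(void) {
  uint32_t pg = 0;
  if (pick_page_size(65000, &pg) == 0) printf("page size: %u\n", pg);     /* 262144 */
  if (pick_page_size(0x3FFFFFFFu, &pg) != 0) printf("row too large\n");   /* overflow detected */
  return 0;
}
```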

View File

@ -178,16 +178,15 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
blockDataCleanup(pResBlock);
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag, false);
int32_t order = pInfo->pFillInfo->order;
SOperatorInfo* pDownstream = pOperator->pDownstream[0];
#if 0
// the scan order may be different from the output result order for agg interval operator.
if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) {
order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder;
}
#endif
doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order);
if (pResBlock->info.rows > 0) {
@ -206,13 +205,14 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
pResBlock->info.scanFlag = pBlock->info.scanFlag;
pBlock->info.dataLoad = 1;
blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
blockDataCleanup(pInfo->pRes);
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
blockDataEnsureCapacity(pInfo->pFinalRes, pBlock->info.rows);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
doApplyScalarCalculation(pOperator, pBlock, order, pBlock->info.scanFlag);
if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) {
if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) {
@ -405,7 +405,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
: &((SIntervalAggOperatorInfo*)downstream->info)->interval;
int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
int32_t order = (pPhyFillNode->node.inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
int32_t type = convertFillType(pPhyFillNode->mode);
SResultInfo* pResultInfo = &pOperator->resultInfo;

View File

@ -378,9 +378,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
return buildGroupResultDataBlock(pOperator);
}
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
int32_t order = pInfo->binfo.inputTsOrder;
int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
@ -390,13 +388,10 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
break;
}
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(&pOperator->exprSupp, pBlock, order, scanFlag, true);
setInputDataBlock(&pOperator->exprSupp, pBlock, order, pBlock->info.scanFlag, true);
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
@ -481,6 +476,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder;
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
optrDefaultBufFn, NULL);
@ -762,6 +759,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
break;
}
pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
@ -871,7 +869,12 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
uint32_t defaultBufsz = 0;
pInfo->binfo.pRes = createDataBlockFromDescNode(pPartNode->node.pOutputDataBlockDesc);
getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
int32_t code = getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
goto _error;
}
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;
@ -880,7 +883,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;

View File

@ -253,9 +253,9 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
}
pInfo->inputOrder = TSDB_ORDER_ASC;
if (pJoinNode->inputTsOrder == ORDER_ASC) {
if (pJoinNode->node.inputTsOrder == ORDER_ASC) {
pInfo->inputOrder = TSDB_ORDER_ASC;
} else if (pJoinNode->inputTsOrder == ORDER_DESC) {
} else if (pJoinNode->node.inputTsOrder == ORDER_DESC) {
pInfo->inputOrder = TSDB_ORDER_DESC;
}
@ -684,6 +684,7 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
// the pDataBlock are always the same one, no need to call this again
pRes->info.rows = nrows;
pRes->info.dataLoad = 1;
pRes->info.scanFlag = MAIN_SCAN;
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}

View File

@ -93,6 +93,8 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->binfo.pRes = pResBlock;
pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder;
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
pInfo->mergeDataBlocks = false;
@ -238,8 +240,9 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
int64_t st = 0;
int32_t order = 0;
int32_t order = pInfo->inputTsOrder;
int32_t scanFlag = 0;
int32_t code = TSDB_CODE_SUCCESS;
if (pOperator->cost.openCost == 0) {
st = taosGetTimestampUs();
@ -284,10 +287,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
break;
}
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
if (pProjectInfo->mergeDataBlocks) {
pFinalRes->info.scanFlag = scanFlag = pBlock->info.scanFlag;
} else {
pRes->info.scanFlag = scanFlag = pBlock->info.scanFlag;
}
setInputDataBlock(pSup, pBlock, order, scanFlag, false);
@ -406,6 +409,8 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
}
pInfo->binfo.pRes = pResBlock;
pInfo->binfo.inputTsOrder = pNode->inputTsOrder;
pInfo->binfo.outputTsOrder = pNode->outputTsOrder;
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pSup->pCtx, numOfExpr);
setOperatorInfo(pOperator, "IndefinitOperator", QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC, false, OP_NOT_OPENED, pInfo,
@ -429,18 +434,13 @@ _error:
static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOperatorInfo* downstream,
SExecTaskInfo* pTaskInfo) {
int32_t order = 0;
int32_t scanFlag = 0;
SIndefOperatorInfo* pIndefInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pIndefInfo->binfo;
SExprSupp* pSup = &pOperator->exprSupp;
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
int32_t order = pInfo->inputTsOrder;
int32_t scanFlag = pBlock->info.scanFlag;
int32_t code = TSDB_CODE_SUCCESS;
// there is an scalar expression that needs to be calculated before apply the group aggregation.
SExprSupp* pScalarSup = &pIndefInfo->scalarSup;
@ -506,6 +506,7 @@ SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
setOperatorCompleted(pOperator);
break;
}
pInfo->pRes->info.scanFlag = pBlock->info.scanFlag;
if (pIndefInfo->groupId == 0 && pBlock->info.id.groupId != 0) {
pIndefInfo->groupId = pBlock->info.id.groupId; // this is the initial group result

View File

@ -712,6 +712,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime;
pBlock->info.scanFlag = pTableScanInfo->base.scanFlag;
return pBlock;
}
return NULL;

Some files were not shown because too many files have changed in this diff