Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/3.0_mhli

Commit: 1986a4019d

Jenkinsfile2 (88 lines changed)

@@ -38,40 +38,21 @@ def pre_test(){
sh '''
cd ${WK}
git reset --hard
git remote prune origin
git fetch
cd ${WKC}
git reset --hard
git clean -fxd
git remote prune origin
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
cd ${WKC}
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
sh '''
cd ${WK}
git checkout 2.0
cd ${WKC}
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
sh '''
cd ${WK}
git checkout 3.0
cd ${WKC}
git checkout 3.0
'''
} else {
sh '''
cd ${WK}
git checkout develop
cd ${WKC}
git checkout develop
'''
}
sh '''
cd ${WK}
git checkout ''' + env.CHANGE_TARGET + '''
cd ${WKC}
git checkout ''' + env.CHANGE_TARGET + '''
'''
}
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
@@ -169,49 +150,24 @@ def pre_test_win(){
bat '''
cd %WIN_INTERNAL_ROOT%
git reset --hard
git remote prune origin
git fetch
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git reset --hard
git remote prune origin
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout ''' + env.CHANGE_TARGET + '''
'''
}
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
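Both hunks converge on the same pattern: instead of a branch-specific if/else ladder, the branch to check out is taken directly from `env.CHANGE_TARGET`. As a hedged illustration only, the standalone shell sketch below mirrors that simplified flow; it assumes `CHANGE_TARGET`, `WK`, and `WKC` are exported by the CI environment, exactly as the Jenkinsfile does.

```bash
# Sketch of the simplified checkout flow (assumes CHANGE_TARGET, WK and WKC
# are provided by the CI environment, e.g. CHANGE_TARGET=3.0).
cd "${WK}"
git reset --hard
git remote prune origin
git fetch
git checkout "${CHANGE_TARGET}"

cd "${WKC}"
git reset --hard
git clean -fxd
git remote prune origin
git fetch
git checkout "${CHANGE_TARGET}"
```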
README-CN.md (25 lines changed)

@@ -50,19 +50,12 @@ TDengine 目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后

## 安装工具

### Ubuntu 16.04 及以上版本 & Debian:
### Ubuntu 18.04 及以上版本 & Debian:

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```

### Ubuntu 14.04:

```bash
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
export PATH=/usr/lib/binutils-2.26/bin:$PATH
```

编译或打包 JDBC 驱动源码,需安装 Java JDK 8 或以上版本和 Apache Maven 2.7 或以上版本。

安装 OpenJDK 8:

@@ -89,7 +82,7 @@ taosTools 是用于 TDengine 的辅助工具软件集合。目前它包含 taosB

sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
```

### CentOS 7:
### CentOS 7.9:

```bash
sudo yum install -y gcc gcc-c++ make cmake git openssl-devel

@@ -195,7 +188,7 @@ apt install autoconf

cmake .. -DJEMALLOC_ENABLED=true
```

在 X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
在 X86-64、X86、arm64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 等。

aarch64:

@@ -203,18 +196,6 @@ aarch64:

cmake .. -DCPUTYPE=aarch64 && cmake --build .
```

aarch32:

```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```

mips64:

```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```

### Windows 系统

如果你使用的是 Visual Studio 2013 版本:
README.md (27 lines changed)

@@ -53,19 +53,12 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t

## Install build dependencies

### Ubuntu 16.04 and above or Debian
### Ubuntu 18.04 and above or Debian

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```

### Ubuntu 14.04

```bash
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
export PATH=/usr/lib/binutils-2.26/bin:$PATH
```

To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.

To install openjdk-8:

@@ -91,7 +84,7 @@ To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debia

sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
```

### CentOS 7
### CentOS 7.9

```bash
sudo yum install epel-release

@@ -212,8 +205,8 @@ apt install autoconf

cmake .. -DJEMALLOC_ENABLED=true
```

TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform.
You can also specify CPUTYPE option like aarch64 too if the detection result is not correct:

aarch64:

@@ -221,18 +214,6 @@ aarch64:

cmake .. -DCPUTYPE=aarch64 && cmake --build .
```

aarch32:

```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```

mips64:

```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```

### On Windows platform

If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
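Putting the pieces above together, a minimal out-of-tree build on a supported Linux host might look like the sketch below. This is an illustration rather than the canonical procedure: it assumes the Ubuntu 18.04 dependencies listed above are already installed and that the auto-detected CPU type is acceptable.

```bash
# Clone the sources and build in a separate directory.
git clone https://github.com/taosdata/TDengine.git
cd TDengine
mkdir -p debug && cd debug

# Configure; add options such as -DCPUTYPE=aarch64 or -DJEMALLOC_ENABLED=true
# only when they apply to your target (see the notes above).
cmake ..
cmake --build .
```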
@@ -22,6 +22,9 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
IF (BUILD_TOOLS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
ENDIF ()

IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)

@@ -97,13 +97,13 @@ IF ("${CPUTYPE}" STREQUAL "")
ELSE ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
IF (${CPUTYPE} MATCHES "aarch32" OR ${CPUTYPE} MATCHES "arm32")
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
SET(TD_ARM_32 TRUE)
ELSEIF (${CPUTYPE} MATCHES "aarch64")
ELSEIF (${CPUTYPE} MATCHES "aarch64" OR ${CPUTYPE} MATCHES "arm64")
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ADD_DEFINITIONS("-D_TD_ARM_")

@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 766dcc4
GIT_TAG 3d21433
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 3c7dafe
GIT_TAG 57bdfbf
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -1,8 +1,8 @@

# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 29424d5
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
GIT_TAG 7a54d21
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -17,7 +17,6 @@ Currently, TDengine's native interface connectors can support platforms such as
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM32** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **MIPS** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |

Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.
@@ -47,27 +47,28 @@ If the displayed content is followed by `...` you can use this command to change

You can change the behavior of TDengine CLI by specifying command-line parameters. The following parameters are commonly used.

- -h, --host=HOST: FQDN of the server where the TDengine server is to be connected. Default is to connect to the local service
- -P, --port=PORT: Specify the port number to be used by the server. Default is `6030`
- -u, --user=USER: the user name to use when connecting. Default is `root`
- -p, --password=PASSWORD: the password to use when connecting to the server. Default is `taosdata`
- -h HOST: FQDN of the server where the TDengine server is to be connected. Default is to connect to the local service
- -P PORT: Specify the port number to be used by the server. Default is `6030`
- -u USER: the user name to use when connecting. Default is `root`
- -p PASSWORD: the password to use when connecting to the server. Default is `taosdata`
- -?, --help: print out all command-line arguments

And many more parameters.

- -c, --config-dir: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
- -C, --dump-config: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
- -d, --database=DATABASE: Specify the database to use when connecting to the server
- -D, --directory=DIRECTORY: Import the SQL script file in the specified path
- -f, --file=FILE: Execute the SQL script file in non-interactive mode
- -k, --check=CHECK: Specify the table to be checked
- -l, --pktlen=PKTLEN: Test package size to be used for network testing
- -n, --netrole=NETROLE: test scope for network connection test, default is `startup`. The value can be `client`, `server`, `rpc`, `startup`, `sync`, `speed`, or `fqdn`.
- -r, --raw-time: output the timestamp format as unsigned 64-bits integer (uint64_t in C language)
- -s, --commands=COMMAND: execute SQL commands in non-interactive mode
- -S, --pkttype=PKTTYPE: Specify the packet type used for network testing. The default is TCP, can be specified as either TCP or UDP when `speed` is specified to `netrole` parameter
- -T, --thread=THREADNUM: The number of threads to import data in multi-threaded mode
- -s, --commands: Run TDengine CLI commands without entering the terminal
- -a AUTHSTR: The auth string to use when connecting to the server
- -A: Generate auth string from password
- -c CONFIGDIR: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
- -C: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
- -d DATABASE: Specify the database to use when connecting to the server
- -f FILE: Execute the SQL script file in non-interactive mode
- -k: Check the service status: 0: unavailable, 1: network ok, 2: service ok, 3: service degraded, 4: exiting
- -l PKTLEN: Test package length to be used for network testing
- -n NETROLE: test scope for network connection test, default is `client`. The value can be `client`, `server`
- -N PKTNUM: Test package numbers to be used for network testing
- -r: output the timestamp format as unsigned 64-bits integer (uint64_t in C language)
- -s COMMAND: execute SQL commands in non-interactive mode
- -t: Check the details of the service status, status values same as -k
- -w DISPLAYWIDTH: Specify the display width of client-side columns
- -z, --timezone=TIMEZONE: Specify time zone. Default is the value of current configuration file
- -V, --version: Print out the current version number
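To make the revised flags above concrete, here is a hedged example of common invocations; the host name is a placeholder, and the commands assume the TDengine CLI (`taos`) is installed locally.

```bash
# Connect to a remote instance and run a single statement non-interactively
# (add -p to be prompted for the password; the FQDN below is a placeholder).
taos -h tdengine.example.com -P 6030 -u root -s "show databases;"

# Check whether the local service is up (see -k above for the status codes).
taos -k
```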
@ -5,12 +5,11 @@ description: "List of platforms supported by TDengine server, client, and connec
|
|||
|
||||
## List of supported platforms for TDengine server
|
||||
|
||||
| | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** |
|
||||
| ------------ | -------------- | ------------------- | --------------- |
|
||||
| X64 | ● | ● | |
|
||||
| MIPS64 | | | ● |
|
||||
| ARM64 | | ○ | ○ |
|
||||
| Alpha64 | | | ○ |
|
||||
| | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **Other Linux** | **UOS** | **Kylin** | **Ningsi V60/V80** | **HUAWEI EulerOS** |
|
||||
| ------------------ | ----------------- | ---------------- | ---------------- | --------------- | ------- | --------- | ------------------ | ------------------ |
|
||||
| X64 | ● | ● | ● | | ● | ● | ● | |
|
||||
| Raspberry Pi ARM64 | | | | ● | | | | |
|
||||
| HUAWEI cloud ARM64 | | | | | | | | ● |
|
||||
|
||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
||||
|
||||
|
@ -20,15 +19,15 @@ TDengine's connector can support a wide range of platforms, including X64/X86/AR
|
|||
|
||||
The comparison matrix is as follows.
|
||||
|
||||
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS** | **Alpha** |
|
||||
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** |
|
||||
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● |
|
||||
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- |
|
||||
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- |
|
||||
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- |
|
||||
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- |
|
||||
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **MIPS** | **Alpha** |
|
||||
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** |
|
||||
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● |
|
||||
| **Python** | ● | ● | ● | ○ | ● | ● | -- |
|
||||
| **Go** | ● | ● | ● | ○ | ● | ○ | -- |
|
||||
| **NodeJs** | ● | ● | ○ | ○ | ● | ○ | -- |
|
||||
| **C#** | ● | ● | ○ | ○ | ○ | ○ | -- |
|
||||
| **RESTful** | ● | ● | ● | ● | ● | ● | ● |
|
||||
|
||||
Note: ● means the official test is verified, ○ means the unofficial test is verified, -- means not verified.
|
||||
|
|
|
@ -25,7 +25,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
|
|||
- _taosBenchmark_: TDengine testing tool
|
||||
- _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
|
||||
- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other softwares
|
||||
- _tarbitrator_: provides arbitration for two-node cluster deployments
|
||||
- _TDinsight.sh_: script to download TDinsight and install it
|
||||
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
|
||||
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: 文档首页
|
|||
slug: /
|
||||
---
|
||||
|
||||
TDengine是一款开源、[高性能](https://www.taosdata.com/fast)、云原生的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
|
||||
TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
|
||||
|
||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: 产品简介
|
|||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
|
||||
TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
|
||||
|
||||
本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
|
||||
|
||||
|
@ -33,17 +33,17 @@ TDengine的主要功能如下:
|
|||
|
||||
由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
|
||||
|
||||
- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
|
||||
- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
|
||||
|
||||
- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
|
||||
- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
|
||||
|
||||
- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
|
||||
- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
|
||||
|
||||
- **分析能力**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
|
||||
- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
|
||||
|
||||
- **简单易用**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
|
||||
- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
|
||||
|
||||
- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
|
||||
- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
|
||||
|
||||
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
|
||||
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
sidebar_label: Docker
|
||||
title: 通过 Docker 快速体验 TDengine
|
||||
---
|
||||
:::info
|
||||
如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
||||
:::
|
||||
|
||||
本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。
|
||||
|
||||
|
@ -63,6 +66,10 @@ taos>
|
|||
|
||||
taosAdapter 是 TDengine 中提供 REST 服务的组件。下面这条命令会在容器中同时启动 `taosd` 和 `taosadapter` 两个服务组件。默认 Docker 镜像同时启动 TDengine 后台服务 taosd 和 taosAdatper。
|
||||
|
||||
```shell
|
||||
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
|
||||
```
|
||||
|
||||
可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。
|
||||
|
||||
```
|
||||
|
|
|
@ -10,10 +10,12 @@ TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲
|
|||
|
||||
用户可以通过 UDF 实现两类函数: 标量函数和聚合函数。标量函数对每行数据返回一个值,如求绝对值 abs,正弦函数 sin,字符串拼接函数 concat 等。聚合函数对多行数据进行返回一个值,如求平均数 avg,最大值 max 等。
|
||||
|
||||
实现udf时,需要实现规定的接口函数。接口函数的名称是 udf 名称,或者是 udf 名称和特定后缀(_start, _finish, _init, _destroy)的连接后。以下列表中的scalarfn,aggfn, udf需要替换成udf函数名。
|
||||
- 标量函数需要实现标量接口函数 scalarfn,
|
||||
实现 UDF 时,需要实现规定的接口函数
|
||||
- 标量函数需要实现标量接口函数 scalarfn 。
|
||||
- 聚合函数需要实现聚合接口函数 aggfn_start , aggfn , aggfn_finish。
|
||||
- 无论标量函数还是聚合函数,如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destory。
|
||||
- 如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destroy。
|
||||
|
||||
接口函数的名称是 UDF 名称,或者是 UDF 名称和特定后缀(_start, _finish, _init, _destroy)的连接。列表中的scalarfn,aggfn, udf需要替换成udf函数名。
|
||||
|
||||
## 实现标量函数
|
||||
标量函数实现模板如下
|
||||
|
@ -102,17 +104,19 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
|
||||
接口函数的名称是 udf 名称,或者是 udf 名称和特定后缀(_start, _finish, _init, _destroy)的连接。以下描述中函数名称中的 scalarfn,aggfn, udf 需要替换成udf函数名。
|
||||
|
||||
接口函数返回值表示是否成功,如果错误返回错误代码,错误代码见taoserror.h。参数类型见数据结构定义。
|
||||
接口函数返回值表示是否成功,如果错误返回错误代码,错误代码见taoserror.h。
|
||||
|
||||
接口函数参数类型见数据结构定义。
|
||||
|
||||
### 标量接口函数
|
||||
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
其中 udf 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算。
|
||||
其中 scalarFn 是函数名的占位符。这个函数对数据块进行标量计算,通过设置resultColumn结构体中的变量设置值
|
||||
|
||||
- 其中各参数的具体含义是:
|
||||
参数的具体含义是:
|
||||
- inputDataBlock: 输入的数据块
|
||||
- resultColumn: 输出列
|
||||
- resultColumn: 输出列。输出列
|
||||
|
||||
### 聚合接口函数
|
||||
|
||||
|
@ -121,22 +125,22 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
`int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)`
|
||||
|
||||
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
|
||||
其中 aggfn 是函数名的占位符。其中各参数的具体含义是:
|
||||
|
||||
其中 aggfn 是函数名的占位符。首先调用aggfn_start生成结果buffer,然后相关的数据会被分为多个行数据块,对每个数据块调用 aggfn 用数据块更新中间结果,最后再调用 aggfn_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。
|
||||
|
||||
参数的具体含义是:
|
||||
- interBuf:中间结果 buffer。
|
||||
- inputBlock:输入的数据块。
|
||||
- newInterBuf:新的中间结果buffer。
|
||||
- result:最终结果。
|
||||
|
||||
|
||||
其计算过程为:首先调用aggfn_start生成结果buffer,然后相关的数据会被分为多个行数据块,对每个行数据块调用 aggfn 用数据块更新中间结果,最后再调用 aggfn_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。
|
||||
|
||||
### UDF 初始化和销毁
|
||||
`int32_t udf_init()`
|
||||
|
||||
`int32_t udf_destroy()`
|
||||
|
||||
其中 udf 是函数名的占位符,可以替换成自己的函数名。udf_init 完成初始化工作。 udf_destroy 完成清理工作。如果没有初始化工作,无需定义udf_init函数。如果没有清理工作,无需定义udf_destroy函数。
|
||||
其中 udf 是函数名的占位符。udf_init 完成初始化工作。 udf_destroy 完成清理工作。如果没有初始化工作,无需定义udf_init函数。如果没有清理工作,无需定义udf_destroy函数。
|
||||
|
||||
|
||||
## UDF 数据结构
|
||||
|
@ -190,10 +194,10 @@ typedef struct SUdfInterBuf {
|
|||
数据结构说明如下:
|
||||
|
||||
- SUdfDataBlock 数据块包含行数 numOfRows 和列数 numCols。udfCols[i] (0 <= i <= numCols-1)表示每一列数据,类型为SUdfColumn*。
|
||||
- SUdfColumn 包含列的数据类型定义 colMeta 和列的数据colData。
|
||||
- SUdfColumn 包含列的数据类型定义 colMeta 和列的数据 colData。
|
||||
- SUdfColumnMeta 成员定义同 taos.h 数据类型定义。
|
||||
- SUdfColumnData 数据可以变长,varLenCol定义变长数据,fixLenCol定义定长数据。
|
||||
- SUdfInterBuf 定义中间结构buffer,以及buffer中结果个数 numOfResult
|
||||
- SUdfColumnData 数据可以变长,varLenCol 定义变长数据,fixLenCol 定义定长数据。
|
||||
- SUdfInterBuf 定义中间结构 buffer,以及 buffer 中结果个数 numOfResult
|
||||
|
||||
为了更好的操作以上数据结构,提供了一些便利函数,定义在 taosudf.h。
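As a hedged, illustrative note on packaging: the interface functions described above are compiled into a position-independent shared library that the server can load. The file name `scalarfn.c` below is a placeholder, and the exact compiler flags may differ by platform.

```bash
# Build the UDF source into a shared object; taosudf.h ships with the
# TDengine client installation and must be on the include path.
gcc -g -O0 -fPIC -shared scalarfn.c -o libscalarfn.so
```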
|
||||
|
||||
|
|
|
@ -3,27 +3,11 @@ sidebar_label: Kubernetes
|
|||
title: 在 Kubernetes 上部署 TDengine 集群
|
||||
---
|
||||
|
||||
## 配置 ConfigMap
|
||||
以下配置文件可以从 [GitHub 仓库](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine) 下载。
|
||||
|
||||
为 TDengine 创建 `taoscfg.yaml`,此文件中的配置将作为环境变量传入 TDengine 镜像,更新此配置将导致所有 TDengine POD 重启。
|
||||
## 配置 Service 服务
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: taoscfg
|
||||
labels:
|
||||
app: tdengine
|
||||
data:
|
||||
CLUSTER: "1"
|
||||
TAOS_KEEP: "3650"
|
||||
TAOS_DEBUG_FLAG: "135"
|
||||
```
|
||||
|
||||
## 配置服务
|
||||
|
||||
创建一个 service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
|
||||
创建一个 Service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
@ -38,45 +22,9 @@ spec:
|
|||
- name: tcp6030
|
||||
protocol: "TCP"
|
||||
port: 6030
|
||||
- name: tcp6035
|
||||
protocol: "TCP"
|
||||
port: 6035
|
||||
- name: tcp6041
|
||||
protocol: "TCP"
|
||||
port: 6041
|
||||
- name: udp6030
|
||||
protocol: "UDP"
|
||||
port: 6030
|
||||
- name: udp6031
|
||||
protocol: "UDP"
|
||||
port: 6031
|
||||
- name: udp6032
|
||||
protocol: "UDP"
|
||||
port: 6032
|
||||
- name: udp6033
|
||||
protocol: "UDP"
|
||||
port: 6033
|
||||
- name: udp6034
|
||||
protocol: "UDP"
|
||||
port: 6034
|
||||
- name: udp6035
|
||||
protocol: "UDP"
|
||||
port: 6035
|
||||
- name: udp6036
|
||||
protocol: "UDP"
|
||||
port: 6036
|
||||
- name: udp6037
|
||||
protocol: "UDP"
|
||||
port: 6037
|
||||
- name: udp6038
|
||||
protocol: "UDP"
|
||||
port: 6038
|
||||
- name: udp6039
|
||||
protocol: "UDP"
|
||||
port: 6039
|
||||
- name: udp6040
|
||||
protocol: "UDP"
|
||||
port: 6040
|
||||
selector:
|
||||
app: "tdengine"
|
||||
```
|
||||
|
@ -109,7 +57,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: "tdengine"
|
||||
image: "zitsen/taosd:develop"
|
||||
image: "tdengine/tdengine:3.0.0.0"
|
||||
imagePullPolicy: "Always"
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
|
@ -118,45 +66,9 @@ spec:
|
|||
- name: tcp6030
|
||||
protocol: "TCP"
|
||||
containerPort: 6030
|
||||
- name: tcp6035
|
||||
protocol: "TCP"
|
||||
containerPort: 6035
|
||||
- name: tcp6041
|
||||
protocol: "TCP"
|
||||
containerPort: 6041
|
||||
- name: udp6030
|
||||
protocol: "UDP"
|
||||
containerPort: 6030
|
||||
- name: udp6031
|
||||
protocol: "UDP"
|
||||
containerPort: 6031
|
||||
- name: udp6032
|
||||
protocol: "UDP"
|
||||
containerPort: 6032
|
||||
- name: udp6033
|
||||
protocol: "UDP"
|
||||
containerPort: 6033
|
||||
- name: udp6034
|
||||
protocol: "UDP"
|
||||
containerPort: 6034
|
||||
- name: udp6035
|
||||
protocol: "UDP"
|
||||
containerPort: 6035
|
||||
- name: udp6036
|
||||
protocol: "UDP"
|
||||
containerPort: 6036
|
||||
- name: udp6037
|
||||
protocol: "UDP"
|
||||
containerPort: 6037
|
||||
- name: udp6038
|
||||
protocol: "UDP"
|
||||
containerPort: 6038
|
||||
- name: udp6039
|
||||
protocol: "UDP"
|
||||
containerPort: 6039
|
||||
- name: udp6040
|
||||
protocol: "UDP"
|
||||
containerPort: 6040
|
||||
env:
|
||||
# POD_NAME for FQDN config
|
||||
- name: POD_NAME
|
||||
|
@ -190,14 +102,13 @@ spec:
|
|||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- taos
|
||||
- -s
|
||||
- "show mnodes"
|
||||
- taos-check
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5000
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 6030
|
||||
exec:
|
||||
command:
|
||||
- taos-check
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
volumeClaimTemplates:
|
||||
|
@ -206,44 +117,78 @@ spec:
|
|||
spec:
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "csi-rbd-sc"
|
||||
storageClassName: "standard"
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
## 启动集群
|
||||
## 使用 kubectl 命令部署 TDengine 集群
|
||||
|
||||
将前述三个文件添加到 Kubernetes 集群中:
|
||||
顺序执行以下命令。
|
||||
|
||||
```bash
|
||||
kubectl apply -f taoscfg.yaml
|
||||
kubectl apply -f taosd-service.yaml
|
||||
kubectl apply -f tdengine.yaml
|
||||
|
||||
```
|
||||
|
||||
上面的配置将生成一个两节点的 TDengine 集群,dnode 是自动配置的,可以使用 `show dnodes` 命令查看当前集群的节点:
|
||||
上面的配置将生成一个三节点的 TDengine 集群,dnode 是自动配置的,可以使用 show dnodes 命令查看当前集群的节点:
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
|
||||
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
|
||||
|
||||
kubectl exec -i -t tdengine-2 -- taos -s "show dnodes"
|
||||
```
|
||||
|
||||
输出如下:
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | |
|
||||
Query OK, 2 row(s) in set (0.000997s)
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
Query OK, 3 rows in database (0.003655s)
|
||||
```
|
||||
|
||||
## 使能端口转发
|
||||
|
||||
利用 kubectl 端口转发功能可以使应用可以访问 Kubernetes 环境运行的 TDengine 集群。
|
||||
|
||||
```
|
||||
kubectl port-forward tdengine-0 6041:6041 &
|
||||
```
|
||||
|
||||
使用 curl 命令验证 TDengine REST API 使用的 6041 接口。
|
||||
|
||||
```
|
||||
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
||||
Handling connection for 6041
|
||||
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
||||
```
|
||||
|
||||
## 使用 dashboard 进行图形化管理
|
||||
|
||||
minikube 提供 dashboard 命令支持图形化管理界面。
|
||||
|
||||
```
|
||||
$ minikube dashboard
|
||||
* Verifying dashboard health ...
|
||||
* Launching proxy ...
|
||||
* Verifying proxy health ...
|
||||
* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
|
||||
http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
|
||||
```
|
||||
|
||||
对于某些公有云环境,minikube 绑定在 127.0.0.1 IP 地址上无法通过远程访问,需要使用 kubectl proxy 命令将端口映射到 0.0.0.0 IP 地址上,再通过浏览器访问虚拟机公网 IP 和端口以及相同的 dashboard URL 路径即可远程访问 dashboard。
|
||||
|
||||
```
|
||||
$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0'
|
||||
```
|
||||
|
||||
## 集群扩容
|
||||
|
@ -252,14 +197,12 @@ TDengine 集群支持自动扩容:
|
|||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=4
|
||||
|
||||
```
|
||||
|
||||
上面命令行中参数 `--replica=4` 表示要将 TDengine 集群扩容到 4 个节点,执行后首先检查 POD 的状态:
|
||||
|
||||
```bash
|
||||
kubectl get pods -l app=tdengine
|
||||
|
||||
```
|
||||
|
||||
输出如下:
|
||||
|
@ -270,102 +213,112 @@ tdengine-0 1/1 Running 0 161m
|
|||
tdengine-1 1/1 Running 0 161m
|
||||
tdengine-2 1/1 Running 0 32m
|
||||
tdengine-3 1/1 Running 0 32m
|
||||
|
||||
```
|
||||
|
||||
此时 POD 的状态仍然是 Running,TDengine 集群中的 dnode 状态要等 POD 状态为 `ready` 之后才能看到:
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
kubectl exec -i -t tdengine-3 -- taos -s "show dnodes"
|
||||
```
|
||||
|
||||
扩容后的四节点 TDengine 集群的 dnode 列表:
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | |
|
||||
4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | |
|
||||
Query OK, 4 row(s) in set (0.001293s)
|
||||
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
4 | tdengine-3.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:33:16.039 | |
|
||||
Query OK, 4 rows in database (0.008377s)
|
||||
```
|
||||
|
||||
## 集群缩容
|
||||
|
||||
TDengine 的缩容并没有自动化,我们尝试将一个三节点集群缩容到两节点。
|
||||
由于 TDengine 集群在扩缩容时会对数据进行节点间迁移,使用 kubectl 命令进行缩容需要首先使用 "drop dnodes" 命令,节点删除完成后再进行 Kubernetes 集群缩容。
|
||||
|
||||
首先,确认一个三节点 TDengine 集群正常工作,在 TDengine CLI 中查看 dnode 的状态:
|
||||
注意:由于 Kubernetes Statefulset 中 Pod 的只能按创建顺序逆序移除,所以 TDengine drop dnode 也需要按照创建顺序逆序移除,否则会导致 Pod 处于错误状态。
|
||||
|
||||
```
|
||||
$ kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 4"
|
||||
```
|
||||
|
||||
```bash
|
||||
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | |
|
||||
Query OK, 3 row(s) in set (0.001101s)
|
||||
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
||||
Query OK, 3 rows in database (0.004861s)
|
||||
```
|
||||
|
||||
想要安全的缩容,首先需要将节点从 dnode 列表中移除,也即从集群中移除:
|
||||
确认移除成功后(使用 kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" 查看和确认 dnode 列表),使用 kubectl 命令移除 POD:
|
||||
|
||||
```
|
||||
kubectl scale statefulsets tdengine --replicas=3
|
||||
```
|
||||
|
||||
最后一个 POD 将会被删除。使用命令 kubectl get pods -l app=tdengine 查看POD状态:
|
||||
|
||||
```
|
||||
$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 4m7s
|
||||
tdengine-1 1/1 Running 0 3m55s
|
||||
tdengine-2 1/1 Running 0 2m28s
|
||||
```
|
||||
|
||||
POD删除后,需要手动删除PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。
|
||||
|
||||
```bash
|
||||
kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'"
|
||||
|
||||
```
|
||||
|
||||
通过 `show dnodes` 命令确认移除成功后,移除相应的 POD:
|
||||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=2
|
||||
|
||||
```
|
||||
|
||||
最后一个 POD 会被删除,使用 `kubectl get pods -l app=tdengine` 查看集群状态:
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 3h40m
|
||||
tdengine-1 1/1 Running 0 3h40m
|
||||
|
||||
```
|
||||
|
||||
POD 删除后,需要手动删除 PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。
|
||||
|
||||
```bash
|
||||
kubectl delete pvc taosdata-tdengine-2
|
||||
|
||||
$ kubectl delete pvc taosdata-tdengine-3
|
||||
```
|
||||
|
||||
此时的集群状态是安全的,需要时还可以再次进行扩容:
|
||||
|
||||
```bash
|
||||
kubectl scale statefulsets tdengine --replicas=3
|
||||
$ kubectl scale statefulsets tdengine --replicas=4
|
||||
statefulset.apps/tdengine scaled
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 35m
|
||||
tdengine-1 1/1 Running 0 34m
|
||||
tdengine-2 1/1 Running 0 12m
|
||||
tdengine-3 0/1 ContainerCreating 0 4s
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
tdengine-0 1/1 Running 0 35m
|
||||
tdengine-1 1/1 Running 0 34m
|
||||
tdengine-2 1/1 Running 0 12m
|
||||
tdengine-3 0/1 Running 0 7s
|
||||
it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
|
||||
```
|
||||
|
||||
`show dnodes` 输出如下:
|
||||
|
||||
```
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
|
||||
4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | |
|
||||
|
||||
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
|
||||
5 | tdengine-2.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:01:36.479 | |
|
||||
6 | tdengine-3.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:13:54.411 | |
|
||||
Query OK, 4 row(s) in set (0.001348s)
|
||||
```
|
||||
|
||||
## 删除集群
|
||||
## 清理 TDengine 集群
|
||||
|
||||
完整移除 TDengine 集群,需要分别清理 statefulset、svc、configmap、pvc。
|
||||
|
||||
|
@ -381,26 +334,26 @@ kubectl delete configmap taoscfg
|
|||
|
||||
### 错误一
|
||||
|
||||
扩容到四节点之后缩容到两节点,删除的 POD 会进入 offline 状态:
|
||||
未进行 "drop dnode" 直接进行缩容,由于 TDengine 尚未删除节点,缩容 pod 导致 TDengine 集群中部分节点处于 offline 状态。
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
$ kubectl exec -it tdengine-0 -- taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
|
||||
3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout |
|
||||
4 | tdengine-3.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout |
|
||||
Query OK, 4 row(s) in set (0.001236s)
|
||||
|
||||
|
||||
1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | |
|
||||
2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | |
|
||||
5 | tdengine-2.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:01:36.479 | status msg timeout |
|
||||
6 | tdengine-3.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:13:54.411 | status msg timeout |
|
||||
Query OK, 4 row(s) in set (0.001323s)
|
||||
```
|
||||
|
||||
但 `drop dnode` 的行为并不会按照预期进行,且下次集群重启后,所有的 dnode 节点将无法启动,dropping 状态无法退出。
|
||||
|
||||
### 错误二
|
||||
|
||||
TDengine 集群会持有 replica 参数,如果缩容后的节点数小于这个值,集群将无法使用:
|
||||
|
|
|
@ -22,7 +22,7 @@ Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes,可以参
|
|||
TDengine Chart 尚未发布到 Helm 仓库,当前可以从 GitHub 直接下载:
|
||||
|
||||
```bash
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/main/helm/tdengine-0.3.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
|
||||
|
||||
```
|
||||
|
||||
|
@ -38,7 +38,7 @@ kubectl get storageclass
|
|||
之后,使用 helm 命令安装:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz \
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
--set storage.className=<your storage class name>
|
||||
|
||||
```
|
||||
|
@ -46,7 +46,7 @@ helm install tdengine tdengine-0.3.0.tgz \
|
|||
在 minikube 环境下,可以设置一个较小的容量避免超出磁盘可用空间:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz \
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
--set storage.className=standard \
|
||||
--set storage.dataSize=2Gi \
|
||||
--set storage.logSize=10Mi
|
||||
|
@ -83,14 +83,14 @@ TDengine 支持 `values.yaml` 自定义。
|
|||
通过 helm show values 可以获取 TDengine Chart 支持的全部 values 列表:
|
||||
|
||||
```bash
|
||||
helm show values tdengine-0.3.0.tgz
|
||||
helm show values tdengine-3.0.0.tgz
|
||||
|
||||
```
|
||||
|
||||
你可以将结果保存为 values.yaml,之后可以修改其中的各项参数,如 replica 数量,存储类名称,容量大小,TDengine 配置等,然后使用如下命令安装 TDengine 集群:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-0.3.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
|
||||
|
||||
```
|
||||
|
||||
|
@ -107,37 +107,17 @@ image:
|
|||
prefix: tdengine/tdengine
|
||||
#pullPolicy: Always
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
#tag: "2.4.0.5"
|
||||
# tag: "3.0.0.0"
|
||||
|
||||
service:
|
||||
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
|
||||
type: ClusterIP
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp:
|
||||
[
|
||||
6030,
|
||||
6031,
|
||||
6032,
|
||||
6033,
|
||||
6034,
|
||||
6035,
|
||||
6036,
|
||||
6037,
|
||||
6038,
|
||||
6039,
|
||||
6040,
|
||||
6041,
|
||||
6042,
|
||||
6043,
|
||||
6044,
|
||||
6045,
|
||||
6060,
|
||||
]
|
||||
# UDP range 6030-6039
|
||||
udp: [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039]
|
||||
tcp: [6030, 6041, 6042, 6043, 6044, 6046, 6047, 6048, 6049, 6060]
|
||||
# UDP range
|
||||
udp: [6044, 6045]
|
||||
|
||||
arbitrator: true
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
@ -182,11 +162,14 @@ clusterDomainSuffix: ""
|
|||
#
|
||||
# Btw, keep quotes "" around the value like below, even the value will be number or not.
|
||||
taoscfg:
|
||||
# Starts as cluster or not, must be 0 or 1.
|
||||
# 0: all pods will start as a seperate TDengine server
|
||||
# 1: pods will start as TDengine server cluster. [default]
|
||||
CLUSTER: "1"
|
||||
|
||||
# number of replications, for cluster only
|
||||
TAOS_REPLICA: "1"
|
||||
|
||||
# number of management nodes in the system
|
||||
TAOS_NUM_OF_MNODES: "1"
|
||||
|
||||
# number of days per DB file
|
||||
# TAOS_DAYS: "10"
|
||||
|
@ -422,7 +405,7 @@ kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode "<you dnode in
|
|||
|
||||
```
|
||||
|
||||
## 删除集群
|
||||
## 清理集群
|
||||
|
||||
Helm 管理下,清理操作也变得简单:
|
||||
|
||||
|
|
|
@ -89,10 +89,6 @@ T = 最新事件时间 - watermark
|
|||
|
||||
无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。
|
||||
|
||||
## 流式计算的数据填充策略
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算与会话窗口(session window)
|
||||
|
||||
```sql
|
||||
|
@ -105,14 +101,6 @@ window_clause: {
|
|||
|
||||
其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。
|
||||
|
||||
## 流式计算的监控与流任务分布查询
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算的内存控制与存算分离
|
||||
|
||||
TODO
|
||||
|
||||
## 流式计算的暂停与恢复
|
||||
|
||||
```sql
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: 元数据库
|
||||
title: 元数据库
|
||||
sidebar_label: 元数据
|
||||
title: 存储元数据的 Information_Schema 数据库
|
||||
---
|
||||
|
||||
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
|
||||
|
|
|
@ -0,0 +1,129 @@
|
|||
---
|
||||
sidebar_label: 统计数据
|
||||
title: 存储统计数据的 Performance_Schema 数据库
|
||||
---
|
||||
|
||||
TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和表结构。
|
||||
|
||||
## PERF_APP
|
||||
|
||||
提供接入集群的应用(客户端)的相关信息。也可以使用 SHOW APPS 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | ------------------------------- |
|
||||
| 1 | app_id | UBIGINT | 客户端 ID |
|
||||
| 2 | ip | BINARY(16) | 客户端地址 |
|
||||
| 3 | pid | INT | 客户端进程 号 |
|
||||
| 4 | name | BINARY(24) | 客户端名称 |
|
||||
| 5 | start_time | TIMESTAMP | 客户端启动时间 |
|
||||
| 6 | insert_req | UBIGINT | insert 请求次数 |
|
||||
| 7 | insert_row | UBIGINT | insert 插入行数 |
|
||||
| 8 | insert_time | UBIGINT | insert 请求的处理时间,单位微秒 |
|
||||
| 9 | insert_bytes | UBIGINT | insert 请求消息字节数 |
|
||||
| 10 | fetch_bytes | UBIGINT | 查询结果字节数 |
|
||||
| 11 | query_time | UBIGINT | 查询请求处理时间 |
|
||||
| 12 | slow_query | UBIGINT | 慢查询(处理时间 >= 3 秒)个数 |
|
||||
| 13 | total_req | UBIGINT | 总请求数 |
|
||||
| 14 | current_req | UBIGINT | 当前正在处理的请求个数 |
|
||||
| 15 | last_access | TIMESTAMP | 最后更新时间 |
|
||||
|
||||
## PERF_CONNECTIONS
|
||||
|
||||
数据库的连接的相关信息。也可以使用 SHOW CONNECTIONS 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | -------------------------------------------------- |
|
||||
| 1 | conn_id | INT | 连接 ID |
|
||||
| 2 | user | BINARY(24) | 用户名 |
|
||||
| 3 | app | BINARY(24) | 客户端名称 |
|
||||
| 4 | pid | UINT | 发起此连接的客户端在自己所在服务器或主机上的进程号 |
|
||||
| 5 | end_point | BINARY(128) | 客户端地址 |
|
||||
| 6 | login_time | TIMESTAMP | 登录时间 |
|
||||
| 7 | last_access | TIMESTAMP | 最后更新时间 |
|
||||
|
||||
## PERF_QUERIES
|
||||
|
||||
提供当前正在执行的 SQL 语句的信息。也可以使用 SHOW QUERIES 来查询这些信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | ---------------------------- |
|
||||
| 1 | kill_id | UBIGINT | 用来停止查询的 ID |
|
||||
| 2 | query_id | INT | 查询 ID |
|
||||
| 3 | conn_id | UINT | 连接 ID |
|
||||
| 4 | app | BINARY(24) | app 名称 |
|
||||
| 5 | pid | INT | app 在自己所在主机上的进程号 |
|
||||
| 6 | user | BINARY(24) | 用户名 |
|
||||
| 7 | end_point | BINARY(16) | 客户端地址 |
|
||||
| 8 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 9 | exec_usec | BIGINT | 已执行时间 |
|
||||
| 10 | stable_query | BOOL | 是否是超级表查询 |
|
||||
| 11 | sub_num | INT | 子查询数量 |
|
||||
| 12 | sub_status | BINARY(1000) | 子查询状态 |
|
||||
| 13 | sql | BINARY(1024) | SQL 语句 |
|
||||
|
||||
## PERF_TOPICS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------------------ |
|
||||
| 1 | topic_name | BINARY(192) | topic 名称 |
|
||||
| 2 | db_name | BINARY(64) | topic 相关的 DB |
|
||||
| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
|
||||
| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
|
||||
|
||||
## PERF_CONSUMERS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ----------------------------------------------------------- |
|
||||
| 1 | consumer_id | BIGINT | 消费者的唯一 ID |
|
||||
| 2 | consumer_group | BINARY(192) | 消费者组 |
|
||||
| 3 | client_id | BINARY(192) | 用户自定义字符串,通过创建 consumer 时指定 client_id 来展示 |
|
||||
| 4 | status | BINARY(20) | 消费者当前状态 |
|
||||
| 5 | topics | BINARY(204) | 被订阅的 topic。若订阅多个 topic,则展示为多行 |
|
||||
| 6 | up_time | TIMESTAMP | 第一次连接 taosd 的时间 |
|
||||
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
|
||||
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
|
||||
|
||||
## PERF_SUBSCRIPTIONS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ------------------------ |
|
||||
| 1 | topic_name | BINARY(204) | 被订阅的 topic |
|
||||
| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
|
||||
| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
|
||||
| 4 | consumer_id | BIGINT | 消费者的唯一 id |
|
||||
|
||||
## PERF_TRANS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :--------------: | ------------ | -------- |
|
||||
| 1 | id | INT | |
|
||||
| 2 | create_time | TIMESTAMP | |
|
||||
| 3 | stage | BINARY(12) | |
|
||||
| 4 | db1 | BINARY(64) | |
|
||||
| 5 | db2 | BINARY(64) | |
|
||||
| 6 | failed_times | INT | |
|
||||
| 7 | last_exec_time | TIMESTAMP | |
|
||||
| 8 | last_action_info | BINARY(511) | |
|
||||
|
||||
## PERF_SMAS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------------------------------- |
|
||||
| 1 | sma_name | BINARY(192) | 时间维度的预计算 (time-range-wise sma) 名称 |
|
||||
| 2 | create_time | TIMESTAMP | sma 创建时间 |
|
||||
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
|
||||
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
|
||||
|
||||
## PERF_STREAMS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :----------: | ------------ | --------------------------------------- |
|
||||
| 1 | stream_name | BINARY(64) | 流计算名称 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
|
||||
| 4 | status | BINARY(20) | 流当前状态 |
|
||||
| 5 | source_db | BINARY(64) | 源数据库 |
|
||||
| 6 | target_db | BINARY(64) | 目的数据库 |
|
||||
| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
|
|
@ -5,11 +5,12 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ
|
|||
|
||||
本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。
|
||||
|
||||
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。
|
||||
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
|
||||
|
||||
本章节 SQL 语法遵循如下约定:
|
||||
|
||||
- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身
|
||||
- 用大写字母表示关键字,但 SQL 本身并不区分关键字和标识符的大小写
|
||||
- 用小写字母表示需要用户输入的内容
|
||||
- \[ \] 表示内容为可选项,但不能输入 [] 本身
|
||||
- | 表示多选一,选择其中一个即可,但不能输入 | 本身
|
||||
- … 表示前面的项可重复多个
|
||||
|
|
|
@ -10,14 +10,13 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
|
||||
目前 TDengine 的原生接口连接器可支持的平台包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。对照矩阵如下:
|
||||
|
||||
| **CPU** | **OS** | **JDBC** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||
| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
|
||||
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
|
||||
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **ARM32** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **MIPS 龙芯** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
|
||||
| **Alpha 申威** | **Linux** | ○ | ○ | -- | -- | -- | -- | ○ |
|
||||
| **X86 海光** | **Linux** | ○ | ○ | ○ | -- | -- | -- | ○ |
|
||||
|
@ -32,6 +31,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
|
||||
| **TDengine 版本** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||
| --------------------- | -------- | ---------- | ------------ | ------------- | --------------- | -------- |
|
||||
| **3.0.0.0 及以上** | 3.0.0 | 当前版本 | 3.0 分支 | 3.0.0 | 3.0.0 | 当前版本 |
|
||||
| **2.4.0.14 及以上** | 2.0.38 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
| **2.4.0.6 及以上** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
| **2.4.0.4 - 2.4.0.5** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 |
|
||||
|
@ -48,9 +48,8 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
|
||||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **连续查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **订阅功能** | 支持 | 支持 | 支持 | 支持 | 支持 | 暂不支持 |
|
||||
| ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
||||
|
@ -58,17 +57,17 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。
|
||||
:::
|
||||
|
||||
### 使用 REST 接口
|
||||
### 使用 http (REST 或 WebSocket) 接口
|
||||
|
||||
| **功能特性** | **Java** | **Python** | **Go** | **C#(暂不支持)** | **Node.js** | **Rust** |
|
||||
| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
|
||||
| **连接管理** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **连续查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
|
||||
| **参数绑定** | 不支持 | 不支持 | 不支持 | N/A | 不支持 | 不支持 |
|
||||
| **订阅功能** | 不支持 | 不支持 | 不支持 | N/A | 不支持 | 不支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 暂不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 暂不支持 | 暂不支持 | N/A | 暂不支持 | 暂不支持 |
|
||||
| **参数绑定** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| ** TMQ ** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | N/A | 不支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | N/A | 不支持 | 不支持 |
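As a quick, hedged sanity check of the REST interface itself (independent of any connector), a single SQL statement can be posted to taosAdapter with curl; the port and credentials below are the documented defaults.

```bash
# Run one statement against the REST endpoint exposed by taosAdapter (port 6041).
curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```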
|
||||
|
||||
:::warning
|
||||
|
|
|
@ -279,7 +279,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多

2. 调用 `taos_stmt_prepare()` 解析 INSERT 语句;
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname()` 来设置表名;
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags()` 来设置表名和 TAGS 的值;
5. 调用 `taos_stmt_bind_param_batch()` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
5. 调用 `taos_stmt_bind_param_batch()` 以多行的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
6. 调用 `taos_stmt_add_batch()` 把当前绑定的参数加入批处理;
7. 可以重复第 3 ~ 6 步,为批处理加入更多的数据行;
8. 调用 `taos_stmt_execute()` 执行已经准备好的批处理指令;
@ -9,7 +9,7 @@ description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。
`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例。REST 连接实现的功能集合和原生连接有少量不同。


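下面补充一个连接方式的简化示意(非本文档原有示例,仅作说明):原生连接使用 `jdbc:TAOS://` 前缀并依赖本地 taosc 驱动(默认端口 6030),REST 连接使用 `jdbc:TAOS-RS://` 前缀经由 taosAdapter(默认端口 6041);示例中的主机名、库名、账号均为假设值,请按实际环境调整。

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class ConnectExample {
    public static void main(String[] args) throws Exception {
        // 原生连接:通过本地 taosc 客户端驱动,主机名与库名为假设值
        String nativeUrl = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(nativeUrl)) {
            System.out.println("native connection established: " + !conn.isClosed());
        }

        // REST 连接:经由 taosAdapter,走 HTTP/WebSocket,端口默认为 6041
        String restUrl = "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(restUrl)) {
            System.out.println("REST connection established: " + !conn.isClosed());
        }
    }
}
```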
@ -41,19 +41,19 @@ REST 连接支持所有能运行 Java 的平台。

TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:

| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) |
| ----------------- | --------------------------------- | ---------------------------------- |
| TIMESTAMP         | java.lang.Long                    | java.sql.Timestamp                 |
| INT               | java.lang.Integer                 | java.lang.Integer                  |
| BIGINT            | java.lang.Long                    | java.lang.Long                     |
| FLOAT             | java.lang.Float                   | java.lang.Float                    |
| DOUBLE            | java.lang.Double                  | java.lang.Double                   |
| SMALLINT          | java.lang.Short                   | java.lang.Short                    |
| TINYINT           | java.lang.Byte                    | java.lang.Byte                     |
| BOOL              | java.lang.Boolean                 | java.lang.Boolean                  |
| BINARY            | java.lang.String                  | byte array                         |
| NCHAR             | java.lang.String                  | java.lang.String                   |
| JSON              | -                                 | java.lang.String                   |
| TDengine DataType | JDBCType           |
| ----------------- | ------------------ |
| TIMESTAMP         | java.sql.Timestamp |
| INT               | java.lang.Integer  |
| BIGINT            | java.lang.Long     |
| FLOAT             | java.lang.Float    |
| DOUBLE            | java.lang.Double   |
| SMALLINT          | java.lang.Short    |
| TINYINT           | java.lang.Byte     |
| BOOL              | java.lang.Boolean  |
| BINARY            | byte array         |
| NCHAR             | java.lang.String   |
| JSON              | java.lang.String   |

**注意**:JSON 类型仅在 tag 中支持。
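下面给出一段按上述映射读取结果集的简化示意代码(非本文档原有示例):假设库 test 中存在一张包含 ts(TIMESTAMP)、c1(INT)、c2(BINARY)、c3(NCHAR)列的表 demo,表名与结构均为假设,仅演示各类型对应的 Java 取值方法。

```java
import java.sql.*;

public class TypeMappingExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts, c1, c2, c3 from demo limit 1")) {
            if (rs.next()) {
                Timestamp ts = rs.getTimestamp(1); // TIMESTAMP -> java.sql.Timestamp
                int c1 = rs.getInt(2);             // INT -> java.lang.Integer
                byte[] c2 = rs.getBytes(3);        // BINARY -> byte array
                String c3 = rs.getString(4);       // NCHAR -> java.lang.String
                System.out.println(ts + ", " + c1 + ", " + new String(c2) + ", " + c3);
            }
        }
    }
}
```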
@ -198,7 +198,7 @@ url 中的配置参数如下:

- user:登录 TDengine 用户名,默认值 'root'。
- password:用户登录密码,默认值 'taosdata'。
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行后面的 SQL;false:不再执行失败 SQL 后的任何语句。默认值为:false。
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。

@ -216,7 +216,7 @@ url 中的配置参数如下:

INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```

- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
- 从 taos-jdbcdriver-2.0.36 开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);

:::
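结合上述参数,下面给出一个拼接 JDBC REST 连接 URL 的简化示意(非本文档原有示例,主机、端口、库名均为假设值):在 URL 中指定 dbname 并开启 batchfetch 批量拉取后,SQL 中即可省略库名前缀。

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RestUrlExample {
    public static void main(String[] args) throws Exception {
        // 在 URL 中指定 dbname(test)并开启 batchfetch;其余参数取默认值
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/test?user=root&password=taosdata&batchfetch=true";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // URL 已指定 dbname,因此 SQL 中不再写库名前缀
            stmt.executeUpdate("insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6)");
        }
    }
}
```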
@ -358,11 +358,11 @@ JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错

具体的错误码请参考:

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h)
- [TDengine_ERROR_CODE](../error-code)

### 通过参数绑定写入数据

从 2.1.2.0 版本开始,TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。
TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。
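下面先给出一个基于标准 JDBC `PreparedStatement` 接口的参数绑定写入示意(非本文档原有示例):假设子表 d1001 已存在且使用原生连接;这里仅演示绑定流程,TDengine 扩展的高效写入接口(如下文的 set* 系列方法)及批量绑定能力请以官方示例和实际驱动版本为准。

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Timestamp;

public class StmtSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             // SQL 只解析一次,之后反复绑定参数写入
             PreparedStatement ps = conn.prepareStatement("insert into d1001 values (?, ?)")) {
            for (int i = 0; i < 10; i++) {
                ps.setTimestamp(1, new Timestamp(System.currentTimeMillis() + i));
                ps.setFloat(2, 24.5f + i);
                ps.executeUpdate(); // 逐行写入;批量绑定请使用扩展接口
            }
        }
    }
}
```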

**注意**:

@ -630,7 +630,7 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws

### 无模式写入

从 2.2.0.0 版本开始,TDengine 增加了对无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](/reference/schemaless/)。
TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。

**注意**:
@ -670,55 +670,127 @@ public class SchemalessInsertTest {

TDengine Java 连接器支持订阅功能,应用 API 如下:

#### 创建订阅
#### 创建 Topic

```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
Connection connection = DriverManager.getConnection(url, properties);
Statement statement = connection.createStatement();
statement.executeUpdate("create topic if not exists topic_speed as select ts, speed from speed_table");
```

`subscribe` 方法的三个参数含义如下:

- topic:订阅的主题(即名称),此参数是订阅的唯一标识
- sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
- restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
- topic_speed:订阅的主题(即名称),此参数是订阅的唯一标识。
- sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据。

如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
如上面的例子将使用 SQL 语句 `select ts, speed from speed_table` 创建一个名为 `topic_speed` 的订阅。

#### 创建 Consumer

```java
Properties config = new Properties();
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");

TaosConsumer consumer = new TaosConsumer<>(config);
```

- enable.auto.commit: 是否允许自动提交。
- group.id: consumer 所在的 group。
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
- 其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)

#### 订阅消费数据

```java
int total = 0;
while(true) {
    TSDBResultSet rs = sub.consume();
    int count = 0;
    while(rs.next()) {
        count++;
    }
    total += count;
    System.out.printf("%d rows consumed, total %d\n", count, total);
    Thread.sleep(1000);
    ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
    for (ResultBean record : records) {
        process(record);
    }
}
```

`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
`poll` 方法返回一个结果集,其中包含从上次 `poll` 到目前为止的所有新数据。请务必按需选择合理的调用 `poll` 的频率(如例子中的 `Duration.ofMillis(100)`),否则会给服务端造成不必要的压力。

#### 关闭订阅

```java
sub.close(true);
consumer.close();
```

`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。

### 关闭资源
### 使用示例如下:

```java
resultSet.close();
stmt.close();
conn.close();
```
public abstract class ConsumerLoop {
    private final TaosConsumer<ResultBean> consumer;
    private final List<String> topics;
    private final AtomicBoolean shutdown;
    private final CountDownLatch shutdownLatch;

> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。

    public ConsumerLoop() throws SQLException {
        Properties config = new Properties();
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("group.id", "group1");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
        this.shutdown = new AtomicBoolean(false);
        this.shutdownLatch = new CountDownLatch(1);
    }

    public abstract void process(ResultBean result);

    public void pollData() throws SQLException {
        try {
            consumer.subscribe(topics);

            while (!shutdown.get()) {
                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
                for (ResultBean record : records) {
                    process(record);
                }
            }
        } finally {
            consumer.close();
            shutdownLatch.countDown();
        }
    }

    public void shutdown() throws InterruptedException {
        shutdown.set(true);
        shutdownLatch.await();
    }

    static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {

    }

    static class ResultBean {
        private Timestamp ts;
        private int speed;

        public Timestamp getTs() {
            return ts;
        }

        public void setTs(Timestamp ts) {
            this.ts = ts;
        }

        public int getSpeed() {
            return speed;
        }

        public void setSpeed(int speed) {
            this.speed = speed;
        }
    }
}
```
### 与连接池使用

@ -787,20 +859,6 @@ public static void main(String[] args) throws Exception {

> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。
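作为连接池用法的补充,下面给出一个以 HikariCP 为例的简化配置示意(非本文档原有示例,URL、账号等均为假设值;Druid 的等价配置请参考上面的官方说明链接):

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

import java.sql.Connection;
import java.sql.Statement;

public class HikariExample {
    public static void main(String[] args) throws Exception {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:TAOS://localhost:6030/test"); // 假设的原生连接 URL
        config.setUsername("root");
        config.setPassword("taosdata");
        config.setMaximumPoolSize(10); // 连接池大小按业务并发调整
        config.setMinimumIdle(2);

        try (HikariDataSource ds = new HikariDataSource(config);
             Connection conn = ds.getConnection();   // 从连接池获取连接
             Statement stmt = conn.createStatement()) {
            stmt.execute("show databases");
        }                                            // try-with-resources 自动归还并关闭资源
    }
}
```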
**注意事项:**

- TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。

如下所示,`select server_status()` 执行成功会返回 `1`。

```sql
taos> select server_status();
server_status()|
================
1              |
Query OK, 1 row(s) in set (0.000141s)
```

### 更多示例程序

示例程序源码位于 `TDengine/examples/JDBC` 下:

@ -811,7 +869,7 @@ Query OK, 1 row(s) in set (0.000141s)

- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。

请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC)
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

## 最近更新记录

@ -842,7 +900,7 @@ Query OK, 1 row(s) in set (0.000141s)

**解决方法**:重新安装 64 位 JDK。

4. 其它问题请参考 [FAQ](/train-faq/faq)
4. 其它问题请参考 [FAQ](../../../train-faq/faq)

## API 参考
@ -306,8 +306,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线

| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 |
| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | 异步订阅 |
| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | 同步订阅 |
| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | tmq 订阅 |

## 其它说明

@ -326,23 +325,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线

1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/

## 常见问题

欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。

## 重要更新

| 连接器版本 | 重要更新 | 发布日期 |
| ---------- | --------------------------------------------------------------------------------- | ---------- |
| 2.3.1 | 1. support TDengine REST API <br/> 2. remove support for Python version below 3.6 | 2022-04-28 |
| 2.2.5 | support timezone option when connect | 2022-04-13 |
| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |

[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)

## API 参考

- [taos](https://docs.taosdata.com/api/taospy/taos/)
- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)

## 常见问题

欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。
@ -48,29 +48,30 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;

您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数:

- -h, --host=HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
- -P, --port=PORT: 指定服务端所用端口号
- -u, --user=USER: 连接时使用的用户名
- -p, --password=PASSWORD: 连接服务端时使用的密码
- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
- -P PORT: 指定服务端所用端口号
- -u USER: 连接时使用的用户名
- -p PASSWORD: 连接服务端时使用的密码
- -?, --help: 打印出所有命令行参数

还有更多其他参数:

- -c, --config-dir: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg`
- -C, --dump-config: 打印 -c 指定的目录中 `taos.cfg` 的配置参数
- -d, --database=DATABASE: 指定连接到服务端时使用的数据库
- -D, --directory=DIRECTORY: 导入指定路径中的 SQL 脚本文件
- -f, --file=FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行
- -k, --check=CHECK: 指定要检查的表
- -l, --pktlen=PKTLEN: 网络测试时使用的测试包大小
- -n, --netrole=NETROLE: 网络连接测试时的测试范围,默认为 `startup`, 可选值为 `client`、`server`、`rpc`、`startup`、`sync`、`speed` 和 `fqdn` 之一
- -r, --raw-time: 将时间输出为无符号 64 位整数类型(即 C 语言中的 uint64_t)
- -s, --commands=COMMAND: 以非交互模式执行的 SQL 命令
- -S, --pkttype=PKTTYPE: 指定网络测试所用的包类型,默认为 TCP。只有 netrole 为 `speed` 时既可以指定为 TCP 也可以指定为 UDP
- -T, --thread=THREADNUM: 以多线程模式导入数据时的线程数
- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
- -z, --timezone=TIMEZONE: 指定时区,默认为本地时区
- -V, --version: 打印出当前版本号
- -a AUTHSTR: 连接服务端的授权信息
- -A: 通过用户名和密码计算授权信息
- -c CONFIGDIR: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg`
- -C: 打印 -c 指定的目录中 `taos.cfg` 的配置参数
- -d DATABASE: 指定连接到服务端时使用的数据库
- -f FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行
- -k: 测试服务端运行状态,0: unavailable,1: network ok,2: service ok,3: service degraded,4: exiting
- -l PKTLEN: 网络测试时使用的测试包大小
- -n NETROLE: 网络连接测试时的测试范围,默认为 `client`, 可选值为 `client`、`server`
- -N PKTNUM: 网络测试时使用的测试包数量
- -r: 将时间输出为无符号 64 位整数类型(即 C 语言中的 uint64_t)
- -s COMMAND: 以非交互模式执行的 SQL 命令
- -t: 测试服务端启动状态,状态同 -k
- -w DISPLAYWIDTH: 客户端列显示宽度
- -z TIMEZONE: 指定时区,默认为本地时区
- -V: 打印出当前版本号

示例:
@ -5,18 +5,11 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"

## TDengine 服务端支持的平台列表

|              | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
| ------------ | -------------- | ------------------- | --------------- | ------------ | ----------------- | ---------------- | ---------------- |
| X64          | ●              | ●                   |                 | ○            | ●                 | ●                | ●                |
| 龙芯 MIPS64  |                |                     | ●               |              |                   |                  |                  |
| 鲲鹏 ARM64   |                | ○                   | ○               |              | ●                 |                  |                  |
| 申威 Alpha64 |                |                     | ○               | ●            |                   |                  |                  |
| 飞腾 ARM64   |                | ○ 优麒麟            |                 |              |                   |                  |                  |
| 海光 X64     | ●              | ●                   | ●               | ○            | ●                 | ●                |                  |
| 瑞芯微 ARM64 |                |                     | ○               |              |                   |                  |                  |
| 全志 ARM64   |                |                     | ○               |              |                   |                  |                  |
| 炬力 ARM64   |                |                     | ○               |              |                   |                  |                  |
| 华为云 ARM64 |                |                     |                 |              |                   |                  | ●                |
|              | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
| ------------ | ----------------- | ---------------- | ---------------- | --------------- | ------------ | ----------------- | ---------------- | ---------------- |
| X64          | ●                 | ●                | ●                |                 | ●            | ●                 | ●                |                  |
| 树莓派 ARM64 |                   |                  |                  | ●               |              |                   |                  |                  |
| 华为云 ARM64 |                   |                  |                  |                 |              |                   |                  | ●                |

注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。

@ -26,15 +19,15 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"

对照矩阵如下:

| **CPU**     | **X64 64bit** |           |           | **X86 32bit** | **ARM64** | **ARM32** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | ------------- | -------------- | ------------ |
| **OS**      | **Linux**     | **Win64** | **Win32** | **Win32**     | **Linux** | **Linux** | **Linux**     | **Linux**      | **Linux**    |
| **C/C++**   | ●             | ●         | ●         | ○             | ●         | ●         | ●             | ●              | ●            |
| **JDBC**    | ●             | ●         | ●         | ○             | ●         | ●         | ●             | ●              | ●            |
| **Python**  | ●             | ●         | ●         | ○             | ●         | ●         | ●             | --             | ●            |
| **Go**      | ●             | ●         | ●         | ○             | ●         | ●         | ○             | --             | --           |
| **NodeJs**  | ●             | ●         | ○         | ○             | ●         | ●         | ○             | --             | --           |
| **C#**      | ●             | ●         | ○         | ○             | ○         | ○         | ○             | --             | --           |
| **RESTful** | ●             | ●         | ●         | ●             | ●         | ●         | ●             | ●              | ●            |
| **CPU**     | **X64 64bit** |           |           | **X86 32bit** | **ARM64** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | ------------- | -------------- | ------------ |
| **OS**      | **Linux**     | **Win64** | **Win32** | **Win32**     | **Linux** | **Linux**     | **Linux**      | **Linux**    |
| **C/C++**   | ●             | ●         | ●         | ○             | ●         | ●             | ●              | ●            |
| **JDBC**    | ●             | ●         | ●         | ○             | ●         | ●             | ●              | ●            |
| **Python**  | ●             | ●         | ●         | ○             | ●         | ●             | --             | ●            |
| **Go**      | ●             | ●         | ●         | ○             | ●         | ○             | --             | --           |
| **NodeJs**  | ●             | ●         | ○         | ○             | ●         | ○             | --             | --           |
| **C#**      | ●             | ●         | ○         | ○             | ○         | ○             | --             | --           |
| **RESTful** | ●             | ●         | ●         | ●             | ●         | ●             | ●              | ●            |

注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
@ -25,7 +25,6 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下

- _taosBenchmark_:TDengine 测试工具
- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos
- _taosadapter_: 提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件
- _tarbitrator_: 提供双节点集群部署的仲裁功能
- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本
- _set_core.sh_:用于方便调试设置系统生成 core dump 文件的脚本
- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。
@ -21,17 +21,17 @@
|
|||
#include "taos.h"
|
||||
|
||||
static int running = 1;
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char topicName[64] = "topicname";
|
||||
|
||||
static int32_t msg_process(TAOS_RES* msg) {
|
||||
char buf[1024];
|
||||
char buf[1024];
|
||||
int32_t rows = 0;
|
||||
|
||||
const char* topicName = tmq_get_topic_name(msg);
|
||||
const char* dbName = tmq_get_db_name(msg);
|
||||
int32_t vgroupId = tmq_get_vgroup_id(msg);
|
||||
const char* dbName = tmq_get_db_name(msg);
|
||||
int32_t vgroupId = tmq_get_vgroup_id(msg);
|
||||
|
||||
printf("topic: %s\n", topicName);
|
||||
printf("db: %s\n", dbName);
|
||||
|
@ -41,14 +41,14 @@ static int32_t msg_process(TAOS_RES* msg) {
|
|||
TAOS_ROW row = taos_fetch_row(msg);
|
||||
if (row == NULL) break;
|
||||
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
int32_t numOfFields = taos_field_count(msg);
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
const char* tbName = tmq_get_table_name(msg);
|
||||
rows++;
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
const char* tbName = tmq_get_table_name(msg);
|
||||
rows++;
|
||||
taos_print_row(buf, row, fields, numOfFields);
|
||||
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf);
|
||||
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
|
||||
}
|
||||
|
||||
return rows;
|
||||
|
@ -80,7 +80,8 @@ static int32_t init_env() {
|
|||
|
||||
// create super table
|
||||
printf("create super table\n");
|
||||
pRes = taos_query(pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
pRes = taos_query(
|
||||
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
|
@ -166,7 +167,6 @@ int32_t create_topic() {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
|
||||
pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
|
||||
|
@ -184,26 +184,28 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
|||
|
||||
tmq_t* build_consumer() {
|
||||
tmq_conf_res_t code;
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "client.id", "user defined name");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.user", "root");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
tmq_conf_destroy(conf);
|
||||
return tmq;
|
||||
|
@ -211,7 +213,7 @@ tmq_t* build_consumer() {
|
|||
|
||||
tmq_list_t* build_topic_list() {
|
||||
tmq_list_t* topicList = tmq_list_new();
|
||||
int32_t code = tmq_list_append(topicList, "topicname");
|
||||
int32_t code = tmq_list_append(topicList, "topicname");
|
||||
if (code) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -228,18 +230,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
|
|||
|
||||
int32_t totalRows = 0;
|
||||
int32_t msgCnt = 0;
|
||||
int32_t consumeDelay = 5000;
|
||||
int32_t timeout = 5000;
|
||||
while (running) {
|
||||
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, consumeDelay);
|
||||
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
|
||||
if (tmqmsg) {
|
||||
msgCnt++;
|
||||
totalRows += msg_process(tmqmsg);
|
||||
taos_free_result(tmqmsg);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
/*} else {*/
|
||||
/*break;*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||
}
|
||||
|
||||
|
@ -256,32 +258,30 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
tmq_t* tmq = build_consumer();
|
||||
if (NULL == tmq) {
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
tmq_list_t* topic_list = build_topic_list();
|
||||
if (NULL == topic_list) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
basic_consume_loop(tmq, topic_list);
|
||||
|
||||
code = tmq_unsubscribe(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
fprintf(stderr, "%% unsubscribe\n");
|
||||
}
|
||||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -196,15 +196,6 @@ DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, vo
|
|||
DLL_EXPORT void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param);
|
||||
DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
|
||||
|
||||
// Shuduo: temporary enable for app build
|
||||
#if 1
|
||||
typedef void (*__taos_sub_fn_t)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);
|
||||
DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char *topic, const char *sql, __taos_sub_fn_t fp,
|
||||
void *param, int interval);
|
||||
DLL_EXPORT TAOS_RES *taos_consume(TAOS_SUB *tsub);
|
||||
DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress);
|
||||
#endif
|
||||
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
|
||||
|
||||
|
@ -281,10 +272,6 @@ DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
|
|||
|
||||
/* ------------------------------ TMQ END -------------------------------- */
|
||||
|
||||
#if 1 // Shuduo: temporary enable for app build
|
||||
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);
|
||||
#endif
|
||||
|
||||
typedef enum {
|
||||
TSDB_SRV_STATUS_UNAVAILABLE = 0,
|
||||
TSDB_SRV_STATUS_NETWORK_OK = 1,
|
||||
|
|
|
@ -103,12 +103,12 @@ typedef struct SDataBlockInfo {
|
|||
int16_t hasVarCol;
|
||||
uint32_t capacity;
|
||||
// TODO: optimize and remove following
|
||||
int64_t version; // used for stream, and need serialization
|
||||
int64_t ts; // used for stream, and need serialization
|
||||
int32_t childId; // used for stream, do not serialize
|
||||
EStreamType type; // used for stream, do not serialize
|
||||
STimeWindow calWin; // used for stream, do not serialize
|
||||
TSKEY watermark;// used for stream
|
||||
int64_t version; // used for stream, and need serialization
|
||||
int64_t ts; // used for stream, and need serialization
|
||||
int32_t childId; // used for stream, do not serialize
|
||||
EStreamType type; // used for stream, do not serialize
|
||||
STimeWindow calWin; // used for stream, do not serialize
|
||||
TSKEY watermark; // used for stream
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@ -268,6 +268,15 @@ typedef struct SSortExecInfo {
|
|||
int32_t readBytes; // read io bytes
|
||||
} SSortExecInfo;
|
||||
|
||||
// stream special block column
|
||||
|
||||
#define START_TS_COLUMN_INDEX 0
|
||||
#define END_TS_COLUMN_INDEX 1
|
||||
#define UID_COLUMN_INDEX 2
|
||||
#define GROUPID_COLUMN_INDEX 3
|
||||
#define CALCULATE_START_TS_COLUMN_INDEX 4
|
||||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -239,7 +239,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId
|
|||
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
|
||||
|
||||
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
|
||||
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
|
||||
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
|
||||
|
||||
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag);
|
||||
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
|
||||
|
|
|
@ -200,8 +200,6 @@ struct STag {
|
|||
#if 1 //================================================================================================================================================
|
||||
// Imported since 3.0 and use bitmap to demonstrate None/Null/Norm, while use Null/Norm below 3.0 without of bitmap.
|
||||
#define TD_SUPPORT_BITMAP
|
||||
#define TD_SUPPORT_READ2
|
||||
#define TD_SUPPORT_BACK2 // suppport back compatibility of 2.0
|
||||
|
||||
#define TASSERT(x) ASSERT(x)
|
||||
|
||||
|
|
|
@ -296,13 +296,13 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp);
|
|||
#define COL_IDX_ON ((int8_t)0x2)
|
||||
#define COL_SET_NULL ((int8_t)0x10)
|
||||
#define COL_SET_VAL ((int8_t)0x20)
|
||||
typedef struct SSchema {
|
||||
struct SSchema {
|
||||
int8_t type;
|
||||
int8_t flags;
|
||||
col_id_t colId;
|
||||
int32_t bytes;
|
||||
char name[TSDB_COL_NAME_LEN];
|
||||
} SSchema;
|
||||
};
|
||||
|
||||
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
|
||||
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
|
||||
|
@ -648,7 +648,7 @@ typedef struct {
|
|||
};
|
||||
bool output; // TODO remove it later
|
||||
|
||||
int16_t type;
|
||||
int8_t type;
|
||||
int32_t bytes;
|
||||
uint8_t precision;
|
||||
uint8_t scale;
|
||||
|
@ -1364,12 +1364,13 @@ typedef struct {
|
|||
int8_t compressed;
|
||||
int8_t streamBlockType;
|
||||
int32_t compLen;
|
||||
int32_t numOfBlocks;
|
||||
int32_t numOfRows;
|
||||
int32_t numOfCols;
|
||||
int64_t skey;
|
||||
int64_t ekey;
|
||||
int64_t version; // for stream
|
||||
TSKEY watermark;// for stream
|
||||
int64_t version; // for stream
|
||||
TSKEY watermark; // for stream
|
||||
char data[];
|
||||
} SRetrieveTableRsp;
|
||||
|
||||
|
@ -3079,6 +3080,22 @@ typedef struct SDeleteRes {
|
|||
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
|
||||
int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);
|
||||
|
||||
typedef struct {
|
||||
int64_t uid;
|
||||
int64_t ts;
|
||||
} SSingleDeleteReq;
|
||||
|
||||
int32_t tEncodeSSingleDeleteReq(SEncoder* pCoder, const SSingleDeleteReq* pReq);
|
||||
int32_t tDecodeSSingleDeleteReq(SDecoder* pCoder, SSingleDeleteReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int64_t suid;
|
||||
SArray* deleteReqs; // SArray<SSingleDeleteReq>
|
||||
} SBatchDeleteReq;
|
||||
|
||||
int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq);
|
||||
int32_t tDecodeSBatchDeleteReq(SDecoder* pCoder, SBatchDeleteReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t msgIdx;
|
||||
int32_t msgType;
|
||||
|
|
|
@ -202,6 +202,7 @@ enum {
|
|||
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)
|
||||
|
|
|
@ -38,7 +38,8 @@ typedef struct {
|
|||
uint16_t type : 2;
|
||||
uint16_t del : 1;
|
||||
uint16_t endian : 1;
|
||||
uint16_t reserve : 12;
|
||||
uint16_t statis : 1; // 0 all normal, 1 has null or none
|
||||
uint16_t reserve : 11;
|
||||
uint16_t sver;
|
||||
};
|
||||
};
|
||||
|
@ -149,6 +150,8 @@ typedef struct {
|
|||
void *pBitmap;
|
||||
void *pOffset;
|
||||
int32_t extendedRowSize;
|
||||
bool hasNone;
|
||||
bool hasNull;
|
||||
} SRowBuilder;
|
||||
|
||||
#define TD_ROW_HEAD_LEN (sizeof(STSRow))
|
||||
|
@ -287,9 +290,13 @@ int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen);
|
|||
int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols, int32_t flen,
|
||||
int32_t allNullLen, int32_t boundNullLen);
|
||||
int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf);
|
||||
static FORCE_INLINE void tdSRowEnd(SRowBuilder *pBuilder) {
|
||||
STSRow *pRow = (STSRow *)pBuilder->pBuf;
|
||||
if (pBuilder->hasNull || pBuilder->hasNone) {
|
||||
pRow->statis = 1;
|
||||
}
|
||||
}
|
||||
int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf);
|
||||
int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
|
||||
int32_t nBoundCols, int32_t flen);
|
||||
void tdSRowReset(SRowBuilder *pBuilder);
|
||||
int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const void *val, bool isCopyVarData,
|
||||
int8_t colType, int16_t colIdx, int32_t offset);
|
||||
|
@ -312,21 +319,17 @@ typedef struct {
|
|||
col_id_t kvIdx; // [0, nKvCols)
|
||||
} STSRowIter;
|
||||
|
||||
void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow);
|
||||
void tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema);
|
||||
void tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema);
|
||||
void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow);
|
||||
bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal);
|
||||
|
||||
int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow);
|
||||
bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
|
||||
bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
|
||||
bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal);
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
|
||||
bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx,
|
||||
SCellVal *pVal);
|
||||
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
|
||||
void tdSCellValPrint(SCellVal *pVal, int8_t colType);
|
||||
void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_COMMON_ROW_H_*/
|
||||
#endif /*_TD_COMMON_ROW_H_*/
|
|
@ -67,6 +67,7 @@ typedef struct SInputData {
|
|||
} SInputData;
|
||||
|
||||
typedef struct SOutputData {
|
||||
int32_t numOfBlocks;
|
||||
int32_t numOfRows;
|
||||
int32_t numOfCols;
|
||||
int8_t compressed;
|
||||
|
|
|
@ -123,7 +123,8 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
|
|||
* @param handle
|
||||
* @return
|
||||
*/
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds);
|
||||
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds);
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
|
||||
|
||||
/**
|
||||
* kill the ongoing query and free the query handle and corresponding resources automatically
|
||||
|
|
|
@ -54,10 +54,6 @@ typedef struct SFuncExecFuncs {
|
|||
FExecCombine combine;
|
||||
} SFuncExecFuncs;
|
||||
|
||||
typedef struct SFileBlockInfo {
|
||||
int32_t numBlocksOfStep;
|
||||
} SFileBlockInfo;
|
||||
|
||||
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
|
||||
|
||||
#define TOP_BOTTOM_QUERY_LIMIT 100
|
||||
|
@ -171,8 +167,6 @@ typedef struct tExprNode {
|
|||
};
|
||||
} tExprNode;
|
||||
|
||||
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
|
||||
|
||||
struct SScalarParam {
|
||||
bool colAlloced;
|
||||
SColumnInfoData *columnData;
|
||||
|
@ -182,14 +176,10 @@ struct SScalarParam {
|
|||
int32_t numOfRows;
|
||||
};
|
||||
|
||||
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength,
|
||||
bool isSuperTable);
|
||||
|
||||
void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num);
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
|
||||
int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
|
||||
|
||||
typedef struct SPoint {
|
||||
int64_t key;
|
||||
|
|
|
@ -380,6 +380,7 @@ typedef struct SAggPhysiNode {
|
|||
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
|
||||
SNodeList* pGroupKeys;
|
||||
SNodeList* pAggFuncs;
|
||||
bool mergeDataBlock;
|
||||
} SAggPhysiNode;
|
||||
|
||||
typedef struct SDownstreamSourceNode {
|
||||
|
@ -418,6 +419,7 @@ typedef struct SWinodwPhysiNode {
|
|||
int8_t igExpired;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
bool mergeDataBlock;
|
||||
} SWinodwPhysiNode;
|
||||
|
||||
typedef struct SIntervalPhysiNode {
|
||||
|
|
|
@ -44,7 +44,7 @@ extern int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict
|
|||
extern int32_t filterConverNcharColumns(SFilterInfo *pFilterInfo, int32_t rows, bool *gotNchar);
|
||||
extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo);
|
||||
extern void filterFreeInfo(SFilterInfo *info);
|
||||
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows);
|
||||
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pColsAgg, int32_t numOfCols, int32_t numOfRows);
|
||||
|
||||
/* condition split interface */
|
||||
int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond,
|
||||
|
|
|
@ -34,6 +34,8 @@ typedef struct SStreamTask SStreamTask;
|
|||
|
||||
enum {
|
||||
STREAM_STATUS__NORMAL = 0,
|
||||
STREAM_STATUS__STOP,
|
||||
STREAM_STATUS__FAILED,
|
||||
STREAM_STATUS__RECOVER,
|
||||
};
|
||||
|
||||
|
@ -42,8 +44,8 @@ enum {
|
|||
TASK_STATUS__DROPPING,
|
||||
TASK_STATUS__FAIL,
|
||||
TASK_STATUS__STOP,
|
||||
TASK_STATUS__PREPARE_RECOVER,
|
||||
TASK_STATUS__RECOVERING,
|
||||
TASK_STATUS__RECOVER_DOWNSTREAM,
|
||||
TASK_STATUS__RECOVER_SELF,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -118,7 +120,7 @@ typedef struct {
|
|||
int64_t sourceVer;
|
||||
int64_t reqId;
|
||||
|
||||
SArray* blocks; // SArray<SSDataBlock*>
|
||||
SArray* blocks; // SArray<SSDataBlock>
|
||||
} SStreamDataBlock;
|
||||
|
||||
typedef struct {
|
||||
|
@ -154,7 +156,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
|
|||
atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue->qItem; }
|
||||
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
|
||||
//
|
||||
return queue->qItem;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
|
||||
int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
|
||||
|
@ -226,14 +231,12 @@ typedef struct {
|
|||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int32_t taskId;
|
||||
// int64_t checkpointVer;
|
||||
// int64_t processedVer;
|
||||
SEpSet epSet;
|
||||
SEpSet epSet;
|
||||
} SStreamChildEpInfo;
|
||||
|
||||
typedef struct {
|
||||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int32_t srcNodeId;
|
||||
int32_t srcChildId;
|
||||
int64_t stateSaveVer;
|
||||
int64_t stateProcessedVer;
|
||||
} SStreamCheckpointInfo;
|
||||
|
@ -372,15 +375,6 @@ static FORCE_INLINE int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBloc
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int32_t reserved;
|
||||
} SStreamTaskDeployRsp;
|
||||
|
||||
typedef struct {
|
||||
// SMsgHead head;
|
||||
SStreamTask* task;
|
||||
} SStreamTaskDeployReq;
|
||||
|
||||
typedef struct {
|
||||
SMsgHead head;
|
||||
int64_t streamId;
|
||||
|
@ -394,9 +388,6 @@ typedef struct {
|
|||
int32_t upstreamTaskId;
|
||||
int32_t upstreamChildId;
|
||||
int32_t upstreamNodeId;
|
||||
#if 0
|
||||
int64_t sourceVer;
|
||||
#endif
|
||||
int32_t blockNum;
|
||||
SArray* dataLen; // SArray<int32_t>
|
||||
SArray* data; // SArray<SRetrieveTableRsp*>
|
||||
|
@ -426,6 +417,7 @@ typedef struct {
|
|||
int32_t rspToTaskId;
|
||||
} SStreamRetrieveRsp;
|
||||
|
||||
#if 0
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
|
@ -462,13 +454,36 @@ int32_t tDecodeSMStreamTaskRecoverReq(SDecoder* pDecoder, SMStreamTaskRecoverReq
|
|||
int32_t tEncodeSMStreamTaskRecoverRsp(SEncoder* pEncoder, const SMStreamTaskRecoverRsp* pRsp);
|
||||
int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
} SPStreamTaskRecoverReq;
|
||||
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
|
||||
int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
int8_t reserved;
|
||||
} SPStreamTaskRecoverRsp;
|
||||
int64_t streamId;
|
||||
int32_t downstreamTaskId;
|
||||
int32_t taskId;
|
||||
} SStreamRecoverDownstreamReq;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t downstreamTaskId;
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer; // SArray<SStreamCheckpointInfo>
|
||||
} SStreamRecoverDownstreamRsp;
|
||||
|
||||
int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
|
||||
int32_t tDecodeSStreamTaskRecoverReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
|
||||
|
||||
int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp);
|
||||
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
int32_t waitingRspCnt;
|
||||
int32_t totReq;
|
||||
SArray* info; // SArray<SArray<SStreamCheckpointInfo>*>
|
||||
} SStreamRecoverStatus;
|
||||
|
||||
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
|
||||
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
|
||||
|
@ -479,8 +494,6 @@ int32_t streamSetupTrigger(SStreamTask* pTask);
|
|||
int32_t streamProcessRunReq(SStreamTask* pTask);
|
||||
int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
|
||||
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp);
|
||||
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
|
||||
int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
|
||||
|
||||
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
|
||||
int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
|
||||
|
@ -496,7 +509,7 @@ typedef struct SStreamMeta {
|
|||
TTB* pTaskDb;
|
||||
TTB* pStateDb;
|
||||
SHashObj* pTasks;
|
||||
SHashObj* pRecoveringState;
|
||||
SHashObj* pRecoverStatus;
|
||||
void* ahandle;
|
||||
TXN txn;
|
||||
FTaskExpand* expandFunc;
|
||||
|
|
|
@ -47,6 +47,8 @@ bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, u
|
|||
void updateInfoDestroy(SUpdateInfo *pInfo);
|
||||
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
|
||||
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
|
||||
int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
|
||||
int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -47,8 +47,6 @@ typedef struct SRpcHandleInfo {
|
|||
int8_t persistHandle; // persist handle or not
|
||||
int8_t hasEpSet;
|
||||
|
||||
STraceId traceId;
|
||||
|
||||
// app info
|
||||
void *ahandle; // app handle set by client
|
||||
void *wrapper; // wrapper handle
|
||||
|
@ -58,7 +56,8 @@ typedef struct SRpcHandleInfo {
|
|||
void *rsp;
|
||||
int32_t rspLen;
|
||||
|
||||
// conn info
|
||||
STraceId traceId;
|
||||
|
||||
SRpcConnInfo conn;
|
||||
} SRpcHandleInfo;
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ extern "C" {
|
|||
typedef struct TdCmd *TdCmdPtr;
|
||||
|
||||
TdCmdPtr taosOpenCmd(const char* cmd);
|
||||
int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf);
|
||||
int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf);
|
||||
int32_t taosEOFCmd(TdCmdPtr pCmd);
|
||||
int64_t taosCloseCmd(TdCmdPtr* ppCmd);
|
||||
|
|
|
@ -284,11 +284,13 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_CONSUMER_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x03EA)
|
||||
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
|
||||
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
|
||||
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
|
||||
|
||||
// mnode-stream
|
||||
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
|
||||
#define TSDB_CODE_MND_STREAM_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F1)
|
||||
#define TSDB_CODE_MND_INVALID_STREAM_OPTION TAOS_DEF_ERROR_CODE(0, 0x03F2)
|
||||
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
|
||||
|
||||
// mnode-sma
|
||||
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
|
||||
|
@ -619,6 +621,7 @@ int32_t* taosGetErrno();
|
|||
|
||||
//tmq
|
||||
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
|
||||
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#define _TD_UTIL_BLOOMFILTER_H_
|
||||
|
||||
#include "os.h"
|
||||
#include "tencode.h"
|
||||
#include "thash.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
@ -42,6 +43,8 @@ int32_t tBloomFilterNoContain(const SBloomFilter *pBF, const void *keyBuf,
|
|||
void tBloomFilterDestroy(SBloomFilter *pBF);
|
||||
void tBloomFilterDump(const SBloomFilter *pBF);
|
||||
bool tBloomFilterIsFull(const SBloomFilter *pBF);
|
||||
int32_t tBloomFilterEncode(const SBloomFilter *pBF, SEncoder* pEncoder);
|
||||
SBloomFilter* tBloomFilterDecode(SDecoder* pDecoder);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -396,7 +396,7 @@ typedef enum ELogicConditionType {
|
|||
#ifdef WINDOWS
|
||||
#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections.
|
||||
#else
|
||||
#define TSDB_MAX_RPC_THREADS 5
|
||||
#define TSDB_MAX_RPC_THREADS 10
|
||||
#endif
|
||||
|
||||
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
|
||||
|
|
|
@ -33,7 +33,8 @@ int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len);
|
|||
int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf,
|
||||
uint32_t len);
|
||||
void tScalableBfDestroy(SScalableBf *pSBf);
|
||||
void tScalableBfDump(const SScalableBf *pSBf);
|
||||
int32_t tScalableBfEncode(const SScalableBf *pSBf, SEncoder* pEncoder);
|
||||
SScalableBf* tScalableBfDecode(SDecoder* pDecoder);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
|
|||
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
|
||||
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
|
||||
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
|
||||
|
|
|
@ -11,13 +11,13 @@ if !%2==! GOTO USAGE
|
|||
if "%1" == "cluster" (
|
||||
set work_dir=%internal_dir%
|
||||
set packagServerName_x64=TDengine-enterprise-server-%2-beta-Windows-x64
|
||||
set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
|
||||
@REM set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86
|
||||
set packagClientName_x64=TDengine-enterprise-client-%2-beta-Windows-x64
|
||||
set packagClientName_x86=TDengine-enterprise-client-%2-beta-Windows-x86
|
||||
) else (
|
||||
set work_dir=%community_dir%
|
||||
set packagServerName_x64=TDengine-server-%2-Windows-x64
|
||||
set packagServerName_x86=TDengine-server-%2-Windows-x86
|
||||
@REM set packagServerName_x86=TDengine-server-%2-Windows-x86
|
||||
set packagClientName_x64=TDengine-client-%2-Windows-x64
|
||||
set packagClientName_x86=TDengine-client-%2-Windows-x86
|
||||
)
|
||||
|
@ -59,8 +59,8 @@ rd /s /Q C:\TDengine
|
|||
cmake --install .
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1)
|
||||
cd %package_dir%
|
||||
iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
|
||||
@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
|
||||
@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
|
||||
iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
|
||||
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1)
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
|
|||
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
||||
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
|
||||
|
|
|
@ -194,6 +194,9 @@ function install_bin() {
|
|||
${csudo}rm -f ${bin_link_dir}/${serverName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${adapterName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${demoName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
|
||||
|
||||
|
@ -205,7 +208,6 @@ function install_bin() {
|
|||
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
|
||||
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
|
||||
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
|
||||
[ -x ${install_main_dir}/bin/${tmqName} ] && ${csudo}ln -s ${install_main_dir}/bin/${tmqName} ${bin_link_dir}/${tmqName} || :
|
||||
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
|
||||
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
|
@ -964,12 +966,17 @@ function installProduct() {
|
|||
## ==============================Main program starts from here============================
|
||||
serverFqdn=$(hostname)
|
||||
if [ "$verType" == "server" ]; then
|
||||
# Install server and client
|
||||
if [ -x ${bin_dir}/${serverName} ]; then
|
||||
update_flag=1
|
||||
updateProduct
|
||||
# Check default 2.x data file.
|
||||
if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then
|
||||
echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
|
||||
else
|
||||
installProduct
|
||||
# Install server and client
|
||||
if [ -x ${bin_dir}/${serverName} ]; then
|
||||
update_flag=1
|
||||
updateProduct
|
||||
else
|
||||
installProduct
|
||||
fi
|
||||
fi
|
||||
elif [ "$verType" == "client" ]; then
|
||||
interactiveFqdn=no
|
||||
|
|
|
@ -132,6 +132,7 @@ function install_bin() {
|
|||
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
|
||||
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || :
|
||||
[ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
|
|
|
@ -101,10 +101,6 @@ typedef struct SQueryExecMetric {
|
|||
int64_t rsp; // receive response from server, us
|
||||
} SQueryExecMetric;
|
||||
|
||||
typedef struct SHeartBeatInfo {
|
||||
void* pTimer; // timer, used to send request msg to mnode
|
||||
} SHeartBeatInfo;
|
||||
|
||||
struct SAppInstInfo {
|
||||
int64_t numOfConns;
|
||||
SCorEpSet mgmtEp;
|
||||
|
@ -256,6 +252,8 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
|
|||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
|
||||
|
||||
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
|
||||
|
||||
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
|
||||
SMqRspObj* msg = (SMqRspObj*)res;
|
||||
return (SReqResultInfo*)&msg->resInfo;
|
||||
|
|
|
@@ -611,65 +611,6 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
  }
}

JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con,
    jboolean restart, jstring jtopic,
    jstring jsql, jint jinterval) {
  jlong sub = 0;
  TAOS *taos = (TAOS *)con;
  char *topic = NULL;
  char *sql = NULL;

  jniGetGlobalMethod(env);
  jniDebug("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj);

  if (jtopic != NULL) {
    topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL);
  }
  if (jsql != NULL) {
    sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL);
  }

  if (topic == NULL || sql == NULL) {
    jniDebug("jobj:%p, invalid argument: topic or sql is NULL", jobj);
    return sub;
  }

  TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval);
  sub = (jlong)tsub;

  if (sub == 0) {
    jniDebug("jobj:%p, failed to subscribe: topic:%s", jobj, topic);
  } else {
    jniDebug("jobj:%p, successfully subscribe: topic: %s", jobj, topic);
  }

  (*env)->ReleaseStringUTFChars(env, jtopic, topic);
  (*env)->ReleaseStringUTFChars(env, jsql, sql);

  return sub;
}

JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
  jniDebug("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%lld", jobj, sub);
  jniGetGlobalMethod(env);

  TAOS_SUB *tsub = (TAOS_SUB *)sub;
  TAOS_RES *res = taos_consume(tsub);

  if (res == NULL) {
    jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub);
    return 0l;
  }

  return (jlong)res;
}

JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub,
    jboolean keepProgress) {
  TAOS_SUB *tsub = (TAOS_SUB *)sub;
  taos_unsubscribe(tsub, keepProgress);
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj,
    jlong con, jbyteArray jsql) {
  TAOS *tscon = (TAOS *)con;

@@ -757,7 +698,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J
  int32_t code = taos_stmt_prepare(pStmt, str, len);
  taosMemoryFreeClear(str);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    jniError("prepareStmt jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -785,7 +726,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
  if (code != TSDB_CODE_SUCCESS) {
    (*env)->ReleaseStringUTFChars(env, jname, name);

    jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
    jniError("bindTableName jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -860,7 +801,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
  (*env)->ReleaseStringUTFChars(env, tableName, name);

  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
    jniError("tableNameTags jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }
  return JNI_SUCCESS;

@@ -926,7 +867,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
  taosMemoryFreeClear(b);

  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    jniError("bindColData jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -949,7 +890,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEn

  int32_t code = taos_stmt_add_batch(pStmt);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    jniError("add batch jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -973,7 +914,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J

  int32_t code = taos_stmt_execute(pStmt);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    jniError("excute batch jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -997,7 +938,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv

  int32_t code = taos_stmt_close(pStmt);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    jniError("close stmt jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
    return JNI_TDENGINE_ERROR;
  }

@@ -1087,4 +1028,4 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInsert
      return JNI_OUT_OF_MEMORY;
    }
    return (jlong)tres;
  }
}
@@ -1571,10 +1571,18 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
  return TSDB_CODE_SUCCESS;
}

int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
  int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3);
  ASSERT(numOfCols == cols);

  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}

static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
  char* p = (char*)pResultInfo->pData;

  int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
  // version + length + numOfRows + numOfCol + groupId + flag_segment + column_info
  int32_t len = getVersion1BlockMetaSize(p, numOfCols);
  int32_t* colLength = (int32_t*)(p + len);
  len += sizeof(int32_t) * numOfCols;
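The fixed header size used above can be checked by hand. The sketch below only restates the arithmetic of getVersion1BlockMetaSize in standalone C; the function name and the 3-column example are illustrative assumptions, not part of the commit.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Sketch: version-1 block meta = 5 x int32 (version, length, rows, cols, flag)
// + uint64 group id + per-column schema entry (int8 type + int32 bytes).
static int32_t v1BlockMetaSize(int32_t numOfCols) {
  return 5 * (int32_t)sizeof(int32_t) + (int32_t)sizeof(uint64_t) +
         numOfCols * (int32_t)(sizeof(int8_t) + sizeof(int32_t));
}

int main(void) {
  // e.g. a 3-column block: 20 + 8 + 3 * 5 = 43 bytes of fixed header
  assert(v1BlockMetaSize(3) == 43);
  printf("meta size for 3 cols: %d\n", v1BlockMetaSize(3));
  return 0;
}
```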
@@ -1642,7 +1650,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
  char* p1 = pResultInfo->convertJson;

  int32_t totalLen = 0;
  int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
  int32_t len = getVersion1BlockMetaSize(p, numOfCols);
  memcpy(p1, p, len);

  p += len;

@@ -1739,7 +1747,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
    pStart1 += colLen1;
  }

  *(int32_t*)(pResultInfo->convertJson) = totalLen;
  *(int32_t*)(pResultInfo->convertJson + 4) = totalLen;
  pResultInfo->pData = pResultInfo->convertJson;
  return TSDB_CODE_SUCCESS;
}
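The second hunk is easy to misread: because a version field now occupies the first four bytes of the block, the converted JSON block's total length has to be written at offset sizeof(int32_t) instead of offset 0. A minimal sketch of that offset change (buffer and function names are assumptions):

```c
#include <stdint.h>
#include <string.h>

// Sketch: write the total length after the leading version field,
// matching the "+ 4" offset introduced in the hunk above.
static void writeTotalLen(char* buf, int32_t version, int32_t totalLen) {
  memcpy(buf, &version, sizeof(int32_t));                     // bytes [0,4): version
  memcpy(buf + sizeof(int32_t), &totalLen, sizeof(int32_t));  // bytes [4,8): total length
}
```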
@@ -1762,16 +1770,31 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
  char* p = (char*)pResultInfo->pData;

  // version:
  int32_t blockVersion = *(int32_t*)p;
  p += sizeof(int32_t);

  int32_t dataLen = *(int32_t*)p;
  p += sizeof(int32_t);

  int32_t rows = *(int32_t*)p;
  p += sizeof(int32_t);

  int32_t cols = *(int32_t*)p;
  p += sizeof(int32_t);

  ASSERT(rows == numOfRows && cols == numOfCols);

  int32_t hasColumnSeg = *(int32_t*)p;
  p += sizeof(int32_t);

  uint64_t groupId = *(uint64_t*)p;
  p += sizeof(uint64_t);

  // check fields
  for (int32_t i = 0; i < numOfCols; ++i) {
    int16_t type = *(int16_t*)p;
    p += sizeof(int16_t);
    p += sizeof(int8_t);

    int32_t bytes = *(int32_t*)p;
    p += sizeof(int32_t);
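Read together, the fields above form a fixed block header that a reader consumes in order: version, data length, rows, columns, flag segment, group id, then the per-column schema. The sketch below walks the same layout over a plain byte buffer; it illustrates the layout only, not the client's actual parser, and the struct name is made up.

```c
#include <stdint.h>
#include <string.h>

// Hypothetical header view of the version-1 block layout parsed above.
typedef struct {
  int32_t  version;
  int32_t  dataLen;
  int32_t  rows;
  int32_t  cols;
  int32_t  flagSeg;
  uint64_t groupId;
} BlockHeaderSketch;

static const char* readBlockHeader(const char* p, BlockHeaderSketch* h) {
  memcpy(&h->version, p, sizeof(int32_t));  p += sizeof(int32_t);
  memcpy(&h->dataLen, p, sizeof(int32_t));  p += sizeof(int32_t);
  memcpy(&h->rows,    p, sizeof(int32_t));  p += sizeof(int32_t);
  memcpy(&h->cols,    p, sizeof(int32_t));  p += sizeof(int32_t);
  memcpy(&h->flagSeg, p, sizeof(int32_t));  p += sizeof(int32_t);
  memcpy(&h->groupId, p, sizeof(uint64_t)); p += sizeof(uint64_t);
  return p;  // now positioned at the per-column (type, bytes) schema entries
}
```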
@@ -939,21 +939,6 @@ const void *taos_get_raw_block(TAOS_RES *res) {
  return pRequest->body.resInfo.pData;
}

TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char *topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp,
                         void *param, int interval) {
  // TODO
  return NULL;
}

TAOS_RES *taos_consume(TAOS_SUB *tsub) {
  // TODO
  return NULL;
}

void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
  // TODO
}

int taos_load_table_info(TAOS *taos, const char *tableNameList) {
  if (NULL == taos) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
@ -122,6 +122,7 @@ enum {
|
|||
TMQ_CONSUMER_STATUS__INIT = 0,
|
||||
TMQ_CONSUMER_STATUS__READY,
|
||||
TMQ_CONSUMER_STATUS__NO_TOPIC,
|
||||
TMQ_CONSUMER_STATUS__RECOVER,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -134,10 +135,8 @@ typedef struct {
|
|||
// statistics
|
||||
int64_t pollCnt;
|
||||
// offset
|
||||
/*int64_t committedOffset;*/
|
||||
/*int64_t currentOffset;*/
|
||||
STqOffsetVal committedOffsetNew;
|
||||
STqOffsetVal currentOffsetNew;
|
||||
STqOffsetVal committedOffset;
|
||||
STqOffsetVal currentOffset;
|
||||
// connection info
|
||||
int32_t vgId;
|
||||
int32_t vgStatus;
|
||||
|
@ -152,7 +151,6 @@ typedef struct {
|
|||
|
||||
SArray* vgs; // SArray<SMqClientVg>
|
||||
|
||||
int8_t isSchemaAdaptive;
|
||||
SSchemaWrapper schema;
|
||||
} SMqClientTopic;
|
||||
|
||||
|
@ -190,10 +188,9 @@ typedef struct {
|
|||
} SMqPollCbParam;
|
||||
|
||||
typedef struct {
|
||||
tmq_t* tmq;
|
||||
int8_t automatic;
|
||||
int8_t async;
|
||||
/*int8_t freeOffsets;*/
|
||||
tmq_t* tmq;
|
||||
int8_t automatic;
|
||||
int8_t async;
|
||||
int32_t waitingRspNum;
|
||||
int32_t totalRspNum;
|
||||
int32_t rspErr;
|
||||
|
@ -207,7 +204,7 @@ typedef struct {
|
|||
typedef struct {
|
||||
SMqCommitCbParamSet* params;
|
||||
STqOffset* pOffset;
|
||||
} SMqCommitCbParam2;
|
||||
} SMqCommitCbParam;
|
||||
|
||||
tmq_conf_t* tmq_conf_new() {
|
||||
tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t));
|
||||
|
@ -215,6 +212,7 @@ tmq_conf_t* tmq_conf_new() {
|
|||
conf->autoCommit = true;
|
||||
conf->autoCommitInterval = 5000;
|
||||
conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST;
|
||||
conf->hbBgEnable = true;
|
||||
return conf;
|
||||
}
|
||||
|
||||
|
@ -371,8 +369,8 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
|
|||
return sprintf(dst, "%s:%d", topicName, vg);
|
||||
}
|
||||
|
||||
int32_t tmqCommitCb2(void* param, SDataBuf* pBuf, int32_t code) {
|
||||
SMqCommitCbParam2* pParam = (SMqCommitCbParam2*)param;
|
||||
int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
|
||||
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
|
||||
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
|
||||
// push into array
|
||||
#if 0
|
||||
|
@ -418,7 +416,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
pOffset->val = pVg->currentOffsetNew;
|
||||
pOffset->val = pVg->currentOffset;
|
||||
|
||||
int32_t groupLen = strlen(tmq->groupId);
|
||||
memcpy(pOffset->subKey, tmq->groupId, groupLen);
|
||||
|
@ -443,7 +441,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
tEncodeSTqOffset(&encoder, pOffset);
|
||||
|
||||
// build param
|
||||
SMqCommitCbParam2* pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam2));
|
||||
SMqCommitCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam));
|
||||
pParam->params = pParamSet;
|
||||
pParam->pOffset = pOffset;
|
||||
|
||||
|
@ -462,13 +460,13 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
|
|||
pVg->vgId, pOffset->val.version);
|
||||
|
||||
// TODO: put into cb
|
||||
pVg->committedOffsetNew = pVg->currentOffsetNew;
|
||||
pVg->committedOffset = pVg->currentOffset;
|
||||
|
||||
pMsgSendInfo->requestId = generateRequestId();
|
||||
pMsgSendInfo->requestObjRefId = 0;
|
||||
pMsgSendInfo->param = pParam;
|
||||
pMsgSendInfo->paramFreeFp = taosMemoryFree;
|
||||
pMsgSendInfo->fp = tmqCommitCb2;
|
||||
pMsgSendInfo->fp = tmqCommitCb;
|
||||
pMsgSendInfo->msgType = TDMT_VND_MQ_COMMIT_OFFSET;
|
||||
// send msg
|
||||
|
||||
|
@ -504,7 +502,6 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
|
|||
pParamSet->tmq = tmq;
|
||||
pParamSet->automatic = 0;
|
||||
pParamSet->async = async;
|
||||
/*pParamSet->freeOffsets = 1;*/
|
||||
pParamSet->userCb = userCb;
|
||||
pParamSet->userParam = userParam;
|
||||
tsem_init(&pParamSet->rspSem, 0, 0);
|
||||
|
@ -518,7 +515,7 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
|
|||
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
|
||||
if (pVg->vgId != vgId) continue;
|
||||
|
||||
if (pVg->currentOffsetNew.type > 0 && !tOffsetEqual(&pVg->currentOffsetNew, &pVg->committedOffsetNew)) {
|
||||
if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) {
|
||||
if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
@ -550,8 +547,8 @@ FAIL:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async, tmq_commit_cb* userCb,
|
||||
void* userParam) {
|
||||
int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async, tmq_commit_cb* userCb,
|
||||
void* userParam) {
|
||||
int32_t code = -1;
|
||||
|
||||
if (msg != NULL) {
|
||||
|
@ -566,7 +563,6 @@ int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_
|
|||
pParamSet->tmq = tmq;
|
||||
pParamSet->automatic = automatic;
|
||||
pParamSet->async = async;
|
||||
/*pParamSet->freeOffsets = 1;*/
|
||||
pParamSet->userCb = userCb;
|
||||
pParamSet->userParam = userParam;
|
||||
tsem_init(&pParamSet->rspSem, 0, 0);
|
||||
|
@ -583,7 +579,9 @@ int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_
|
|||
tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName,
|
||||
pVg->vgId);
|
||||
|
||||
if (pVg->currentOffsetNew.type > 0 && !tOffsetEqual(&pVg->currentOffsetNew, &pVg->committedOffsetNew)) {
|
||||
if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) {
|
||||
tscDebug("consumer: %ld, vg:%d, current %ld, committed %ld", tmq->consumerId, pVg->vgId,
|
||||
pVg->currentOffset.version, pVg->committedOffset.version);
|
||||
if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) {
|
||||
continue;
|
||||
}
|
||||
|
@ -699,7 +697,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
|
|||
tmqAskEp(tmq, true);
|
||||
taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer);
|
||||
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
|
||||
tmqCommitInner2(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
|
||||
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
|
||||
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
|
||||
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
|
||||
} else {
|
||||
|
@ -888,12 +886,6 @@ FAIL:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int32_t async) {
|
||||
return tmqCommitInner2(tmq, offsets, 0, async, tmq->commitCb, tmq->commitCbUserParam);
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
|
||||
const SArray* container = &topic_list->container;
|
||||
int32_t sz = taosArrayGetSize(container);
|
||||
|
@ -967,7 +959,11 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
|
|||
code = param.rspErr;
|
||||
if (code != 0) goto FAIL;
|
||||
|
||||
int32_t retryCnt = 0;
|
||||
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
|
||||
if (retryCnt++ > 10) {
|
||||
goto FAIL;
|
||||
}
|
||||
tscDebug("consumer not ready, retry");
|
||||
taosMsleep(500);
|
||||
}
|
||||
|
@ -1006,8 +1002,12 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
int32_t epoch = pParam->epoch;
|
||||
taosMemoryFree(pParam);
|
||||
if (code != 0) {
|
||||
tscWarn("msg discard from vgId:%d, epoch %d, code:%x", vgId, epoch, code);
|
||||
if (pMsg->pData) taosMemoryFreeClear(pMsg->pData);
|
||||
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
|
||||
if (pMsg->pData) taosMemoryFree(pMsg->pData);
|
||||
if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER);
|
||||
goto CREATE_MSG_FAIL;
|
||||
}
|
||||
if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
|
||||
if (pRspWrapper == NULL) {
|
||||
|
@ -1083,7 +1083,7 @@ CREATE_MSG_FAIL:
|
|||
return -1;
|
||||
}
|
||||
|
||||
bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
bool set = false;
|
||||
|
||||
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
|
||||
|
@ -1112,10 +1112,10 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
|
||||
sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId);
|
||||
char buf[80];
|
||||
tFormatOffset(buf, 80, &pVgCur->currentOffsetNew);
|
||||
tFormatOffset(buf, 80, &pVgCur->currentOffset);
|
||||
tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch,
|
||||
pVgCur->vgId, vgKey, buf);
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffsetNew, sizeof(STqOffsetVal));
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(STqOffsetVal));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1142,7 +1142,7 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
|
||||
SMqClientVg clientVg = {
|
||||
.pollCnt = 0,
|
||||
.currentOffsetNew = offsetNew,
|
||||
.currentOffset = offsetNew,
|
||||
.vgId = pVgEp->vgId,
|
||||
.epSet = pVgEp->epSet,
|
||||
.vgStatus = TMQ_VG_STATUS__IDLE,
|
||||
|
@ -1166,93 +1166,6 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
|||
return set;
|
||||
}
|
||||
|
||||
#if 0
|
||||
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
|
||||
/*printf("call update ep %d\n", epoch);*/
|
||||
bool set = false;
|
||||
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
|
||||
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
|
||||
tscDebug("consumer:%" PRId64 ", update ep epoch %d to epoch %d, topic num: %d", tmq->consumerId, tmq->epoch, epoch,
|
||||
topicNumGet);
|
||||
SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic));
|
||||
if (newTopics == NULL) {
|
||||
return false;
|
||||
}
|
||||
SHashObj* pHash = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK);
|
||||
if (pHash == NULL) {
|
||||
taosArrayDestroy(newTopics);
|
||||
return false;
|
||||
}
|
||||
|
||||
// find topic, build hash
|
||||
for (int32_t i = 0; i < topicNumGet; i++) {
|
||||
SMqClientTopic topic = {0};
|
||||
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
|
||||
topic.schema = pTopicEp->schema;
|
||||
taosHashClear(pHash);
|
||||
topic.topicName = strdup(pTopicEp->topic);
|
||||
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
|
||||
|
||||
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
|
||||
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
|
||||
for (int32_t j = 0; j < topicNumCur; j++) {
|
||||
// find old topic
|
||||
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, j);
|
||||
if (pTopicCur->vgs && strcmp(pTopicCur->topicName, pTopicEp->topic) == 0) {
|
||||
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
|
||||
tscDebug("consumer:%" PRId64 ", new vg num: %d", tmq->consumerId, vgNumCur);
|
||||
if (vgNumCur == 0) break;
|
||||
for (int32_t k = 0; k < vgNumCur; k++) {
|
||||
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, k);
|
||||
sprintf(vgKey, "%s:%d", topic.topicName, pVgCur->vgId);
|
||||
tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d build %s", tmq->consumerId, epoch, pVgCur->vgId, vgKey);
|
||||
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(int64_t));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs);
|
||||
topic.vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg));
|
||||
for (int32_t j = 0; j < vgNumGet; j++) {
|
||||
SMqSubVgEp* pVgEp = taosArrayGet(pTopicEp->vgs, j);
|
||||
sprintf(vgKey, "%s:%d", topic.topicName, pVgEp->vgId);
|
||||
int64_t* pOffset = taosHashGet(pHash, vgKey, strlen(vgKey));
|
||||
int64_t offset = pVgEp->offset;
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) original offset of vgId:%d is %" PRId64, tmq->consumerId, epoch, pVgEp->vgId, offset);
|
||||
if (pOffset != NULL) {
|
||||
offset = *pOffset;
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) receive offset of vgId:%d, full key is %s", tmq->consumerId, epoch, pVgEp->vgId,
|
||||
vgKey);
|
||||
}
|
||||
tscDebug("consumer:%" PRId64 ", (epoch %d) offset of vgId:%d updated to %" PRId64, tmq->consumerId, epoch, pVgEp->vgId, offset);
|
||||
SMqClientVg clientVg = {
|
||||
.pollCnt = 0,
|
||||
.currentOffset = offset,
|
||||
.vgId = pVgEp->vgId,
|
||||
.epSet = pVgEp->epSet,
|
||||
.vgStatus = TMQ_VG_STATUS__IDLE,
|
||||
.vgSkipCnt = 0,
|
||||
};
|
||||
taosArrayPush(topic.vgs, &clientVg);
|
||||
set = true;
|
||||
}
|
||||
taosArrayPush(newTopics, &topic);
|
||||
}
|
||||
if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics);
|
||||
taosHashCleanup(pHash);
|
||||
tmq->clientTopics = newTopics;
|
||||
|
||||
if (taosArrayGetSize(tmq->clientTopics) == 0)
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__NO_TOPIC);
|
||||
else
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY);
|
||||
|
||||
atomic_store_32(&tmq->epoch, epoch);
|
||||
return set;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
|
||||
SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
|
||||
tmq_t* tmq = pParam->tmq;
|
||||
|
@ -1278,7 +1191,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp);
|
||||
/*printf("rsp epoch %" PRId64 " sz %" PRId64 "\n", rsp.epoch, rsp.topics->size);*/
|
||||
/*printf("tmq epoch %" PRId64 " sz %" PRId64 "\n", tmq->epoch, tmq->clientTopics->size);*/
|
||||
tmqUpdateEp2(tmq, head->epoch, &rsp);
|
||||
tmqUpdateEp(tmq, head->epoch, &rsp);
|
||||
tDeleteSMqAskEpRsp(&rsp);
|
||||
} else {
|
||||
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM);
|
||||
|
@ -1401,17 +1314,6 @@ int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
|
|||
#endif
|
||||
|
||||
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
|
||||
/*int64_t reqOffset;*/
|
||||
/*if (pVg->currentOffset >= 0) {*/
|
||||
/*reqOffset = pVg->currentOffset;*/
|
||||
/*} else {*/
|
||||
/*if (tmq->resetOffsetCfg == TMQ_CONF__RESET_OFFSET__NONE) {*/
|
||||
/*tscError("unable to poll since no committed offset but reset offset is set to none");*/
|
||||
/*return NULL;*/
|
||||
/*}*/
|
||||
/*reqOffset = tmq->resetOffsetCfg;*/
|
||||
/*}*/
|
||||
|
||||
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
|
||||
if (pReq == NULL) {
|
||||
return NULL;
|
||||
|
@ -1430,7 +1332,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic*
|
|||
pReq->consumerId = tmq->consumerId;
|
||||
pReq->epoch = tmq->epoch;
|
||||
/*pReq->currentOffset = reqOffset;*/
|
||||
pReq->reqOffset = pVg->currentOffsetNew;
|
||||
pReq->reqOffset = pVg->currentOffset;
|
||||
pReq->reqId = generateRequestId();
|
||||
|
||||
pReq->useSnapshot = tmq->useSnapshot;
|
||||
|
@ -1534,7 +1436,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
|
|||
/*printf("send poll\n");*/
|
||||
|
||||
char offsetFormatBuf[80];
|
||||
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffsetNew);
|
||||
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset);
|
||||
tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64,
|
||||
tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, pReq->reqId);
|
||||
/*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/
|
||||
|
@ -1552,7 +1454,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset)
|
|||
if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) {
|
||||
SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
|
||||
SMqAskEpRsp* rspMsg = &pEpRspWrapper->msg;
|
||||
tmqUpdateEp2(tmq, rspWrapper->epoch, rspMsg);
|
||||
tmqUpdateEp(tmq, rspWrapper->epoch, rspMsg);
|
||||
/*tmqClearUnhandleMsg(tmq);*/
|
||||
*pReset = true;
|
||||
} else {
|
||||
|
@ -1586,7 +1488,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
SMqClientVg* pVg = pollRspWrapper->vgHandle;
|
||||
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
|
||||
* rspMsg->msg.rspOffset);*/
|
||||
pVg->currentOffsetNew = pollRspWrapper->dataRsp.rspOffset;
|
||||
pVg->currentOffset = pollRspWrapper->dataRsp.rspOffset;
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
if (pollRspWrapper->dataRsp.blockNum == 0) {
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
|
@ -1609,8 +1511,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
SMqClientVg* pVg = pollRspWrapper->vgHandle;
|
||||
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
|
||||
* rspMsg->msg.rspOffset);*/
|
||||
pVg->currentOffsetNew.version = pollRspWrapper->metaRsp.rspOffset;
|
||||
pVg->currentOffsetNew.type = TMQ_OFFSET__LOG;
|
||||
pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset;
|
||||
pVg->currentOffset.type = TMQ_OFFSET__LOG;
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
// build rsp
|
||||
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
|
||||
|
@ -1653,6 +1555,17 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER) {
|
||||
int32_t retryCnt = 0;
|
||||
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
|
||||
if (retryCnt++ > 10) {
|
||||
return NULL;
|
||||
}
|
||||
tscDebug("consumer not ready, retry");
|
||||
taosMsleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
while (1) {
|
||||
tmqHandleAllDelayedTask(tmq);
|
||||
if (tmqPollImpl(tmq, timeout) < 0) return NULL;
|
||||
|
@ -2947,7 +2860,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
tdSRowSetTpInfo(&rb, numOfCols, fLen);
|
||||
int32_t dataLen = 0;
|
||||
|
||||
char* pStart = pData + sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
|
||||
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
|
||||
int32_t* colLength = (int32_t*)pStart;
|
||||
pStart += sizeof(int32_t) * numOfCols;
|
||||
|
||||
|
@ -2977,6 +2890,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
char* data = pCol[k].pData + pCol[k].offset[j];
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
} else {
|
||||
|
@ -2990,6 +2904,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
|
@ -3220,6 +3135,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
|
|||
}
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
|
@ -3381,10 +3297,10 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
|
|||
|
||||
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
|
||||
//
|
||||
tmqCommitInner2(tmq, msg, 0, 1, cb, param);
|
||||
tmqCommitInner(tmq, msg, 0, 1, cb, param);
|
||||
}
|
||||
|
||||
int32_t tmq_commit_sync(tmq_t* tmq, const TAOS_RES* msg) {
|
||||
//
|
||||
return tmqCommitInner2(tmq, msg, 0, 0, NULL, NULL);
|
||||
return tmqCommitInner(tmq, msg, 0, 0, NULL, NULL);
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ TEST(testCase, driverInit_Test) {
|
|||
}
|
||||
|
||||
TEST(testCase, connect_Test) {
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/tdengine/sim/dnode1/cfg");
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
|
||||
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
|
@ -670,54 +670,56 @@ TEST(testCase, projection_query_tables) {
|
|||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(pConn, nullptr);
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create db, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
// TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("error in create db, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "use abc1");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "use abc1");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tu using st1 tags(1)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
for(int32_t i = 0; i < 1; ++i) {
|
||||
printf("create table :%d\n", i);
|
||||
createNewTable(pConn, i);
|
||||
}
|
||||
|
||||
pRes = taos_query(pConn, "select * from tu");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
|
||||
taos_free_result(pRes);
|
||||
ASSERT_TRUE(false);
|
||||
}
|
||||
|
||||
TAOS_ROW pRow = NULL;
|
||||
TAOS_FIELD* pFields = taos_fetch_fields(pRes);
|
||||
int32_t numOfFields = taos_num_fields(pRes);
|
||||
|
||||
char str[512] = {0};
|
||||
while ((pRow = taos_fetch_row(pRes)) != NULL) {
|
||||
int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
|
||||
printf("%s\n", str);
|
||||
}
|
||||
pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)");
|
||||
printResult(pRes);
|
||||
// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// pRes = taos_query(pConn, "create table tu using st1 tags(1)");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
|
||||
// }
|
||||
// taos_free_result(pRes);
|
||||
//
|
||||
// for(int32_t i = 0; i < 1; ++i) {
|
||||
// printf("create table :%d\n", i);
|
||||
// createNewTable(pConn, i);
|
||||
// }
|
||||
//
|
||||
// pRes = taos_query(pConn, "select * from tu");
|
||||
// if (taos_errno(pRes) != 0) {
|
||||
// printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
|
||||
// taos_free_result(pRes);
|
||||
// ASSERT_TRUE(false);
|
||||
// }
|
||||
//
|
||||
// TAOS_ROW pRow = NULL;
|
||||
// TAOS_FIELD* pFields = taos_fetch_fields(pRes);
|
||||
// int32_t numOfFields = taos_num_fields(pRes);
|
||||
//
|
||||
// char str[512] = {0};
|
||||
// while ((pRow = taos_fetch_row(pRes)) != NULL) {
|
||||
// int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
|
||||
// printf("%s\n", str);
|
||||
// }
|
||||
|
||||
taos_free_result(pRes);
|
||||
taos_close(pConn);
|
||||
|
|
|
@ -74,7 +74,7 @@ static const SSysDbTableSchema clusterSchema[] = {
|
|||
static const SSysDbTableSchema userDBSchema[] = {
|
||||
{.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
|
||||
{.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
|
||||
{.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
|
||||
{.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
|
@ -135,12 +135,12 @@ static const SSysDbTableSchema streamSchema[] = {
|
|||
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
|
||||
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
};
|
||||
|
||||
static const SSysDbTableSchema userTblsSchema[] = {
|
||||
|
@ -284,8 +284,7 @@ static const SSysDbTableSchema consumerSchema[] = {
|
|||
{.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
|
||||
/*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/
|
||||
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
|
|
|
@@ -676,9 +676,9 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
 * @return
 */
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
  // | total rows/total length | block group id | column schema | each column length |
  return sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t)) +
         numOfCols * sizeof(int32_t);
  // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
         numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}

double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
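The new comment spells out the serialized layout, so the metadata size is just the fixed header plus one int8 type, one int32 width, and one int32 length entry per column. A quick standalone restatement of that arithmetic (the function name and the 2-column example are illustrative, not from the commit):

```c
#include <stdint.h>
#include <stddef.h>

// Sketch of the new formula: 5 x int32 header fields + uint64 group id
// + per-column schema (int8 + int32) + per-column length array (int32).
static size_t serialMetaSizeSketch(uint32_t numOfCols) {
  return 5 * sizeof(int32_t) + sizeof(uint64_t) +
         numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}

// For 2 columns: 20 + 8 + 2*5 + 2*4 = 46 bytes of block metadata.
```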
@ -1329,10 +1329,6 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||
if (pSrc->pData == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
colDataAssign(pDst, pSrc, pDataBlock->info.rows, &pDataBlock->info);
|
||||
}
|
||||
|
||||
|
@ -1581,7 +1577,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
|
|||
for (int32_t i = 0; i < sz; i++) {
|
||||
SColumnInfoData* pColData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);
|
||||
tlen += taosEncodeFixedI16(buf, pColData->info.colId);
|
||||
tlen += taosEncodeFixedI16(buf, pColData->info.type);
|
||||
tlen += taosEncodeFixedI8(buf, pColData->info.type);
|
||||
tlen += taosEncodeFixedI32(buf, pColData->info.bytes);
|
||||
tlen += taosEncodeFixedBool(buf, pColData->hasNull);
|
||||
|
||||
|
@ -1613,7 +1609,7 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
|
|||
for (int32_t i = 0; i < sz; i++) {
|
||||
SColumnInfoData data = {0};
|
||||
buf = taosDecodeFixedI16(buf, &data.info.colId);
|
||||
buf = taosDecodeFixedI16(buf, &data.info.type);
|
||||
buf = taosDecodeFixedI8(buf, &data.info.type);
|
||||
buf = taosDecodeFixedI32(buf, &data.info.bytes);
|
||||
buf = taosDecodeFixedBool(buf, &data.hasNull);
|
||||
|
||||
|
@ -2001,6 +1997,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
|
|||
}
|
||||
offset += TYPE_BYTES[pCol->type]; // sum/avg would convert to int64_t/uint64_t/double during aggregation
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
dataLen += TD_ROW_LEN(rb.pBuf);
|
||||
#ifdef TD_DEBUG_PRINT_ROW
|
||||
tdSRowPrint(rb.pBuf, pTSchema, __func__);
|
||||
|
@@ -2070,17 +2067,36 @@ char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId) {

void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) {
  // todo extract method
  int32_t* version = (int32_t*)data;
  *version = 1;
  data += sizeof(int32_t);

  int32_t* actualLen = (int32_t*)data;
  data += sizeof(int32_t);

  int32_t* rows = (int32_t*)data;
  *rows = pBlock->info.rows;
  data += sizeof(int32_t);

  int32_t* cols = (int32_t*)data;
  *cols = numOfCols;
  data += sizeof(int32_t);

  // flag segment.
  // the inital bit is for column info
  int32_t* flagSegment = (int32_t*)data;
  *flagSegment = (1<<31);

  data += sizeof(int32_t);

  uint64_t* groupId = (uint64_t*)data;
  data += sizeof(uint64_t);

  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);

    *((int16_t*)data) = pColInfoData->info.type;
    data += sizeof(int16_t);
    *((int8_t*)data) = pColInfoData->info.type;
    data += sizeof(int8_t);

    *((int32_t*)data) = pColInfoData->info.bytes;
    data += sizeof(int32_t);

@@ -2126,12 +2142,31 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
  *groupId = pBlock->info.groupId;
}

const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) {
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
  const char* pStart = pData;

  int32_t version = *(int32_t*) pStart;
  pStart += sizeof(int32_t);
  ASSERT(version == 1);

  // total length sizeof(int32_t)
  int32_t dataLen = *(int32_t*)pStart;
  pStart += sizeof(int32_t);

  // total rows sizeof(int32_t)
  int32_t numOfRows = *(int32_t*)pStart;
  pStart += sizeof(int32_t);

  // total columns sizeof(int32_t)
  int32_t numOfCols = *(int32_t*)pStart;
  pStart += sizeof(int32_t);

  // has column info segment
  int32_t flagSeg = *(int32_t*)pStart;
  int32_t hasColumnInfo = (flagSeg >> 31);
  pStart += sizeof(int32_t);

  // group id sizeof(uint64_t)
  pBlock->info.groupId = *(uint64_t*)pStart;
  pStart += sizeof(uint64_t);

@@ -2143,7 +2178,7 @@ const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRow
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
    pColInfoData->info.type = *(int16_t*)pStart;
    pStart += sizeof(int16_t);
    pStart += sizeof(int8_t);

    pColInfoData->info.bytes = *(int32_t*)pStart;
    pStart += sizeof(int32_t);
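blockEncode and blockDecode are mirror images over the same header, which is why the decode signature can drop the numOfCols/numOfRows parameters: both counts now travel inside the block itself. A hedged round-trip sketch of just that header, using standalone types rather than the SSDataBlock API:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

// Write then read the six header fields in the same order as the code above.
static char* putI32(char* p, int32_t v) { memcpy(p, &v, sizeof v); return p + sizeof v; }
static const char* getI32(const char* p, int32_t* v) { memcpy(v, p, sizeof *v); return p + sizeof *v; }

int main(void) {
  char buf[64];
  char* w = buf;
  w = putI32(w, 1);                       // version
  w = putI32(w, 0);                       // total length (filled in later by the encoder)
  w = putI32(w, 100);                     // rows
  w = putI32(w, 3);                       // cols
  w = putI32(w, (int32_t)(1u << 31));     // flag segment: top bit = column info present
  uint64_t gid = 42; memcpy(w, &gid, sizeof gid); w += sizeof gid;

  const char* r = buf;
  int32_t version, len, rows, cols, flag;
  r = getI32(r, &version); r = getI32(r, &len);
  r = getI32(r, &rows);    r = getI32(r, &cols); r = getI32(r, &flag);
  uint64_t groupId; memcpy(&groupId, r, sizeof groupId);
  assert(version == 1 && len == 0 && rows == 100 && cols == 3 && (flag >> 31) && groupId == 42);
  return 0;
}
```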
@@ -355,7 +355,11 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
static int32_t taosAddServerCfg(SConfig *pCfg) {
  if (cfgAddDir(pCfg, "dataDir", tsDataDir, 0) != 0) return -1;
  if (cfgAddFloat(pCfg, "minimalDataDirGB", 2.0f, 0.001f, 10000000, 0) != 0) return -1;

  tsNumOfSupportVnodes = tsNumOfCores * 2;
  tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
  if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, 0) != 0) return -1;

  if (cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1;
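The new default ties supportVnodes to the machine: twice the core count, floored at 2, and still clamped by the option's 0..4096 range. A tiny arithmetic sketch of that rule (the macro, function name, and core counts are examples, not TDengine code):

```c
#define TMAX_SKETCH(a, b) ((a) > (b) ? (a) : (b))

// Default supportVnodes as set above: 2 x cores, but never below 2.
static int defaultSupportVnodes(int numOfCores) {
  int n = numOfCores * 2;
  return TMAX_SKETCH(n, 2);
}

// e.g. 1 core -> 2, 8 cores -> 16, 32 cores -> 64 (cfgAddInt32 still clamps to [0, 4096]).
```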
@ -305,7 +305,7 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
|
|||
taosArrayPush(desc.subDesc, &sDesc);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));
|
||||
|
||||
taosArrayPush(pReq->query->queryDesc, &desc);
|
||||
|
@@ -5770,3 +5770,40 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
  }
  return 0;
}

int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
  if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
  if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
  return 0;
}

int32_t tDecodeSSingleDeleteReq(SDecoder *pDecoder, SSingleDeleteReq *pReq) {
  if (tDecodeI64(pDecoder, &pReq->uid) < 0) return -1;
  if (tDecodeI64(pDecoder, &pReq->ts) < 0) return -1;
  return 0;
}

int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq) {
  if (tEncodeI64(pEncoder, pReq->suid) < 0) return -1;
  int32_t sz = taosArrayGetSize(pReq->deleteReqs);
  if (tEncodeI32(pEncoder, sz) < 0) return -1;
  for (int32_t i = 0; i < sz; i++) {
    SSingleDeleteReq *pOneReq = taosArrayGet(pReq->deleteReqs, i);
    if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1;
  }
  return 0;
}

int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
  if (tDecodeI64(pDecoder, &pReq->suid) < 0) return -1;
  int32_t sz;
  if (tDecodeI32(pDecoder, &sz) < 0) return -1;
  pReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
  if (pReq->deleteReqs == NULL) return -1;
  for (int32_t i = 0; i < sz; i++) {
    SSingleDeleteReq deleteReq;
    if (tDecodeSSingleDeleteReq(pDecoder, &deleteReq) < 0) return -1;
    taosArrayPush(pReq->deleteReqs, &deleteReq);
  }
  return 0;
}
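The batch encoder and decoder above follow the usual symmetric pattern: write suid, then the element count, then each element; the decoder reads them back in the same order and rebuilds the array. As a hedged, standalone illustration of that pattern, using plain buffers instead of SEncoder/SDecoder and hypothetical names:

```c
#include <stdint.h>
#include <string.h>

// Standalone sketch of the symmetric layout used by tEncodeSBatchDeleteReq /
// tDecodeSBatchDeleteReq above: suid, count, then (uid, ts) per delete request.
typedef struct { int64_t uid; int64_t ts; } DelReqSketch;

static char* encodeBatchSketch(char* p, int64_t suid, const DelReqSketch* reqs, int32_t n) {
  memcpy(p, &suid, sizeof suid); p += sizeof suid;
  memcpy(p, &n, sizeof n);       p += sizeof n;
  for (int32_t i = 0; i < n; i++) { memcpy(p, &reqs[i], sizeof reqs[i]); p += sizeof reqs[i]; }
  return p;
}

static const char* decodeBatchSketch(const char* p, int64_t* suid, DelReqSketch* reqs, int32_t* n) {
  memcpy(suid, p, sizeof *suid); p += sizeof *suid;
  memcpy(n, p, sizeof *n);       p += sizeof *n;
  for (int32_t i = 0; i < *n; i++) { memcpy(&reqs[i], p, sizeof reqs[i]); p += sizeof reqs[i]; }
  return p;
}
```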
@ -32,9 +32,13 @@ const uint8_t tdVTypeByte[2][3] = {{
|
|||
};
|
||||
|
||||
// declaration
|
||||
static uint8_t tdGetBitmapByte(uint8_t byte);
|
||||
static int32_t tdCompareColId(const void *arg1, const void *arg2);
|
||||
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2);
|
||||
static uint8_t tdGetBitmapByte(uint8_t byte);
|
||||
static bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
|
||||
static bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal);
|
||||
static bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset,
|
||||
col_id_t colIdx, SCellVal *pVal);
|
||||
static bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
|
||||
static void tdSCellValPrint(SCellVal *pVal, int8_t colType);
|
||||
|
||||
// implementation
|
||||
/**
|
||||
|
@ -330,14 +334,14 @@ void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) {
|
|||
tdSTSRowIterInit(&iter, pSchema);
|
||||
tdSTSRowIterReset(&iter, row);
|
||||
printf("%s >>>type:%d,sver:%d ", tag, (int32_t)TD_ROW_TYPE(row), (int32_t)TD_ROW_SVER(row));
|
||||
for (int i = 0; i < pSchema->numOfCols; ++i) {
|
||||
STColumn *stCol = pSchema->columns + i;
|
||||
SCellVal sVal = {255, NULL};
|
||||
if (!tdSTSRowIterNext(&iter, stCol->colId, stCol->type, &sVal)) {
|
||||
STColumn *cols = (STColumn *)&iter.pSchema->columns;
|
||||
while (true) {
|
||||
SCellVal sVal = {.valType = 255, NULL};
|
||||
if (!tdSTSRowIterNext(&iter, &sVal)) {
|
||||
break;
|
||||
}
|
||||
ASSERT(sVal.valType == 0 || sVal.valType == 1 || sVal.valType == 2);
|
||||
tdSCellValPrint(&sVal, stCol->type);
|
||||
tdSCellValPrint(&sVal, cols[iter.colIdx - 1].type);
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
@ -420,6 +424,16 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) {
|
||||
if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) {
|
||||
return 1;
|
||||
} else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) {
|
||||
return -1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow));
|
||||
|
@ -456,7 +470,7 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl
|
|||
return true;
|
||||
}
|
||||
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
|
||||
bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
pVal->val = &pIter->pRow->ts;
|
||||
pVal->valType = TD_VTYPE_NORM;
|
||||
|
@ -477,10 +491,10 @@ bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCe
|
|||
return false;
|
||||
}
|
||||
}
|
||||
tdGetTpRowDataOfCol(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
|
||||
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
|
||||
++pIter->colIdx;
|
||||
} else if (TD_IS_KV_ROW(pIter->pRow)) {
|
||||
return tdGetKvRowValOfColEx(pIter, colId, colType, &pIter->kvIdx, pVal);
|
||||
return tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal);
|
||||
} else {
|
||||
pVal->valType = TD_VTYPE_NONE;
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
|
@ -489,13 +503,68 @@ bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCe
|
|||
return true;
|
||||
}
|
||||
|
||||
bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal) {
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) {
|
||||
if (pIter->colIdx >= pIter->pSchema->numOfCols) {
|
||||
return false;
|
||||
}
|
||||
|
||||
STColumn *pCol = &pIter->pSchema->columns[pIter->colIdx];
|
||||
|
||||
if (pCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
pVal->val = &pIter->pRow->ts;
|
||||
pVal->valType = TD_VTYPE_NORM;
|
||||
++pIter->colIdx;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (TD_IS_TP_ROW(pIter->pRow)) {
|
||||
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
|
||||
} else if (TD_IS_KV_ROW(pIter->pRow)) {
|
||||
tdSTSRowIterGetKvVal(pIter, pCol->colId, &pIter->kvIdx, pVal);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
++pIter->colIdx;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) {
|
||||
STSRow *pRow = pIter->pRow;
|
||||
if (pRow->statis == 0) {
|
||||
pVal->valType = TD_VTYPE_NORM;
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||
pVal->valType = TD_VTYPE_NONE;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
if (pVal->valType == TD_VTYPE_NORM) {
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool tdSTSRowIterGetKvVal(STSRowIter *pIter, col_id_t colId, col_id_t *nIdx, SCellVal *pVal) {
|
||||
STSRow *pRow = pIter->pRow;
|
||||
SKvRowIdx *pKvIdx = NULL;
|
||||
bool colFound = false;
|
||||
col_id_t kvNCols = tdRowGetNCols(pRow) - 1;
|
||||
void *pColIdx = TD_ROW_COL_IDX(pRow);
|
||||
while (*nIdx < kvNCols) {
|
||||
pKvIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(pRow), *nIdx * sizeof(SKvRowIdx));
|
||||
pKvIdx = (SKvRowIdx *)POINTER_SHIFT(pColIdx, *nIdx * sizeof(SKvRowIdx));
|
||||
if (pKvIdx->colId == colId) {
|
||||
++(*nIdx);
|
||||
pVal->val = POINTER_SHIFT(pRow, pKvIdx->offset);
|
||||
|
@ -518,48 +587,13 @@ bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
int16_t colIdx = -1;
|
||||
if (pKvIdx) colIdx = POINTER_DISTANCE(pKvIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx);
|
||||
if (tdGetBitmapValType(pIter->pBitmap, colIdx, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||
if (tdGetBitmapValType(pIter->pBitmap, pIter->kvIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||
pVal->valType = TD_VTYPE_NONE;
|
||||
}
|
||||
#else
|
||||
pVal->valType = isNull(pVal->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) {
|
||||
STSRow *pRow = pIter->pRow;
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
pVal->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||
pVal->valType = TD_VTYPE_NONE;
|
||||
}
|
||||
#else
|
||||
pVal->valType = isNull(pVal->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) {
|
||||
if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) {
|
||||
return 1;
|
||||
} else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) {
|
||||
return -1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
|
||||
STColumn *pTColumn;
|
||||
SColVal *pColVal;
|
||||
|
@ -625,7 +659,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
|
|||
if (maxVarDataLen > 0) {
|
||||
varBuf = taosMemoryMalloc(maxVarDataLen);
|
||||
if (!varBuf) {
|
||||
if(isAlloc) {
|
||||
if (isAlloc) {
|
||||
taosMemoryFreeClear(*ppRow);
|
||||
}
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -666,12 +700,26 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
|
|||
|
||||
++iColVal;
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
|
||||
taosMemoryFreeClear(varBuf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tdCompareColId(const void *arg1, const void *arg2) {
|
||||
int32_t colId = *(int32_t *)arg1;
|
||||
STColumn *pCol = (STColumn *)arg2;
|
||||
|
||||
if (colId < pCol->colId) {
|
||||
return -1;
|
||||
} else if (colId == pCol->colId) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
pVal->val = &pIter->pRow->ts;
|
||||
|
@ -711,19 +759,6 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell
|
|||
return true;
|
||||
}
|
||||
|
||||
static int32_t tdCompareColId(const void *arg1, const void *arg2) {
|
||||
int32_t colId = *(int32_t *)arg1;
|
||||
STColumn *pCol = (STColumn *)arg2;
|
||||
|
||||
if (colId < pCol->colId) {
|
||||
return -1;
|
||||
} else if (colId == pCol->colId) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) {
|
||||
if (!pBitmap || colIdx < 0) {
|
||||
TASSERT(0);
|
||||
|
@ -880,26 +915,29 @@ int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_
|
|||
|
||||
int32_t tdGetTpRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int8_t colType, int32_t offset,
|
||||
int16_t colIdx) {
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
if (pRow->statis == 0) {
|
||||
output->valType = TD_VTYPE_NORM;
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (tdGetBitmapValType(pBitmap, colIdx, &output->valType, 0) != TSDB_CODE_SUCCESS) {
|
||||
output->valType = TD_VTYPE_NONE;
|
||||
return terrno;
|
||||
}
|
||||
if (tdValTypeIsNorm(output->valType)) {
|
||||
|
||||
if (output->valType == TD_VTYPE_NORM) {
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
}
|
||||
#else
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
output->val = POINTER_SHIFT(pRow, *(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(pRow), offset));
|
||||
} else {
|
||||
output->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
|
||||
}
|
||||
output->valType = isNull(output->val, colType) ? TD_VTYPE_NULL : TD_VTYPE_NORM;
|
||||
#endif
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -908,7 +946,7 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
|
|||
STSRow *pRow = pBuilder->pBuf;
|
||||
if (!val) {
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
if (tdValTypeIsNorm(valType)) {
|
||||
if (valType == TD_VTYPE_NORM) {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -925,6 +963,21 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
// TODO: We can avoid the type judegement by FP, but would prevent the inline scheme.
|
||||
|
||||
switch (valType) {
|
||||
case TD_VTYPE_NORM:
|
||||
break;
|
||||
case TD_VTYPE_NULL:
|
||||
if (!pBuilder->hasNull) pBuilder->hasNull = true;
|
||||
break;
|
||||
case TD_VTYPE_NONE:
|
||||
if (!pBuilder->hasNone) pBuilder->hasNone = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
default:
|
||||
ASSERT(0);
|
||||
break;
|
||||
}
|
||||
|
||||
if (TD_IS_TP_ROW(pRow)) {
|
||||
tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset);
|
||||
} else {
|
||||
|
@ -951,13 +1004,11 @@ int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
|
|||
|
||||
STSRow *row = pBuilder->pBuf;
|
||||
// No need to store None/Null values.
|
||||
if (tdValIsNorm(valType, val, colType)) {
|
||||
// ts key stored in STSRow.ts
|
||||
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
|
||||
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
pColIdx->colId = colId;
|
||||
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
|
||||
|
||||
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
|
||||
pColIdx->colId = colId;
|
||||
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
|
||||
if (valType == TD_VTYPE_NORM) {
|
||||
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
if (isCopyVarData) {
|
||||
memcpy(ptr, val, varDataTLen(val));
|
||||
|
@ -968,26 +1019,6 @@ int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
|
|||
TD_ROW_LEN(row) += TYPE_BYTES[colType];
|
||||
}
|
||||
}
|
||||
#ifdef TD_SUPPORT_BACK2
|
||||
// NULL/None value
|
||||
else {
|
||||
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
|
||||
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
pColIdx->colId = colId;
|
||||
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
|
||||
const void *nullVal = getNullValue(colType);
|
||||
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
if (isCopyVarData) {
|
||||
memcpy(ptr, nullVal, varDataTLen(nullVal));
|
||||
}
|
||||
TD_ROW_LEN(row) += varDataTLen(nullVal);
|
||||
} else {
|
||||
memcpy(ptr, nullVal, TYPE_BYTES[colType]);
|
||||
TD_ROW_LEN(row) += TYPE_BYTES[colType];
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1012,7 +1043,7 @@ int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
|
|||
// 1. No need to set the flen part for Null/None, just use the bitmap. When upserting for the same primary TS key,
// the bitmap should be updated simultaneously if a Norm val overwrites Null/None cols.
// 2. When an STSRow in memory is consumed by the taos client/tq, the output of Null/None cols should both be Null.
|
||||
if (tdValIsNorm(valType, val, colType)) {
|
||||
if (valType == TD_VTYPE_NORM) {
|
||||
// TODO: The layout of new data types imported since 3.0 like blob/medium blob is the same with binary/nchar.
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
// ts key stored in STSRow.ts
|
||||
|
@ -1025,24 +1056,6 @@ int32_t tdAppendColValToTpRow(SRowBuilder *pBuilder, TDRowValT valType, const vo
|
|||
memcpy(POINTER_SHIFT(TD_ROW_DATA(row), offset), val, TYPE_BYTES[colType]);
|
||||
}
|
||||
}
|
||||
#ifdef TD_SUPPORT_BACK2
|
||||
// NULL/None value
|
||||
else {
|
||||
// TODO: Null value for new data types imported since 3.0 need to be defined.
|
||||
const void *nullVal = getNullValue(colType);
|
||||
if (IS_VAR_DATA_TYPE(colType)) {
|
||||
// ts key stored in STSRow.ts
|
||||
*(VarDataOffsetT *)POINTER_SHIFT(TD_ROW_DATA(row), offset) = TD_ROW_LEN(row);
|
||||
|
||||
if (isCopyVarData) {
|
||||
memcpy(POINTER_SHIFT(row, TD_ROW_LEN(row)), nullVal, varDataTLen(nullVal));
|
||||
}
|
||||
TD_ROW_LEN(row) += varDataTLen(nullVal);
|
||||
} else {
|
||||
memcpy(POINTER_SHIFT(TD_ROW_DATA(row), offset), nullVal, TYPE_BYTES[colType]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1092,6 +1105,9 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
|
|||
return terrno;
|
||||
}
|
||||
|
||||
if (pBuilder->hasNone) pBuilder->hasNone = false;
|
||||
if (pBuilder->hasNull) pBuilder->hasNull = false;
|
||||
|
||||
TD_ROW_SET_INFO(pBuilder->pBuf, 0);
|
||||
TD_ROW_SET_TYPE(pBuilder->pBuf, pBuilder->rowType);
|
||||
|
||||
|
@ -1159,14 +1175,6 @@ int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tdSRowInitEx(SRowBuilder *pBuilder, void *pBuf, uint32_t allNullLen, uint32_t boundNullLen, int32_t nCols,
|
||||
int32_t nBoundCols, int32_t flen) {
|
||||
if (tdSRowSetExtendedInfo(pBuilder, allNullLen, boundNullLen, nCols, nBoundCols, flen) < 0) {
|
||||
return terrno;
|
||||
}
|
||||
return tdSRowResetBuf(pBuilder, pBuf);
|
||||
}
|
||||
|
||||
void tdSRowReset(SRowBuilder *pBuilder) {
|
||||
pBuilder->rowType = TD_ROW_TP;
|
||||
pBuilder->pBuf = NULL;
|
||||
|
@ -1315,7 +1323,7 @@ void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow) {
|
|||
pIter->pRow = pRow;
|
||||
pIter->pBitmap = tdGetBitmapAddr(pRow, pRow->type, pIter->pSchema->flen, tdRowGetNCols(pRow));
|
||||
pIter->offset = 0;
|
||||
pIter->colIdx = PRIMARYKEY_TIMESTAMP_COL_ID;
|
||||
pIter->colIdx = 0; // PRIMARYKEY_TIMESTAMP_COL_ID;
|
||||
pIter->kvIdx = 0;
|
||||
}
|
||||
|
||||
|
@ -1353,4 +1361,4 @@ void tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pColV
|
|||
|
||||
*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, value);
|
||||
}
|
||||
}
|
||||
}
|
|
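A hedged sketch of how a writer is expected to drive the STSRow builder whose NORM/NULL/NONE handling is changed above. It only strings together calls that appear in this diff (tdSRowInitEx, tdAppendColValToRow, tdSRowEnd); the helper name, the zero placeholder sizes, and the colValues array are illustrative assumptions, not code from the patch.

```c
// Sketch only: builds one row, marking columns without a value as NULL via the bitmap.
static int32_t buildOneRowSketch(SRowBuilder *pRb, void *pBuf, STSchema *pTSchema, const void **colValues) {
  // allNullLen/boundNullLen are normally derived from the bound columns; 0 is a placeholder here.
  if (tdSRowInitEx(pRb, pBuf, 0, 0, pTSchema->numOfCols, pTSchema->numOfCols, pTSchema->flen) < 0) {
    return terrno;
  }
  for (int32_t k = 0; k < pTSchema->numOfCols; ++k) {
    STColumn   *pColumn = &pTSchema->columns[k];
    const void *val = colValues[k];
    // TD_VTYPE_NORM requires a non-NULL val (TSDB_CODE_INVALID_PTR otherwise); for
    // TD_VTYPE_NULL/NONE only the bitmap and the hasNull/hasNone flags are updated.
    TDRowValT vt = (val != NULL) ? TD_VTYPE_NORM : TD_VTYPE_NULL;
    tdAppendColValToRow(pRb, pColumn->colId, pColumn->type, vt, val, true, pColumn->offset, k);
  }
  tdSRowEnd(pRb);  // finalize the row, as tdBlockToSubmit does after its per-column loop
  return TSDB_CODE_SUCCESS;
}
```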
@@ -364,6 +364,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;

@@ -53,14 +53,27 @@ static bool dmCheckDiskSpace() {
osUpdate();
if (!osDataSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least, quit", (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
if (!osLogSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least, quit", (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
if (!osTempSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least, quit", (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
return true;
}

static bool dmCheckDataDirVersion() {
char checkDataDirJsonFileName[PATH_MAX];
snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir);
if (taosCheckExistFile(checkDataDirJsonFileName)) {
dError("The default data directory %s contains old data of TDengine 2.x, please clear it before running!", tsDataDir);
return false;
}
return true;

@@ -68,6 +81,7 @@ static bool dmCheckDiskSpace() {

int32_t dmInit(int8_t rtype) {
dInfo("start to init dnode env");
if (!dmCheckDataDirVersion()) return -1;
if (!dmCheckDiskSpace()) return -1;
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
if (dmInitSystem() != 0) return -1;

@@ -32,6 +32,7 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic);
SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw);

int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb);

const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]);

@@ -131,8 +131,9 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
mInfo("receive consumer recover msg, consumer id %" PRId64 ", status %s", pRecoverMsg->consumerId,
mndConsumerStatusName(pConsumer->status));

if (pConsumer->status != MQ_CONSUMER_STATUS__READY) {
if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) {
mndReleaseConsumer(pMnode, pConsumer);
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
return -1;
}

@ -275,6 +276,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
int32_t status = atomic_load_32(&pConsumer->status);
|
||||
|
||||
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
|
||||
mInfo("try to recover consumer %ld", consumerId);
|
||||
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
|
||||
|
||||
pRecoverMsg->consumerId = consumerId;
|
||||
|
@ -305,15 +307,14 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
|
||||
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
|
||||
|
||||
#if 1
|
||||
atomic_store_32(&pConsumer->hbStatus, 0);
|
||||
#endif
|
||||
|
||||
// 1. check consumer status
|
||||
int32_t status = atomic_load_32(&pConsumer->status);
|
||||
|
||||
#if 1
|
||||
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
|
||||
mInfo("try to recover consumer %ld", consumerId);
|
||||
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
|
||||
|
||||
pRecoverMsg->consumerId = consumerId;
|
||||
|
@ -326,6 +327,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
#endif
|
||||
|
||||
if (status != MQ_CONSUMER_STATUS__READY) {
|
||||
mInfo("consumer %ld not ready, status: %s", consumerId, mndConsumerStatusName(status));
|
||||
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
|
||||
return -1;
|
||||
}
|
||||
|
@ -939,13 +941,9 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
|
|||
colDataAppend(pColInfo, numOfRows, NULL, true);
|
||||
}
|
||||
|
||||
// pid
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->pid, true);
|
||||
|
||||
// end point
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true);
|
||||
/*pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);*/
|
||||
/*colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true);*/
|
||||
|
||||
// up time
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
|
|
|
@ -995,11 +995,13 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
|
|||
mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);
|
||||
mndTransSetDbName(pTrans, pDb->name, NULL);
|
||||
|
||||
if (mndCheckTopicExist(pMnode, pDb) < 0) goto _OVER;
|
||||
|
||||
if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
/*if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
|
||||
/*if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
|
||||
/*if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
|
||||
if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
|
@ -1706,7 +1708,10 @@ static void setPerfSchemaDbCfg(SDbObj *pDbObj) {
|
|||
static bool mndGetTablesOfDbFp(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3) {
|
||||
SVgObj *pVgroup = pObj;
|
||||
int32_t *numOfTables = p1;
|
||||
*numOfTables += pVgroup->numOfTables;
|
||||
int64_t uid = *(int64_t *)p2;
|
||||
if (pVgroup->dbUid == uid) {
|
||||
*numOfTables += pVgroup->numOfTables;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1747,7 +1752,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
|
|||
|
||||
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_OR_WRITE_DB, pDb) == 0) {
|
||||
int32_t numOfTables = 0;
|
||||
sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, NULL, NULL);
|
||||
sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, &pDb->uid, NULL);
|
||||
mndDumpDbInfoData(pMnode, pBlock, pDb, pShow, numOfRows, numOfTables, false, objStatus, sysinfo);
|
||||
numOfRows++;
|
||||
}
|
||||
|
|
|
@ -197,6 +197,30 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream) {
|
|||
sdbRelease(pSdb, pStream);
|
||||
}
|
||||
|
||||
static void mndShowStreamStatus(char *dst, SStreamObj *pStream) {
|
||||
int8_t status = atomic_load_8(&pStream->status);
|
||||
if (status == STREAM_STATUS__NORMAL) {
|
||||
strcpy(dst, "normal");
|
||||
} else if (status == STREAM_STATUS__STOP) {
|
||||
strcpy(dst, "stop");
|
||||
} else if (status == STREAM_STATUS__FAILED) {
|
||||
strcpy(dst, "failed");
|
||||
} else if (status == STREAM_STATUS__RECOVER) {
|
||||
strcpy(dst, "recover");
|
||||
}
|
||||
}
|
||||
|
||||
static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) {
|
||||
int8_t trigger = pStream->trigger;
|
||||
if (trigger == STREAM_TRIGGER_AT_ONCE) {
|
||||
strcpy(dst, "at once");
|
||||
} else if (trigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
strcpy(dst, "window close");
|
||||
} else if (trigger == STREAM_TRIGGER_MAX_DELAY) {
|
||||
strcpy(dst, "max delay");
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) {
|
||||
if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 ||
|
||||
pCreate->targetStbFullName[0] == 0) {
|
||||
|
@ -504,6 +528,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static int32_t mndPersistTaskRecoverReq(STrans *pTrans, SStreamTask *pTask) {
|
||||
SMStreamTaskRecoverReq *pReq = taosMemoryCalloc(1, sizeof(SMStreamTaskRecoverReq));
|
||||
if (pReq == NULL) {
|
||||
|
@ -540,7 +565,6 @@ static int32_t mndPersistTaskRecoverReq(STrans *pTrans, SStreamTask *pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t mndRecoverStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
|
||||
if (pStream->isDistributed) {
|
||||
int32_t lv = taosArrayGetSize(pStream->tasks);
|
||||
|
@ -837,7 +861,7 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
|||
sdbCancelFetch(pSdb, pIter);
|
||||
mError("db:%s, failed to drop stream:%s since sourceDbUid:%" PRId64 " not match with targetDbUid:%" PRId64,
|
||||
pDb->name, pStream->name, pStream->sourceDbUid, pStream->targetDbUid);
|
||||
terrno = TSDB_CODE_MND_STREAM_ALREADY_EXIST;
|
||||
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
|
||||
return -1;
|
||||
} else {
|
||||
#if 0
|
||||
|
@ -926,23 +950,46 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)sql, false);
|
||||
|
||||
char status[20 + VARSTR_HEADER_SIZE] = {0};
|
||||
mndShowStreamStatus(&status[VARSTR_HEADER_SIZE], pStream);
|
||||
varDataSetLen(status, strlen(varDataVal(status)));
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->status, true);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&status, false);
|
||||
|
||||
char sourceDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
tNameFromString(&n, pStream->sourceDb, T_NAME_ACCT | T_NAME_DB);
|
||||
tNameGetDbName(&n, varDataVal(sourceDB));
|
||||
varDataSetLen(sourceDB, strlen(varDataVal(sourceDB)));
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->sourceDb, true);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&sourceDB, false);
|
||||
|
||||
char targetDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
tNameFromString(&n, pStream->targetDb, T_NAME_ACCT | T_NAME_DB);
|
||||
tNameGetDbName(&n, varDataVal(targetDB));
|
||||
varDataSetLen(targetDB, strlen(varDataVal(targetDB)));
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->targetDb, true);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&targetDB, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->targetSTbName, true);
|
||||
if (pStream->targetSTbName[0] == 0) {
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, NULL, true);
|
||||
} else {
|
||||
char targetSTB[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
tNameFromString(&n, pStream->targetSTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
strcpy(&targetSTB[VARSTR_HEADER_SIZE], tNameGetTableName(&n));
|
||||
varDataSetLen(targetSTB, strlen(varDataVal(targetSTB)));
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&targetSTB, false);
|
||||
}
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->watermark, false);
|
||||
|
||||
char trigger[20 + VARSTR_HEADER_SIZE] = {0};
|
||||
mndShowStreamTrigger(&trigger[VARSTR_HEADER_SIZE], pStream);
|
||||
varDataSetLen(trigger, strlen(varDataVal(trigger)));
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->trigger, false);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&trigger, false);
|
||||
|
||||
numOfRows++;
|
||||
sdbRelease(pSdb, pStream);
|
||||
|
|
|
@ -398,13 +398,27 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
|
|||
}
|
||||
}
|
||||
|
||||
// 8. TODO generate logs
|
||||
mInfo("rebalance calculation completed, rebalanced vg:");
|
||||
// 8. generate logs
|
||||
mInfo("mq rebalance: calculation completed, rebalanced vg:");
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pOutput->rebVgs); i++) {
|
||||
SMqRebOutputVg *pOutputRebVg = taosArrayGet(pOutput->rebVgs, i);
|
||||
mInfo("vgId:%d, moved from consumer:%" PRId64 ", to consumer:%" PRId64, pOutputRebVg->pVgEp->vgId,
|
||||
mInfo("mq rebalance: vgId:%d, moved from consumer:%" PRId64 ", to consumer:%" PRId64, pOutputRebVg->pVgEp->vgId,
|
||||
pOutputRebVg->oldConsumerId, pOutputRebVg->newConsumerId);
|
||||
}
|
||||
{
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
|
||||
if (pIter == NULL) break;
|
||||
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
|
||||
int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
|
||||
mInfo("mq rebalance: final cfg: consumer %ld has %d vg", pConsumerEp->consumerId, sz);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
|
||||
mInfo("mq rebalance: final cfg: vg %d to consumer %ld", pVgEp->vgId, pConsumerEp->consumerId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 9. clear
|
||||
taosHashCleanup(pHash);
|
||||
|
|
|
@ -782,6 +782,26 @@ static void mndCancelGetNextTopic(SMnode *pMnode, void *pIter) {
|
|||
sdbCancelFetch(pSdb, pIter);
|
||||
}
|
||||
|
||||
int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
|
||||
void *pIter = NULL;
|
||||
SMqTopicObj *pTopic = NULL;
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pTopic->dbUid == pDb->uid) {
|
||||
sdbRelease(pSdb, pTopic);
|
||||
terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
sdbRelease(pSdb, pTopic);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
||||
int32_t code = 0;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
|
|
|
@@ -97,7 +97,7 @@ typedef struct STbUidStore STbUidStore;

int metaOpen(SVnode* pVnode, SMeta** ppMeta);
int metaClose(SMeta* pMeta);
int metaBegin(SMeta* pMeta);
int metaBegin(SMeta* pMeta, int8_t fromSys);
int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);

@@ -157,7 +157,7 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen);

@@ -171,8 +171,8 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);

SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId);
SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);

// sma
int32_t smaInit();

@@ -187,7 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
int32_t smaProcessFetch(SSma *pSma, void* pMsg);
int32_t smaProcessFetch(SSma* pSma, void* pMsg);

int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);

@@ -19,9 +19,12 @@ static FORCE_INLINE void *metaMalloc(void *pPool, size_t size) { return vnodeBuf
static FORCE_INLINE void metaFree(void *pPool, void *p) { vnodeBufPoolFree((SVBufPool *)pPool, p); }

// begin a meta txn
int metaBegin(SMeta *pMeta) {
tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);

int metaBegin(SMeta *pMeta, int8_t fromSys) {
if (fromSys) {
tdbTxnOpen(&pMeta->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
} else {
tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
if (tdbBegin(pMeta->pEnv, &pMeta->txn) < 0) {
return -1;
}

@@ -145,7 +145,7 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
pWriter->sver = sver;
pWriter->ever = ever;

metaBegin(pMeta);
metaBegin(pMeta, 1);

*ppWriter = pWriter;
return code;
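For clarity, a hedged sketch of the calling convention the new fromSys flag above establishes; isSystemInitiated is a placeholder condition, not code from the patch.

```c
// Sketch only: mirrors the two branches of metaBegin() in the hunk above.
if (isSystemInitiated) {
  // system-driven path (e.g. metaSnapWriterOpen above): tdbDefaultMalloc/tdbDefaultFree, no buffer pool
  if (metaBegin(pMeta, 1) < 0) return -1;
} else {
  // regular vnode write path: allocations come from the vnode buffer pool (pVnode->inUse)
  if (metaBegin(pMeta, 0) < 0) return -1;
}
```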
@ -604,28 +604,44 @@ _end:
|
|||
|
||||
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
|
||||
int64_t suid, int8_t blkType) {
|
||||
while (1) {
|
||||
SSDataBlock *output = NULL;
|
||||
uint64_t ts;
|
||||
SArray *pResList = taosArrayInit(1, POINTER_BYTES);
|
||||
if (pResList == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
int32_t code = qExecTask(taskInfo, &output, &ts);
|
||||
while (1) {
|
||||
uint64_t ts;
|
||||
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
|
||||
if (code < 0) {
|
||||
smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
|
||||
pItem->level, terrstr(code));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (output) {
|
||||
#if 0
|
||||
if (taosArrayGetSize(pResList) == 0) {
|
||||
if (terrno == 0) {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
|
||||
} else {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) {
|
||||
SSDataBlock *output = taosArrayGetP(pResList, i);
|
||||
|
||||
#if 1
|
||||
char flag[10] = {0};
|
||||
snprintf(flag, 10, "level %" PRIi8, pItem->level);
|
||||
SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
|
||||
taosArrayPush(pResult, output);
|
||||
blockDebugShowDataBlocks(pResult, flag);
|
||||
taosArrayDestroy(pResult);
|
||||
// blockDebugShowDataBlocks(output, flag);
|
||||
// taosArrayDestroy(pResult);
|
||||
#endif
|
||||
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
|
||||
STsdb * sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
|
||||
SSubmitReq *pReq = NULL;
|
||||
|
||||
// TODO: the schema update should be handled later(TD-17965)
|
||||
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
|
||||
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
|
||||
|
@ -644,17 +660,14 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
|
|||
SMA_VID(pSma), suid, pItem->level, output->info.version);
|
||||
|
||||
taosMemoryFreeClear(pReq);
|
||||
} else if (terrno == 0) {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
|
||||
break;
|
||||
} else {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
taosArrayDestroy(pResList);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_err:
|
||||
taosArrayDestroy(pResList);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -1407,7 +1420,7 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
|
|||
tEncoderClear(&encoder);
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
|
||||
((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
|
||||
((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
|
||||
|
||||
|
@ -1434,10 +1447,10 @@ _err:
|
|||
|
||||
/**
|
||||
* @brief fetch rsma data of level 2/3 and submit
|
||||
*
|
||||
* @param pSma
|
||||
* @param pMsg
|
||||
* @return int32_t
|
||||
*
|
||||
* @param pSma
|
||||
* @param pMsg
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
|
||||
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
|
||||
|
|
|
@@ -119,7 +119,7 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
SName stbFullName = {0};
tNameFromString(&stbFullName, pCfg->dstTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVCreateStbReq pReq = {0};
pReq.name = (char*)tNameGetTableName(&stbFullName);
pReq.name = (char *)tNameGetTableName(&stbFullName);
pReq.suid = pCfg->dstTbUid;
pReq.schemaRow = pCfg->schemaRow;
pReq.schemaTag = pCfg->schemaTag;

@@ -200,8 +200,10 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
goto _err;
}

SSubmitReq *pSubmitReq = tdBlockToSubmit((const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId);
SBatchDeleteReq deleteReq;
SSubmitReq *pSubmitReq =
tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);

if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),

@@ -230,4 +232,4 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
_err:
tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_FAILED;
}
}

@ -183,7 +183,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) {
|
||||
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver) {
|
||||
STqOffset offset = {0};
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, msg, msgLen);
|
||||
|
@ -302,6 +302,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
tqError("tmq poll: consumer handle mismatch for consumer:%" PRId64
|
||||
", in vgId:%d, subkey %s, handle consumer id %" PRId64,
|
||||
consumerId, TD_VID(pTq->pVnode), pReq->subKey, pHandle->consumerId);
|
||||
terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -777,6 +778,7 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
|
|||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||
SStreamTaskRecoverReq* pReq = pMsg->pCont;
|
||||
int32_t taskId = pReq->taskId;
|
||||
|
@ -789,18 +791,6 @@ int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t taskId = pRsp->taskId;
|
||||
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
|
||||
if (pTask) {
|
||||
streamProcessDispatchRsp(pTask, pRsp);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||
SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
|
||||
int32_t taskId = pRsp->rspTaskId;
|
||||
|
@ -813,6 +803,19 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
|
|||
return -1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t taskId = pRsp->taskId;
|
||||
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
|
||||
if (pTask) {
|
||||
streamProcessDispatchRsp(pTask, pRsp);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
|
||||
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
|
||||
|
@ -873,6 +876,8 @@ void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
|
|||
.code = 0,
|
||||
};
|
||||
streamProcessDispatchReq(pTask, &req, &rsp, false);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosFreeQitem(pMsg);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -883,4 +888,6 @@ FAIL:
|
|||
.info = pMsg->info,
|
||||
};
|
||||
tmsgSendRsp(&rsp);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosFreeQitem(pMsg);
|
||||
}
|
||||
|
|
|
@@ -325,7 +325,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader) {
for (int32_t i = 0; i < colActual; i++) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
SCellVal sVal = {0};
if (!tdSTSRowIterNext(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
if (!tdSTSRowIterFetch(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
break;
}
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) {

@ -13,10 +13,44 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tq.h"
|
||||
|
||||
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid,
|
||||
const char* stbFullName, int32_t vgId) {
|
||||
int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
|
||||
SBatchDeleteReq* deleteReq) {
|
||||
ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT);
|
||||
int32_t totRow = pDataBlock->info.rows;
|
||||
SColumnInfoData* pTsCol = taosArrayGet(pDataBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
for (int32_t row = 0; row < totRow; row++) {
|
||||
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
|
||||
/*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/
|
||||
int64_t groupId = 0;
|
||||
char* name = buildCtbNameByGroupId(stbFullName, groupId);
|
||||
tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name);
|
||||
SMetaReader mr = {0};
|
||||
metaReaderInit(&mr, pVnode->pMeta, 0);
|
||||
if (metaGetTableEntryByName(&mr, name) < 0) {
|
||||
metaReaderClear(&mr);
|
||||
taosMemoryFree(name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int64_t uid = mr.me.uid;
|
||||
metaReaderClear(&mr);
|
||||
taosMemoryFree(name);
|
||||
SSingleDeleteReq req = {
|
||||
.ts = ts,
|
||||
.uid = uid,
|
||||
};
|
||||
taosArrayPush(deleteReq->deleteReqs, &req);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
|
||||
int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) {
|
||||
SSubmitReq* ret = NULL;
|
||||
SArray* schemaReqs = NULL;
|
||||
SArray* schemaReqSz = NULL;
|
||||
|
@ -33,10 +67,17 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
schemaReqSz = taosArrayInit(sz, sizeof(int32_t));
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.groupId,
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
|
||||
int32_t padding1 = 0;
|
||||
void* padding2 = taosMemoryMalloc(1);
|
||||
taosArrayPush(schemaReqSz, &padding1);
|
||||
taosArrayPush(schemaReqs, &padding2);
|
||||
}
|
||||
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.groupId,
|
||||
};
|
||||
STag* pTag = NULL;
|
||||
taosArrayClear(tagArray);
|
||||
|
@ -94,7 +135,10 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
int32_t cap = sizeof(SSubmitReq);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
|
||||
continue;
|
||||
}
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
// TODO min
|
||||
int32_t rowSize = pDataBlock->info.rowSize;
|
||||
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
|
||||
|
@ -116,6 +160,11 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
SSubmitBlk* blkHead = POINTER_SHIFT(ret, sizeof(SSubmitReq));
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
|
||||
pDeleteReq->suid = suid;
|
||||
tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
|
||||
continue;
|
||||
}
|
||||
|
||||
blkHead->numOfRows = htonl(pDataBlock->info.rows);
|
||||
blkHead->sversion = htonl(pTSchema->version);
|
||||
|
@ -157,6 +206,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
|
||||
}
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
|
@ -176,17 +226,47 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
}
|
||||
|
||||
void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
||||
const SArray* pRes = (const SArray*)data;
|
||||
SVnode* pVnode = (SVnode*)vnode;
|
||||
const SArray* pRes = (const SArray*)data;
|
||||
SVnode* pVnode = (SVnode*)vnode;
|
||||
SBatchDeleteReq deleteReq = {0};
|
||||
|
||||
tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, (int32_t)pRes->size);
|
||||
|
||||
ASSERT(pTask->tbSink.pTSchema);
|
||||
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
|
||||
pTask->tbSink.stbFullName, pVnode->config.vgId);
|
||||
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
|
||||
SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
|
||||
pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);
|
||||
|
||||
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
|
||||
|
||||
int32_t code;
|
||||
int32_t len;
|
||||
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
|
||||
if (code < 0) {
|
||||
//
|
||||
ASSERT(0);
|
||||
}
|
||||
SEncoder encoder;
|
||||
void* buf = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
|
||||
tEncoderInit(&encoder, abuf, len);
|
||||
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
((SMsgHead*)buf)->vgId = pVnode->config.vgId;
|
||||
|
||||
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
|
||||
SRpcMsg msg = {
|
||||
.msgType = TDMT_VND_BATCH_DEL,
|
||||
.pCont = buf,
|
||||
.contLen = len + sizeof(SMsgHead),
|
||||
};
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqDebug("failed to put into write-queue since %s", terrstr());
|
||||
}
|
||||
}
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
|
||||
/*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/
|
||||
// build write msg
|
||||
SRpcMsg msg = {
|
||||
|
|
|
@ -393,15 +393,16 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
|
|||
SMemSkipListNode *px;
|
||||
SMemSkipListNode *pn;
|
||||
TSDBKEY *pTKey;
|
||||
int c;
|
||||
int backward = flags & SL_MOVE_BACKWARD;
|
||||
int fromPos = flags & SL_MOVE_FROM_POS;
|
||||
int32_t backward = flags & SL_MOVE_BACKWARD;
|
||||
int32_t fromPos = flags & SL_MOVE_FROM_POS;
|
||||
|
||||
if (backward) {
|
||||
px = pTbData->sl.pTail;
|
||||
|
||||
for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
|
||||
pos[iLevel] = px;
|
||||
if (!fromPos) {
|
||||
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
|
||||
pos[iLevel] = px;
|
||||
}
|
||||
}
|
||||
|
||||
if (pTbData->sl.level) {
|
||||
|
@ -412,7 +413,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
|
|||
while (pn != pTbData->sl.pHead) {
|
||||
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);
|
||||
|
||||
c = tsdbKeyCmprFn(pTKey, pKey);
|
||||
int32_t c = tsdbKeyCmprFn(pTKey, pKey);
|
||||
if (c <= 0) {
|
||||
break;
|
||||
} else {
|
||||
|
@ -427,8 +428,10 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
|
|||
} else {
|
||||
px = pTbData->sl.pHead;
|
||||
|
||||
for (int8_t iLevel = pTbData->sl.maxLevel - 1; iLevel >= pTbData->sl.level; iLevel--) {
|
||||
pos[iLevel] = px;
|
||||
if (!fromPos) {
|
||||
for (int8_t iLevel = pTbData->sl.level; iLevel < pTbData->sl.maxLevel; iLevel++) {
|
||||
pos[iLevel] = px;
|
||||
}
|
||||
}
|
||||
|
||||
if (pTbData->sl.level) {
|
||||
|
@ -437,9 +440,7 @@ static void tbDataMovePosTo(STbData *pTbData, SMemSkipListNode **pos, TSDBKEY *p
|
|||
for (int8_t iLevel = pTbData->sl.level - 1; iLevel >= 0; iLevel--) {
|
||||
pn = SL_NODE_FORWARD(px, iLevel);
|
||||
while (pn != pTbData->sl.pTail) {
|
||||
pTKey = (TSDBKEY *)SL_NODE_DATA(pn);
|
||||
|
||||
c = tsdbKeyCmprFn(pTKey, pKey);
|
||||
int32_t c = tsdbKeyCmprFn(SL_NODE_DATA(pn), pKey);
|
||||
if (c >= 0) {
|
||||
break;
|
||||
} else {
|
||||
|
@ -480,36 +481,44 @@ static int32_t tbDataDoPut(SMemTable *pMemTable, STbData *pTbData, SMemSkipListN
|
|||
goto _exit;
|
||||
}
|
||||
pNode->level = level;
|
||||
for (int8_t iLevel = 0; iLevel < level; iLevel++) {
|
||||
SL_NODE_FORWARD(pNode, iLevel) = NULL;
|
||||
SL_NODE_BACKWARD(pNode, iLevel) = NULL;
|
||||
}
|
||||
|
||||
tPutTSDBRow((uint8_t *)SL_NODE_DATA(pNode), pRow);
|
||||
|
||||
// put
|
||||
for (int8_t iLevel = 0; iLevel < pNode->level; iLevel++) {
|
||||
SMemSkipListNode *px = pos[iLevel];
|
||||
for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
|
||||
SMemSkipListNode *pn = pos[iLevel];
|
||||
SMemSkipListNode *px;
|
||||
|
||||
if (forward) {
|
||||
SMemSkipListNode *pNext = SL_NODE_FORWARD(px, iLevel);
|
||||
|
||||
SL_NODE_FORWARD(pNode, iLevel) = pNext;
|
||||
SL_NODE_BACKWARD(pNode, iLevel) = px;
|
||||
|
||||
SL_NODE_BACKWARD(pNext, iLevel) = pNode;
|
||||
SL_NODE_FORWARD(px, iLevel) = pNode;
|
||||
} else {
|
||||
SMemSkipListNode *pPrev = SL_NODE_BACKWARD(px, iLevel);
|
||||
px = SL_NODE_FORWARD(pn, iLevel);
|
||||
|
||||
SL_NODE_BACKWARD(pNode, iLevel) = pn;
|
||||
SL_NODE_FORWARD(pNode, iLevel) = px;
|
||||
SL_NODE_BACKWARD(pNode, iLevel) = pPrev;
|
||||
} else {
|
||||
px = SL_NODE_BACKWARD(pn, iLevel);
|
||||
|
||||
SL_NODE_FORWARD(pPrev, iLevel) = pNode;
|
||||
SL_NODE_BACKWARD(px, iLevel) = pNode;
|
||||
SL_NODE_BACKWARD(pNode, iLevel) = px;
|
||||
SL_NODE_FORWARD(pNode, iLevel) = pn;
|
||||
}
|
||||
}
|
||||
|
||||
for (int8_t iLevel = level - 1; iLevel >= 0; iLevel--) {
|
||||
SMemSkipListNode *pn = pos[iLevel];
|
||||
SMemSkipListNode *px;
|
||||
|
||||
if (forward) {
|
||||
px = SL_NODE_FORWARD(pn, iLevel);
|
||||
|
||||
SL_NODE_FORWARD(pn, iLevel) = pNode;
|
||||
SL_NODE_BACKWARD(px, iLevel) = pNode;
|
||||
} else {
|
||||
px = SL_NODE_BACKWARD(pn, iLevel);
|
||||
|
||||
SL_NODE_FORWARD(px, iLevel) = pNode;
|
||||
SL_NODE_BACKWARD(pn, iLevel) = pNode;
|
||||
}
|
||||
|
||||
pos[iLevel] = pNode;
|
||||
}
|
||||
|
||||
pTbData->sl.size++;
|
||||
if (pTbData->sl.level < pNode->level) {
|
||||
pTbData->sl.level = pNode->level;
|
||||
|
@ -548,7 +557,7 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
|
|||
// forward put rest data
|
||||
row.pTSRow = tGetSubmitBlkNext(&blkIter);
|
||||
if (row.pTSRow) {
|
||||
for (int8_t iLevel = 0; iLevel < pTbData->sl.maxLevel; iLevel++) {
|
||||
for (int8_t iLevel = pos[0]->level; iLevel < pTbData->sl.maxLevel; iLevel++) {
|
||||
pos[iLevel] = SL_NODE_BACKWARD(pos[iLevel], iLevel);
|
||||
}
|
||||
do {
|
||||
|
|
|
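The two pointer-fixing loops added above can be read as one per-level link operation on a doubly linked skip list. A hedged, generic restatement of that rewiring (the helper name is an illustration and not part of the patch); after it runs, the caller advances pos[lvl] to the new node, exactly as the second loop in the diff does.

```c
// Sketch only: per-level link update equivalent to the forward/backward cases of tbDataDoPut().
// `pos` is the predecessor when inserting forward and the successor when inserting backward.
static void slLinkAtLevel(SMemSkipListNode *pNode, SMemSkipListNode *pos, int8_t lvl, bool forward) {
  if (forward) {
    SMemSkipListNode *pNext = SL_NODE_FORWARD(pos, lvl);
    SL_NODE_BACKWARD(pNode, lvl) = pos;    // pos <- pNode
    SL_NODE_FORWARD(pNode, lvl) = pNext;   // pNode -> old successor
    SL_NODE_FORWARD(pos, lvl) = pNode;     // pos -> pNode
    SL_NODE_BACKWARD(pNext, lvl) = pNode;  // old successor <- pNode
  } else {
    SMemSkipListNode *pPrev = SL_NODE_BACKWARD(pos, lvl);
    SL_NODE_BACKWARD(pNode, lvl) = pPrev;
    SL_NODE_FORWARD(pNode, lvl) = pos;
    SL_NODE_FORWARD(pPrev, lvl) = pNode;
    SL_NODE_BACKWARD(pos, lvl) = pNode;
  }
}
```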
@ -14,6 +14,7 @@
|
|||
*/
|
||||
|
||||
#include "tsdb.h"
|
||||
#include "osDef.h"
|
||||
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
|
||||
|
||||
typedef enum {
|
||||
|
@ -129,7 +130,8 @@ struct STsdbReader {
|
|||
SBlockLoadSuppInfo suppInfo;
|
||||
STsdbReadSnap* pReadSnap;
|
||||
SIOCostSummary cost;
|
||||
STSchema* pSchema;
|
||||
STSchema* pSchema;     // the newest version of the schema
STSchema* pMemSchema;  // the previous schema for in-memory data, to avoid loading the schema too many times
|
||||
SDataFReader* pFileReader;
|
||||
SVersionRange verRange;
|
||||
|
||||
|
@ -145,15 +147,14 @@ static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanI
|
|||
SRowMerger* pMerger);
|
||||
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
|
||||
STsdbReader* pReader);
|
||||
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
|
||||
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
|
||||
static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
|
||||
int32_t rowIndex);
|
||||
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
|
||||
static void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader);
|
||||
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
|
||||
|
||||
static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
|
||||
STsdbReader* pReader);
|
||||
STsdbReader* pReader, bool* freeTSRow);
|
||||
static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
|
||||
STSRow** pTSRow);
|
||||
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
|
||||
|
@ -181,6 +182,7 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
|
|||
|
||||
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
|
||||
pSupInfo->buildBuf[i] = taosMemoryMalloc(pCol->info.bytes);
|
||||
// tsdbInfo("-------------------%d\n", pCol->info.bytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -673,6 +675,7 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_
|
|||
colDataAppendNULL(pColInfoData, rowIndex);
|
||||
} else {
|
||||
varDataSetLen(pSup->buildBuf[colIndex], pColVal->value.nData);
|
||||
ASSERT(pColVal->value.nData <= pColInfoData->info.bytes);
|
||||
memcpy(varDataVal(pSup->buildBuf[colIndex]), pColVal->value.pData, pColVal->value.nData);
|
||||
colDataAppend(pColInfoData, rowIndex, pSup->buildBuf[colIndex], false);
|
||||
}
|
||||
|
@ -1210,6 +1213,51 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
|
|||
return code;
|
||||
}
|
||||
|
||||
static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key, SFileBlockDumpInfo* pDumpInfo) {
|
||||
|
||||
// opt version
// 1. it is not a border point
// 2. the next point is not a duplicated timestamp
|
||||
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
|
||||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
|
||||
int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;
|
||||
|
||||
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
|
||||
if (nextKey != key) { // merge is not needed
|
||||
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
|
||||
pDumpInfo->rowIndex += step;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
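A small worked example of the fast-path test in tryCopyDistinctRowFromFileBlock above; the timestamps are invented for illustration.

```c
// Ascending scan over aTSKEY = {3, 5, 5, 9}:
//   rowIndex = 0, key = 3, next key = 5 -> distinct row, copy it directly and advance rowIndex
//   rowIndex = 1, key = 5, next key = 5 -> a duplicate follows, fall back to the merge path
//   rowIndex = 3 (the last row)         -> border point, the fast path is skipped entirely
```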
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
|
||||
// always set the newest schema version in pReader->pSchema
|
||||
if (pReader->pSchema == NULL) {
|
||||
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1);
|
||||
}
|
||||
|
||||
if (sversion == pReader->pSchema->version) {
|
||||
return pReader->pSchema;
|
||||
}
|
||||
|
||||
if (pReader->pMemSchema == NULL) {
|
||||
int32_t code =
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
|
||||
return pReader->pMemSchema;
|
||||
}
|
||||
|
||||
if (pReader->pMemSchema->version == sversion) {
|
||||
return pReader->pMemSchema;
|
||||
}
|
||||
|
||||
taosMemoryFree(pReader->pMemSchema);
|
||||
int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pMemSchema);
|
||||
return pReader->pMemSchema;
|
||||
}
|
||||
|
||||
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
|
||||
SIterInfo* pIter, int64_t key) {
|
||||
SRowMerger merge = {0};
|
||||
|
@ -1220,16 +1268,23 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
SArray* pDelList = pBlockScanInfo->delSkyline;
|
||||
bool freeTSRow = false;
|
||||
uint64_t uid = pBlockScanInfo->uid;
|
||||
|
||||
// ascending order traverse
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) {
|
||||
if (key < k.ts) {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
// imem & mem are all empty, only file exist
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else if (k.ts < key) { // k.ts < key
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
} else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
|
@ -1238,32 +1293,41 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else { // descending order scan
|
||||
if (key < k.ts) {
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader);
|
||||
doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
} else if (k.ts < key) {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tRowMergerInit(&merge, &fRow, pReader->pSchema);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
} else { // descending order: mem rows -----> imem rows ------> file block
|
||||
updateSchema(pRow, pBlockScanInfo->uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
freeTSRow = true;
|
||||
}
|
||||
}
|
||||
|
||||
tRowMergerClear(&merge);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1280,12 +1344,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
ASSERT(pRow != NULL && piRow != NULL);
|
||||
|
||||
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
|
||||
bool freeTSRow = false;
|
||||
|
||||
uint64_t uid = pBlockScanInfo->uid;
|
||||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
TSDBKEY ik = TSDBROW_KEY(piRow);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->order)) {
|
||||
// [1&2] key <= [k.ts && ik.ts]
|
||||
if (key <= k.ts && key <= ik.ts) {
|
||||
|
@ -1305,7 +1369,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else { // key > ik.ts || key > k.ts
|
||||
ASSERT(key != ik.ts);
|
||||
|
@ -1313,16 +1377,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts < key <= k.ts
|
||||
// [4] ik.ts < k.ts <= key
|
||||
if (ik.ts < k.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// [5] k.ts < key <= ik.ts
|
||||
// [6] k.ts < ik.ts <= key
|
||||
if (k.ts < ik.ts) {
|
||||
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1331,16 +1401,17 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
ASSERT(key > ik.ts && key > k.ts);
|
||||
|
||||
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
} else { // descending order scan
|
||||
// [1/2] k.ts >= ik.ts && k.ts >= key
|
||||
if (k.ts >= ik.ts && k.ts >= key) {
|
||||
updateSchema(pRow, uid, pReader);
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
tRowMergerInit(&merge, pRow, pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
if (ik.ts == k.ts) {
|
||||
|
@ -1355,7 +1426,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
ASSERT(ik.ts != k.ts); // this case has been included in the previous if branch
|
||||
|
@ -1363,8 +1434,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
// [3] ik.ts > k.ts >= Key
|
||||
// [4] ik.ts > key >= k.ts
|
||||
if (ik.ts > key) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
if (freeTSRow) {
|
||||
taosMemoryFree(pTSRow);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1376,19 +1450,22 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
//[7] key = ik.ts > k.ts
|
||||
if (key == ik.ts) {
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
|
||||
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
|
||||
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
|
||||
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
@@ -1443,34 +1520,23 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
}

// imem & mem are all empty, only file exist
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);

// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return TSDB_CODE_SUCCESS;
}
STSRow* pTSRow = NULL;
SRowMerger merge = {0};

tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);

taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}

TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);

STSRow* pTSRow = NULL;
SRowMerger merge = {0};

tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);

taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
}
@@ -2083,7 +2149,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}

TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
TSDBKEY key = TSDBROW_KEY(pRow);
TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
if (outOfTimeWindow(key.ts, &pReader->window)) {
pIter->hasVal = false;
return NULL;
@@ -2116,6 +2182,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
}

int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader) {
while (1) {
@@ -2136,22 +2203,8 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
break;
}

int32_t sversion = TSDBROW_SVERSION(pRow);
STSchema* pTSchema = NULL;
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
if (pReader->pSchema == NULL) {
pReader->pSchema = pTSchema;
}
} else {
pTSchema = pReader->pSchema;
}

STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, uid);
tRowMergerAdd(pMerger, pRow, pTSchema);

if (pTSchema != pReader->pSchema) {
taosMemoryFree(pTSchema);
}
}

return TSDB_CODE_SUCCESS;
@@ -2259,42 +2312,50 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}

void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) {
int32_t sversion = TSDBROW_SVERSION(pRow);

if (pReader->pSchema == NULL) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
} else if (pReader->pSchema->version != sversion) {
taosMemoryFreeClear(pReader->pSchema);
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
}
}

void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader) {
STsdbReader* pReader, bool* freeTSRow) {

TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;

{ // if the timestamp of the next valid row has a different ts, return current row directly
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);

if (!pIter->hasVal) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
} else { // has next point in mem/imem
pNextRow = getValidRow(pIter, pDelList, pReader);
if (pNextRow == NULL) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
}

if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
return;
}
}
}

SRowMerger merge = {0};

TSDBKEY k = TSDBROW_KEY(pRow);
// updateSchema(pRow, uid, pReader);
int32_t sversion = TSDBROW_SVERSION(pRow);
STSchema* pTSchema = NULL;
if (pReader->pSchema == NULL || sversion != pReader->pSchema->version) {
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
if (pReader->pSchema == NULL) {
pReader->pSchema = pTSchema;
}
} else {
pTSchema = pReader->pSchema;
}
// get the correct schema for data in memory
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(&current), pReader, uid);

tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema);
doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader);
tRowMergerInit2(&merge, pReader->pSchema, &current, pTSchema);

STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
tRowMergerAdd(&merge, pNextRow, pTSchema1);

doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
tRowMergerGetRow(&merge, pTSRow);
tRowMergerClear(&merge);

if (pTSchema != pReader->pSchema) {
taosMemoryFree(pTSchema);
}
*freeTSRow = true;
}

void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
@@ -2305,17 +2366,17 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
TSDBKEY ik = TSDBROW_KEY(piRow);

if (ASCENDING_TRAVERSE(pReader->order)) { // ascending order imem --> mem
updateSchema(piRow, pBlockScanInfo->uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);

tRowMergerInit(&merge, piRow, pReader->pSchema);
tRowMergerInit(&merge, piRow, pSchema);
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);

tRowMerge(&merge, pRow);
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
} else {
updateSchema(pRow, pBlockScanInfo->uid, pReader);
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);

tRowMergerInit(&merge, pRow, pReader->pSchema);
tRowMergerInit(&merge, pRow, pSchema);
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);

tRowMerge(&merge, piRow);
@@ -2326,7 +2387,7 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
}

int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow,
int64_t endKey) {
int64_t endKey, bool* freeTSRow) {
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
@@ -2352,35 +2413,36 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
TSDBKEY ik = TSDBROW_KEY(piRow);

if (ik.ts < k.ts) { // ik.ts < k.ts
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
} else if (k.ts < ik.ts) {
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
} else { // ik.ts == k.ts
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
*freeTSRow = true;
}

return TSDB_CODE_SUCCESS;
}

if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader);
doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
return TSDB_CODE_SUCCESS;
}

if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader);
doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
return TSDB_CODE_SUCCESS;
}

return TSDB_CODE_SUCCESS;
}

int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow) {
int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid) {
int32_t numOfRows = pBlock->info.rows;
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);

SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
STSchema* pSchema = pReader->pSchema;
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);

SColVal colVal = {0};
int32_t i = 0, j = 0;
@@ -2396,7 +2458,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
col_id_t colId = pColInfoData->info.colId;

if (colId == pSchema->columns[j].colId) {
tTSRowGetVal(pTSRow, pReader->pSchema, j, &colVal);
tTSRowGetVal(pTSRow, pSchema, j, &colVal);
doCopyColVal(pColInfoData, numOfRows, i, &colVal, pSupInfo);
i += 1;
j += 1;
@@ -2466,13 +2528,16 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e

do {
STSRow* pTSRow = NULL;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey);
bool freeTSRow = false;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey, &freeTSRow);
if (pTSRow == NULL) {
break;
}

doAppendRowFromTSRow(pBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
doAppendRowFromTSRow(pBlock, pReader, pTSRow, pBlockScanInfo->uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
}

// no data in buffer, return immediately
if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) {
@@ -2631,6 +2696,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
STsdbReader* pPrevReader = pReader->innerReader[0];
SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter;

code = tsdbTakeReadSnap(pPrevReader->pTsdb, &pPrevReader->pReadSnap);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}

initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order,
pPrevReader->idStr);
resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
@@ -2660,7 +2730,6 @@ void tsdbReaderClose(STsdbReader* pReader) {
}

SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;

tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);

taosMemoryFreeClear(pSupInfo->plist);
@@ -2688,16 +2757,15 @@ void tsdbReaderClose(STsdbReader* pReader) {
SIOCostSummary* pCost = &pReader->cost;

tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
" SMA-time:%.2f ms, "
"fileBlocks:%" PRId64
", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo "
"size:%.2f Kb %s",
" SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);

taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
taosMemoryFree(pReader->pMemSchema);
taosMemoryFreeClear(pReader);
}
@@ -1520,22 +1520,22 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
}
case TSDB_DATA_TYPE_FLOAT: {
pColAgg->sum += colVal.value.f;
if (pColAgg->min > colVal.value.f) {
pColAgg->min = colVal.value.f;
*(double*)(&pColAgg->sum) += colVal.value.f;
if (*(double*)(&pColAgg->min) > colVal.value.f) {
*(double*)(&pColAgg->min) = colVal.value.f;
}
if (pColAgg->max < colVal.value.f) {
pColAgg->max = colVal.value.f;
if (*(double*)(&pColAgg->max) < colVal.value.f) {
*(double*)(&pColAgg->max) = colVal.value.f;
}
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
pColAgg->sum += colVal.value.d;
if (pColAgg->min > colVal.value.d) {
pColAgg->min = colVal.value.d;
*(double*)(&pColAgg->sum) += colVal.value.d;
if (*(double*)(&pColAgg->min) > colVal.value.d) {
*(double*)(&pColAgg->min) = colVal.value.d;
}
if (pColAgg->max < colVal.value.d) {
pColAgg->max = colVal.value.d;
if (*(double*)(&pColAgg->max) < colVal.value.d) {
*(double*)(&pColAgg->max) = colVal.value.d;
}
break;
}
@@ -42,7 +42,7 @@ int vnodeBegin(SVnode *pVnode) {

pVnode->state.commitID++;
// begin meta
if (metaBegin(pVnode->pMeta) < 0) {
if (metaBegin(pVnode->pMeta, 0) < 0) {
vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
@@ -29,6 +29,7 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);

int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
@@ -144,7 +145,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
int32_t len;
int32_t ret;

vTrace("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
version);

pVnode->state.applied = version;
@@ -190,6 +191,9 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
case TDMT_VND_DELETE:
if (vnodeProcessDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_BATCH_DEL:
if (vnodeProcessBatchDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
/* TQ */
case TDMT_VND_MQ_VG_CHANGE:
if (tqProcessVgChangeReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
@@ -204,7 +208,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
break;
case TDMT_VND_MQ_COMMIT_OFFSET:
if (tqProcessOffsetCommitReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
pMsg->contLen - sizeof(SMsgHead)) < 0) {
pMsg->contLen - sizeof(SMsgHead), version) < 0) {
goto _err;
}
break;
@@ -334,14 +338,14 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
case TDMT_STREAM_TASK_DISPATCH:
// return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, pInfo->workerId != 0);
return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, true);
case TDMT_STREAM_TASK_RECOVER:
return tqProcessTaskRecoverReq(pVnode->pTq, pMsg);
/*case TDMT_STREAM_TASK_RECOVER:*/
/*return tqProcessTaskRecoverReq(pVnode->pTq, pMsg);*/
case TDMT_STREAM_RETRIEVE:
return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_DISPATCH_RSP:
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
/*case TDMT_STREAM_TASK_RECOVER_RSP:*/
/*return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);*/
case TDMT_STREAM_RETRIEVE_RSP:
return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg);
default:
@@ -1053,6 +1057,24 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
return 0;
}

static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
SBatchDeleteReq deleteReq;
SDecoder decoder;
tDecoderInit(&decoder, pReq, len);
tDecodeSBatchDeleteReq(&decoder, &deleteReq);

int32_t sz = taosArrayGetSize(deleteReq.deleteReqs);
for (int32_t i = 0; i < sz; i++) {
SSingleDeleteReq *pOneReq = taosArrayGet(deleteReq.deleteReqs, i);
int32_t code = tsdbDeleteTableData(pVnode->pTsdb, version, deleteReq.suid, pOneReq->uid, pOneReq->ts, pOneReq->ts);
if (code) {
// TODO
}
}
taosArrayDestroy(deleteReq.deleteReqs);
return 0;
}

static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
SDecoder *pCoder = &(SDecoder){0};
@@ -19,7 +19,7 @@
#include "catalogInt.h"

extern SCatalogMgmt gCtgMgmt;
SCtgDebug gCTGDebug = {.lockEnable = true};
SCtgDebug gCTGDebug = {0};

void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
ASSERT(*(int32_t*)param == 1);
@@ -883,6 +883,32 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
CTG_RET(code);
}

int32_t ctgHashValueComp(void const *lp, void const *rp) {
uint32_t *key = (uint32_t *)lp;
SVgroupInfo *pVg = *(SVgroupInfo **)rp;

if (*key < pVg->hashBegin) {
return -1;
} else if (*key > pVg->hashEnd) {
return 1;
}

return 0;
}

int ctgVgInfoComp(const void* lp, const void* rp) {
SVgroupInfo *pLeft = *(SVgroupInfo **)lp;
SVgroupInfo *pRight = *(SVgroupInfo **)rp;
if (pLeft->hashBegin < pRight->hashBegin) {
return -1;
} else if (pLeft->hashBegin > pRight->hashBegin) {
return 1;
}

return 0;
}

int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
@@ -923,9 +949,19 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
}
}

taosHashCancelIterate(dbInfo->vgHash, pIter);
return TSDB_CODE_SUCCESS;
}

SArray* pVgList = taosArrayInit(vgNum, POINTER_BYTES);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
taosArrayPush(pVgList, &pIter);
pIter = taosHashIterate(dbInfo->vgHash, pIter);
}

taosArraySort(pVgList, ctgVgInfoComp);

char tbFullName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFullName, "%s.", dbFName);
int32_t offset = strlen(tbFullName);
@@ -940,25 +976,20 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo

uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);

void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
vgInfo = pIter;
if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) {
taosHashCancelIterate(dbInfo->vgHash, pIter);
break;
}

pIter = taosHashIterate(dbInfo->vgHash, pIter);
vgInfo = NULL;
}
SVgroupInfo **p = taosArraySearch(pVgList, &hashValue, ctgHashValueComp, TD_EQ);

if (NULL == vgInfo) {
if (NULL == p) {
ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, dbFName, taosHashGetSize(dbInfo->vgHash));
ASSERT(0);
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

vgInfo = *p;

SVgroupInfo* pNewVg = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == pNewVg) {
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}

@@ -977,6 +1008,8 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
}
}

taosArrayDestroy(pVgList);

CTG_RET(code);
}
|
@ -43,12 +43,14 @@ extern "C" {
|
|||
#define EXPLAIN_GROUP_SORT_FORMAT "Group Sort"
|
||||
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s"
|
||||
#define EXPLAIN_MERGE_INTERVAL_FORMAT "Merge Interval on Column %s"
|
||||
#define EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT "Merge Aligned Interval on Column %s"
|
||||
#define EXPLAIN_FILL_FORMAT "Fill"
|
||||
#define EXPLAIN_SESSION_FORMAT "Session"
|
||||
#define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s"
|
||||
#define EXPLAIN_PARITION_FORMAT "Partition on Column %s"
|
||||
#define EXPLAIN_ORDER_FORMAT "Order: %s"
|
||||
#define EXPLAIN_FILTER_FORMAT "Filter: "
|
||||
#define EXPLAIN_MERGEBLOCKS_FORMAT "Merge ResBlocks: %s"
|
||||
#define EXPLAIN_FILL_VALUE_FORMAT "Fill Values: "
|
||||
#define EXPLAIN_ON_CONDITIONS_FORMAT "Join Cond: "
|
||||
#define EXPLAIN_TIMERANGE_FORMAT "Time Range: [%" PRId64 ", %" PRId64 "]"
|
||||
|
@ -56,9 +58,11 @@ extern "C" {
|
|||
#define EXPLAIN_TIME_WINDOWS_FORMAT "Time Window: interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c"
|
||||
#define EXPLAIN_WINDOW_FORMAT "Window: gap=%" PRId64
|
||||
#define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
|
||||
#define EXPLAIN_MERGE_FORMAT "Merge"
|
||||
#define EXPLAIN_MERGE_FORMAT "SortMerge"
|
||||
#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
|
||||
#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s"
|
||||
#define EXPLAIN_PARTITION_KETS_FORMAT "Partition Key: "
|
||||
#define EXPLAIN_INTERP_FORMAT "Interp"
|
||||
|
||||
#define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
|
||||
#define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"
|
||||
|
@ -69,6 +73,7 @@ extern "C" {
|
|||
#define EXPLAIN_LEFT_PARENTHESIS_FORMAT " ("
|
||||
#define EXPLAIN_RIGHT_PARENTHESIS_FORMAT ")"
|
||||
#define EXPLAIN_BLANK_FORMAT " "
|
||||
#define EXPLAIN_COMMA_FORMAT ", "
|
||||
#define EXPLAIN_COST_FORMAT "cost=%.2f..%.2f"
|
||||
#define EXPLAIN_ROWS_FORMAT "rows=%" PRIu64
|
||||
#define EXPLAIN_COLUMNS_FORMAT "columns=%d"
|
||||
|
@ -86,6 +91,7 @@ extern "C" {
|
|||
#define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s"
|
||||
#define EXPLAIN_OFFSET_FORMAT "offset=%d"
|
||||
#define EXPLAIN_SOFFSET_FORMAT "soffset=%d"
|
||||
#define EXPLAIN_PARTITIONS_FORMAT "partitions=%d"
|
||||
|
||||
#define COMMAND_RESET_LOG "resetLog"
|
||||
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"
|
||||
|
|
|
@ -535,6 +535,13 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (NULL != pTblScanNode->pGroupTags) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pTblScanNode->pGroupTags->length);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
|
||||
if (pTblScanNode->scan.node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pTblScanNode->scan.node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
|
@ -617,13 +624,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pPrjNode->mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (pPrjNode->node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -707,7 +718,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pAggNode->mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -906,13 +921,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: {
|
||||
SMergeAlignedIntervalPhysiNode *pIntNode = (SMergeAlignedIntervalPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_ALIGNED_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
@ -950,7 +969,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
}
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pIntNode->window.mergeDataBlock? "True":"False");
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1122,6 +1145,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_PARTITION_KETS_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PARTITIONS_FORMAT, pPartNode->pPartitionKeys->length);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
if (pPartNode->node.pConditions) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
|
||||
QRY_ERR_RET(nodesNodeToSQL(pPartNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
|
||||
|
@ -1194,11 +1222,20 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
|
||||
if (pMergeNode->groupSort) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc");
|
||||
if (LIST_LENGTH(pMergeNode->pMergeKeys) > 0) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) {
|
||||
SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pMergeNode->pMergeKeys, i);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, nodesGetNameFromColumnNode(ptn->pExpr));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, EXPLAIN_ORDER_STRING(ptn->order));
|
||||
if (i != LIST_LENGTH(pMergeNode->pMergeKeys) - 1) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_COMMA_FORMAT);
|
||||
}
|
||||
}
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
|
@ -1410,7 +1447,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: {
|
||||
SInterpFuncPhysiNode *pInterpNode = (SInterpFuncPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT);
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERP_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
|
|
@ -52,13 +52,6 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
|
|||
|
||||
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
|
||||
|
||||
#define START_TS_COLUMN_INDEX 0
|
||||
#define END_TS_COLUMN_INDEX 1
|
||||
#define UID_COLUMN_INDEX 2
|
||||
#define GROUPID_COLUMN_INDEX 3
|
||||
#define CALCULATE_START_TS_COLUMN_INDEX 4
|
||||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
|
||||
enum {
|
||||
// when this task starts to execute, this status will set
|
||||
TASK_NOT_COMPLETED = 0x1u,
|
||||
|
@ -266,7 +259,11 @@ typedef struct SExchangeInfo {
|
|||
SArray* pSourceDataInfo;
|
||||
tsem_t ready;
|
||||
void* pTransporter;
|
||||
SSDataBlock* pResult;
|
||||
// SArray<SSDataBlock*>, result block list, used to keep the multi-block that
|
||||
// passed by downstream operator
|
||||
SArray* pResultBlockList;
|
||||
int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
|
||||
SSDataBlock* pDummyBlock; // dummy block, not keep data
|
||||
bool seqLoadData; // sequential load data or not, false by default
|
||||
int32_t current;
|
||||
SLoadRemoteDataInfo loadInfo;
|
||||
|
@ -520,6 +517,7 @@ typedef struct SBlockDistInfo {
|
|||
typedef struct SOptrBasicInfo {
|
||||
SResultRowInfo resultRowInfo;
|
||||
SSDataBlock* pRes;
|
||||
bool mergeResultBlock;
|
||||
} SOptrBasicInfo;
|
||||
|
||||
typedef struct SIntervalAggOperatorInfo {
|
||||
|
@ -554,8 +552,6 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo {
|
|||
uint64_t groupId; // current groupId
|
||||
int64_t curTs; // current ts
|
||||
SSDataBlock* prefetchedBlock;
|
||||
bool inputBlocksFinished;
|
||||
|
||||
SNode* pCondition;
|
||||
} SMergeAlignedIntervalAggOperatorInfo;
|
||||
|
||||
|
@ -594,7 +590,6 @@ typedef struct SAggOperatorInfo {
|
|||
uint64_t groupId;
|
||||
SGroupResInfo groupResInfo;
|
||||
SExprSupp scalarExprSup;
|
||||
|
||||
SNode *pCondition;
|
||||
} SAggOperatorInfo;
|
||||
|
||||
|
@ -699,6 +694,7 @@ typedef struct SSessionAggOperatorInfo {
|
|||
typedef struct SResultWindowInfo {
|
||||
SResultRowPosition pos;
|
||||
STimeWindow win;
|
||||
uint64_t groupId;
|
||||
bool isOutput;
|
||||
bool isClosed;
|
||||
} SResultWindowInfo;
|
||||
|
@ -739,6 +735,7 @@ typedef struct STimeSliceOperatorInfo {
|
|||
SArray* pPrevRow; // SArray<SGroupValue>
|
||||
SArray* pNextRow; // SArray<SGroupValue>
|
||||
SArray* pLinearInfo; // SArray<SFillLinearInfo>
|
||||
bool fillLastPoint;
|
||||
bool isPrevRowSet;
|
||||
bool isNextRowSet;
|
||||
int32_t fillType; // fill type
|
||||
|
@ -860,9 +857,10 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLi
|
|||
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
|
||||
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);
|
||||
|
||||
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
|
||||
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
|
||||
SArray* pColList);
|
||||
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart);
|
||||
void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
|
||||
SOperatorInfo* pOperator);
|
||||
|
||||
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
|
||||
|
||||
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
|
||||
|
@ -897,7 +895,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
|
|||
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
|
||||
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
|
||||
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
|
||||
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
|
||||
|
@ -912,14 +910,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
|
|||
|
||||
SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
SNode* pCondition, SExecTaskInfo* pTaskInfo);
|
||||
SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
||||
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
|
||||
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
|
||||
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
|
||||
STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
|
||||
|
@ -1011,9 +1009,8 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
|
|||
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
|
||||
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
|
||||
bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
|
||||
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
|
||||
TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted);
|
||||
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
|
||||
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
|
||||
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
|
||||
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
|
||||
void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);
|
||||
|
|
|
@ -37,7 +37,6 @@ typedef struct SFillLinearInfo {
|
|||
SPoint start;
|
||||
SPoint end;
|
||||
bool hasNull;
|
||||
bool fillLastPoint;
|
||||
int16_t type;
|
||||
int32_t bytes;
|
||||
} SFillLinearInfo;
|
||||
|
|
|
@ -50,7 +50,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
|
|||
|
||||
STableListInfo* pTableList = &pTaskInfo->tableqinfoList;
|
||||
|
||||
initResultSizeInfo(&pOperator->resultInfo, 1024);
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
|
||||
pInfo->pUidList = taosArrayInit(4, sizeof(int64_t));
|
||||
|
||||
|
|
|
@ -69,10 +69,10 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
|
|||
|
||||
// clang-format off
|
||||
// data format:
|
||||
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// |SDataCacheEntry | total length | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
|
||||
// | |sizeof(int32) |sizeof(uint64_t) |(sizeof(int16_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
|
||||
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// |SDataCacheEntry | version | total length | numOfRows | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
|
||||
// | | sizeof(int32_t) |sizeof(int32) | sizeof(int32)| sizeof(uint64_t) | (sizeof(int8_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
|
||||
// +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
|
||||
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
|
||||
// recorded in the first segment, next to the struct header
|
||||
// clang-format on
|
||||
|
|
|
@ -213,6 +213,10 @@ int32_t dataBlockToSubmit(SDataInserterHandle* pInserter, SSubmitReq** pReq) {
|
|||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
|
||||
}
|
||||
}
|
||||
if(!fullCol) {
|
||||
rb.hasNone = true;
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
|
||||
if (ignoreRow) {
|
||||
continue;
|
||||
|
|
|
@ -348,12 +348,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
taosThreadOnce(&initPoolOnce, initRefPool);
|
||||
atexit(cleanupRefPool);
|
||||
|
||||
qDebug("start to create subplan task, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
|
||||
|
||||
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100};
|
||||
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000};
|
||||
code = dsDataSinkMgtInit(&cfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
|
@ -372,6 +374,8 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
}
|
||||
}
|
||||
|
||||
qDebug("subplan task create completed, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
|
||||
|
||||
_error:
|
||||
// if failed to add ref for all tables in this query, abort current query
|
||||
return code;
|
||||
|
@ -422,6 +426,80 @@ int waitMoment(SQInfo* pQInfo) {
|
|||
}
|
||||
#endif
|
||||
|
||||
static void freeBlock(void* param) {
|
||||
SSDataBlock* pBlock = *(SSDataBlock**) param;
|
||||
blockDataDestroy(pBlock);
|
||||
}
|
||||
|
||||
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
int64_t threadId = taosGetSelfPthreadId();
|
||||
|
||||
taosArrayClearEx(pResList, freeBlock);
|
||||
|
||||
int64_t curOwner = 0;
|
||||
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
|
||||
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
|
||||
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
if (pTaskInfo->cost.start == 0) {
|
||||
pTaskInfo->cost.start = taosGetTimestampMs();
|
||||
}
|
||||
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// error occurs, record the error code and return to client
|
||||
int32_t ret = setjmp(pTaskInfo->env);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
pTaskInfo->code = ret;
|
||||
cleanUpUdfs();
|
||||
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
|
||||
|
||||
int32_t current = 0;
|
||||
SSDataBlock* pRes = NULL;
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
while((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
|
||||
SSDataBlock* p = createOneDataBlock(pRes, true);
|
||||
current += p->info.rows;
|
||||
ASSERT(p->info.rows > 0);
|
||||
taosArrayPush(pResList, &p);
|
||||
|
||||
if (current >= 4096) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t el = (taosGetTimestampUs() - st);
|
||||
|
||||
pTaskInfo->cost.elapsedTime += el;
|
||||
if (NULL == pRes) {
|
||||
*useconds = pTaskInfo->cost.elapsedTime;
|
||||
}
|
||||
|
||||
cleanUpUdfs();
|
||||
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
|
||||
|
||||
qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
|
||||
GET_TASKID(pTaskInfo), current, (int32_t) taosArrayGetSize(pResList), total, 0, el / 1000.0);
|
||||
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
int64_t threadId = taosGetSelfPthreadId();
|
||||
|
|
|
@ -88,7 +88,7 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
|
|||
|
||||
static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
|
||||
|
||||
static void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
|
||||
static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
|
||||
|
||||
static void releaseQueryBuf(size_t numOfTables);
|
||||
|
||||
|
@ -136,9 +136,8 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
|
|||
|
||||
void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
|
||||
|
||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo,
|
||||
SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset,
|
||||
SqlFunctionCtx* pCtx, int32_t numOfExprs);
|
||||
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo);
|
||||
|
||||
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
|
||||
static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
|
||||
|
@ -434,7 +433,8 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pC
|
|||
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
|
||||
pCtx[i].order = order;
|
||||
pCtx[i].input.numOfRows = pBlock->info.rows;
|
||||
setBlockStatisInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
|
||||
setBlockSMAInfo(&pCtx[i], &pOperator->exprSupp.pExprInfo[i], pBlock);
|
||||
pCtx[i].pSrcBlock = pBlock;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -799,7 +799,7 @@ static int32_t doCreateConstantValColumnAggInfo(SInputColumnInfoData* pInput, SF
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
|
||||
void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pBlock) {
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
|
||||
SInputColumnInfoData* pInput = &pCtx->input;
|
||||
|
@ -1012,16 +1012,6 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
|
|||
// }
|
||||
//}
|
||||
|
||||
// static FORCE_INLINE bool doFilterByBlockStatistics(STaskRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis,
|
||||
// SqlFunctionCtx *pCtx, int32_t numOfRows) {
|
||||
// STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
//
|
||||
// if (pDataStatis == NULL || pQueryAttr->pFilters == NULL) {
|
||||
// return true;
|
||||
// }
|
||||
//
|
||||
// return filterRangeExecute(pQueryAttr->pFilters, pDataStatis, pQueryAttr->numOfCols, numOfRows);
|
||||
// }
|
||||
#if 0
|
||||
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
|
||||
STimeWindow w = {0};
|
||||
|
@ -1216,7 +1206,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
|
|||
}
|
||||
|
||||
// current block has been discard due to filter applied
|
||||
// if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
// if (!doFilterByBlockSMA(pRuntimeEnv, pBlock->pBlockAgg, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
// pCost->skipBlocks += 1;
|
||||
// qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
|
||||
// pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
|
@ -1287,8 +1277,12 @@ void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
|
|||
}
|
||||
|
||||
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
|
||||
bool init = false;
|
||||
for (int32_t i = 0; i < numOfOutput; ++i) {
|
||||
pCtx[i].resultInfo = getResultEntryInfo(pResult, i, rowEntryInfoOffset);
|
||||
if (init) {
|
||||
continue;
|
||||
}
|
||||
|
||||
struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
|
||||
if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
|
||||
|
@ -1305,6 +1299,8 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
|
|||
} else {
|
||||
pResInfo->initialized = true;
|
||||
}
|
||||
} else {
|
||||
init = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1512,19 +1508,24 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset, SqlFunctionCtx* pCtx,
|
||||
int32_t numOfExprs) {
|
||||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
int32_t start = pGroupResInfo->index;
|
||||
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
|
||||
SGroupResInfo* pGroupResInfo) {
|
||||
SExprInfo* pExprInfo = pSup->pExprInfo;
|
||||
int32_t numOfExprs = pSup->numOfExprs;
|
||||
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
|
||||
SqlFunctionCtx* pCtx = pSup->pCtx;
|
||||
|
||||
for (int32_t i = start; i < numOfRows; i += 1) {
|
||||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
|
||||
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
|
||||
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
|
||||
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
|
||||
|
||||
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
|
||||
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
|
||||
|
||||
// no results, continue to check the next one
|
||||
if (pRow->numOfRows == 0) {
|
||||
pGroupResInfo->index += 1;
|
||||
releaseBufPage(pBuf, page);
|
||||
|
@ -1542,6 +1543,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
}
|
||||
|
||||
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
|
||||
ASSERT(pBlock->info.rows > 0);
|
||||
releaseBufPage(pBuf, page);
|
||||
break;
|
||||
}
|
||||
|
@ -1551,7 +1553,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
for (int32_t j = 0; j < numOfExprs; ++j) {
|
||||
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
|
||||
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
|
||||
if (pCtx[j].fpSet.finalize) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("\npage_finalize %d", numOfExprs);
|
||||
|
@ -1584,26 +1586,19 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
|
||||
releaseBufPage(pBuf, page);
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
|
||||
// break;
|
||||
// }
|
||||
}
|
||||
|
||||
qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows,
|
||||
pBlock->info.groupId);
|
||||
|
||||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
|
||||
SDiskbasedBuf* pBuf) {
|
||||
SExprInfo* pExprInfo = pOperator->exprSupp.pExprInfo;
|
||||
int32_t numOfExprs = pOperator->exprSupp.numOfExprs;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
int32_t* rowCellOffset = pOperator->exprSupp.rowEntryInfoOffset;
|
||||
SSDataBlock* pBlock = pbInfo->pRes;
|
||||
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
|
||||
SSDataBlock* pBlock = pbInfo->pRes;
|
||||
|
||||
// set output datablock version
|
||||
pBlock->info.version = pTaskInfo->version;
|
||||
|
@ -1615,30 +1610,22 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
|
|||
|
||||
// clear the existed group id
|
||||
pBlock->info.groupId = 0;
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);
|
||||
}
|
||||
|
||||
static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,
|
||||
int32_t* rowEntryInfoOffset) {
|
||||
// update the number of result for each, only update the number of rows for the corresponding window result.
|
||||
// if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
|
||||
// return;
|
||||
// }
|
||||
#if 0
|
||||
for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
|
||||
SResultRow* pResult = pResultRowInfo->pResult[i];
|
||||
|
||||
for (int32_t j = 0; j < numOfOutput; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) {
|
||||
continue;
|
||||
if (!pbInfo->mergeResultBlock) {
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
|
||||
} else {
|
||||
while(hasRemainResults(pGroupResInfo)) {
|
||||
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
|
||||
if (pBlock->info.rows >= pOperator->resultInfo.threshold) {
|
||||
break;
|
||||
}
|
||||
|
||||
SResultRowEntryInfo* pCell = getResultEntryInfo(pResult, j, rowEntryInfoOffset);
|
||||
pResult->numOfRows = (uint16_t)(TMAX(pResult->numOfRows, pCell->numOfRes));
|
||||
// clearing group id to continue to merge data that belong to different groups
|
||||
pBlock->info.groupId = 0;
|
||||
}
|
||||
|
||||
// clear the group id info in SSDataBlock, since the client does not need it
|
||||
pBlock->info.groupId = 0;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t compressQueryColData(SColumnInfoData* pColRes, int32_t numOfRows, char* data, int8_t compressed) {
|
||||
|
@ -1962,6 +1949,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pWrapper->exchangeId);
|
||||
if (pExchangeInfo == NULL) {
|
||||
qWarn("failed to acquire exchange operator, since it may have been released");
|
||||
taosMemoryFree(pMsg->pData);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1976,10 +1964,13 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
pRsp->compLen = htonl(pRsp->compLen);
|
||||
pRsp->numOfCols = htonl(pRsp->numOfCols);
|
||||
pRsp->useconds = htobe64(pRsp->useconds);
|
||||
pRsp->numOfBlocks = htonl(pRsp->numOfBlocks);
|
||||
|
||||
ASSERT(pRsp != NULL);
|
||||
qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
|
||||
qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks,
|
||||
pRsp->numOfRows);
|
||||
} else {
|
||||
taosMemoryFree(pMsg->pData);
|
||||
pSourceDataInfo->code = code;
|
||||
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
|
||||
}
|
||||
|
@@ -2062,13 +2053,19 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInf
return TSDB_CODE_SUCCESS;
}

int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList) {
void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
SOperatorInfo* pOperator) {
pInfo->totalRows += numOfRows;
pInfo->totalSize += dataLen;
pInfo->totalElapsed += (taosGetTimestampUs() - startTs);
pOperator->resultInfo.totalRows += numOfRows;
}

int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList,
char** pNextStart) {
if (pColList == NULL) { // data from other sources
blockDataCleanup(pRes);
// blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pRes, numOfOutput, numOfRows, pData);
*pNextStart = (char*) blockDecode(pRes, pData);
} else { // extract data according to pColList
ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;

@@ -2092,28 +2089,17 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLo
blockDataAppendColInfo(pBlock, &idata);
}

blockDecode(pBlock, numOfCols, numOfRows, pStart);
blockDataEnsureCapacity(pRes, numOfRows);
blockDecode(pBlock, pStart);
blockDataEnsureCapacity(pRes, pBlock->info.rows);

// data from mnode
pRes->info.rows = numOfRows;
pRes->info.rows = pBlock->info.rows;
relocateColumnData(pRes, pColList, pBlock->pDataBlock, false);
blockDataDestroy(pBlock);
}

// todo move this to time window aggregator, since the primary timestamp may not be known by exchange operator.
blockDataUpdateTsWindow(pRes, 0);

int64_t el = taosGetTimestampUs() - startTs;

pLoadInfo->totalRows += numOfRows;
pLoadInfo->totalSize += compLen;

if (total != NULL) {
*total += numOfRows;
}

pLoadInfo->totalElapsed += el;
return TSDB_CODE_SUCCESS;
}

@@ -2135,8 +2121,8 @@ static void* setAllSourcesCompleted(SOperatorInfo* pOperator, int64_t startTs) {
return NULL;
}

static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
int64_t startTs = taosGetTimestampUs();
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);

@@ -2162,7 +2148,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
SRetrieveTableRsp* pRsp = pDataInfo->pRsp;
SDownstreamSourceNode* pSource = taosArrayGet(pExchangeInfo->pSources, i);

SSDataBlock* pRes = pExchangeInfo->pResult;
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pRsp->numOfRows == 0) {
qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64

@@ -2175,29 +2160,35 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
continue;
}

SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
int32_t index = 0;
char* pStart = pRetrieveRsp->data;
while(index++ < pRetrieveRsp->numOfBlocks) {
SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false);
code = extractDataBlockFromFetchRsp(pb, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}

taosArrayPush(pExchangeInfo->pResultBlockList, &pb);
}

updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);

if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64
" execId:%d"
" index:%d completed, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64
", completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRes->info.rows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, completed + 1, i + 1, totalSources);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d"
" index:%d completed, blocks:%d, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", total:%.2f Kb,"
" completed:%d try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRsp->numOfBlocks,
pRsp->numOfRows, pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize / 1024.0,
completed + 1, i + 1, totalSources);
completed += 1;
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d blocks:%d, numOfRows:%d, totalRows:%" PRIu64
", total:%.2f Kb", GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId,
pRsp->numOfBlocks, pRsp->numOfRows, pLoadInfo->totalRows, pLoadInfo->totalSize/1024.0);
}

taosMemoryFreeClear(pDataInfo->pRsp);
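With the change above, one fetch response can carry numOfBlocks serialized blocks laid out back to back. The loop decodes them one at a time, letting each decode call report where the next block starts, and buffers every decoded block in pResultBlockList. The following self-contained sketch mimics that count-driven, cursor-advancing loop with an invented length-prefixed layout; it illustrates the pattern only and is not TDengine's block encoding (allocation checks are omitted for brevity).

```c
// Decode N back-to-back blocks from one buffer and collect them in a list.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int32_t rows;
} DemoBlock;

// Decode one block and report where the following block begins (cursor pattern).
static const char* demoDecodeBlock(DemoBlock* out, const char* pStart) {
  int32_t len = 0;
  memcpy(&len, pStart, sizeof(len));                      // payload length prefix
  memcpy(&out->rows, pStart + sizeof(len), sizeof(out->rows));
  return pStart + sizeof(len) + len;                      // next block starts here
}

int main(void) {
  // Build a buffer with two fake blocks: [len][rows] each, len == sizeof(int32_t).
  char buf[2 * (sizeof(int32_t) + sizeof(int32_t))];
  int32_t len = (int32_t)sizeof(int32_t);
  int32_t rows1 = 500, rows2 = 250;
  memcpy(buf, &len, sizeof(len));
  memcpy(buf + 4, &rows1, sizeof(rows1));
  memcpy(buf + 8, &len, sizeof(len));
  memcpy(buf + 12, &rows2, sizeof(rows2));

  int32_t numOfBlocks = 2;
  DemoBlock** list = calloc((size_t)numOfBlocks, sizeof(DemoBlock*));
  const char* cursor = buf;

  // Count-driven loop: decode each block and buffer it for later consumption.
  for (int32_t i = 0; i < numOfBlocks; ++i) {
    DemoBlock* pb = calloc(1, sizeof(DemoBlock));
    cursor = demoDecodeBlock(pb, cursor);
    list[i] = pb;
  }

  for (int32_t i = 0; i < numOfBlocks; ++i) {
    printf("block %d: %d rows\n", (int)i, (int)list[i]->rows);
    free(list[i]);
  }
  free(list);
  return 0;
}
```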
@@ -2211,11 +2202,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
}
}

return pExchangeInfo->pResult;
return;
}

if (completed == totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return;
}

sched_yield();

@@ -2223,7 +2215,6 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx

_error:
pTaskInfo->code = code;
return NULL;
}

static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {

@@ -2253,7 +2244,7 @@ static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}

static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
static int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

@@ -2262,7 +2253,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {

while (1) {
if (pExchangeInfo->current >= totalSources) {
return setAllSourcesCompleted(pOperator, startTs);
setAllSourcesCompleted(pOperator, startTs);
return TSDB_CODE_SUCCESS;
}

doSendFetchDataRequest(pExchangeInfo, pTaskInfo, pExchangeInfo->current);

@@ -2275,7 +2267,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo),
pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code));
pOperator->pTaskInfo->code = pDataInfo->code;
return NULL;
return pOperator->pTaskInfo->code;
}

SRetrieveTableRsp* pRsp = pDataInfo->pRsp;

@@ -2292,16 +2284,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
continue;
}

SSDataBlock* pRes = pExchangeInfo->pResult;
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
int32_t code =
extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;

char* pStart = pRetrieveRsp->data;
int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);

if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64
", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1,
totalSources);

@@ -2310,13 +2301,15 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
} else {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64
", totalBytes:%" PRIu64,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRetrieveRsp->numOfRows,
pLoadInfo->totalRows, pLoadInfo->totalSize);
}

pOperator->resultInfo.totalRows += pRes->info.rows;
updateLoadRemoteInfo(pLoadInfo, pRetrieveRsp->numOfRows, pRetrieveRsp->compLen, startTs, pOperator);
pDataInfo->totalRows += pRetrieveRsp->numOfRows;

taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
return TSDB_CODE_SUCCESS;
}
}

@@ -2340,6 +2333,11 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}

static void freeBlock(void* pParam) {
SSDataBlock* pBlock = *(SSDataBlock**)pParam;
blockDataDestroy(pBlock);
}

static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
SExchangeInfo* pExchangeInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

@@ -2349,9 +2347,9 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}

size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);

SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
if (pOperator->status == OP_EXEC_DONE) {
qDebug("%s all %" PRIzu " source(s) are exhausted, total rows:%" PRIu64 " bytes:%" PRIu64 ", elapsed:%.2f ms",
GET_TASKID(pTaskInfo), totalSources, pLoadInfo->totalRows, pLoadInfo->totalSize,

@@ -2359,11 +2357,23 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) {
return NULL;
}

if (pExchangeInfo->seqLoadData) {
return seqLoadRemoteData(pOperator);
} else {
return concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
size_t size = taosArrayGetSize(pExchangeInfo->pResultBlockList);
if (size == 0 || pExchangeInfo->rspBlockIndex >= size) {
pExchangeInfo->rspBlockIndex = 0;
taosArrayClearEx(pExchangeInfo->pResultBlockList, freeBlock);
if (pExchangeInfo->seqLoadData) {
seqLoadRemoteData(pOperator);
} else {
concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo);
}

if (taosArrayGetSize(pExchangeInfo->pResultBlockList) == 0) {
return NULL;
}
}

// we have buffered retrieved datablock, return it directly
return taosArrayGetP(pExchangeInfo->pResultBlockList, pExchangeInfo->rspBlockIndex++);
}

static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
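doLoadRemoteDataImpl now acts as a small buffered reader: when the cached block list is exhausted it resets the index, clears the list, refills it through either the sequential or the concurrent load path, and then hands blocks back one at a time. A sketch of that refill-then-serve pattern, with hypothetical names and a plain array standing in for the block list, is shown below.

```c
// Refill-then-serve consumption of a buffered block list (sketch).
#include <stddef.h>
#include <stdio.h>

#define DEMO_CAP 4

typedef struct {
  int    blocks[DEMO_CAP];  // stand-in for buffered block pointers
  size_t size;              // how many blocks are currently buffered
  size_t nextIndex;         // next block to hand out (rspBlockIndex analogue)
  int    rounds;            // how many refills the fake producer will allow
} DemoExchange;

// Fake producer: refills the buffer, or leaves it empty when exhausted.
static void demoRefill(DemoExchange* ex) {
  if (ex->rounds-- <= 0) {
    ex->size = 0;
    return;
  }
  for (size_t i = 0; i < DEMO_CAP; ++i) {
    ex->blocks[i] = ex->rounds * 10 + (int)i;
  }
  ex->size = DEMO_CAP;
}

// Consumer entry point: refill only when the buffered blocks are used up.
static const int* demoNextBlock(DemoExchange* ex) {
  if (ex->size == 0 || ex->nextIndex >= ex->size) {
    ex->nextIndex = 0;
    demoRefill(ex);    // clear-and-refill step
    if (ex->size == 0) {
      return NULL;     // all sources exhausted
    }
  }
  return &ex->blocks[ex->nextIndex++];
}

int main(void) {
  DemoExchange ex = {.rounds = 2};
  const int* b;
  while ((b = demoNextBlock(&ex)) != NULL) {
    printf("got block %d\n", *b);
  }
  return 0;
}
```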
@@ -2380,26 +2390,24 @@ static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
return NULL;
}

ASSERT(pBlock == pExchangeInfo->pResult);

SLimitInfo* pLimitInfo = &pExchangeInfo->limitInfo;
if (hasLimitOffsetInfo(pLimitInfo)) {
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pExchangeInfo->pResult, false);
int32_t status = handleLimitOffset(pOperator, pLimitInfo, pBlock, false);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
size_t rows = pExchangeInfo->pResult->info.rows;
size_t rows = pBlock->info.rows;
pExchangeInfo->limitInfo.numOfOutputRows += rows;

if (rows == 0) {
doSetOperatorCompleted(pOperator);
return NULL;
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
} else {
return pExchangeInfo->pResult;
return pBlock;
}
}
}

@@ -2462,16 +2470,18 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode
}

tsem_init(&pInfo->ready, 0, 0);
pInfo->pDummyBlock = createResDataBlock(pExNode->node.pOutputDataBlockDesc);
pInfo->pResultBlockList = taosArrayInit(1, POINTER_BYTES);

pInfo->seqLoadData = false;
pInfo->pTransporter = pTransporter;
pInfo->pResult = createResDataBlock(pExNode->node.pOutputDataBlockDesc);

pOperator->name = "ExchangeOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_EXCHANGE;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pResult->pDataBlock);
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;

pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,

@@ -2990,6 +3000,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
break;
}
}

size_t rows = blockDataGetNumOfRows(pInfo->pRes);
pOperator->resultInfo.totalRows += rows;

@@ -3413,6 +3424,7 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
}
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
qError("Create agg result buf failed since %s", tstrerror(code));
return code;
}

@@ -3432,7 +3444,11 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
return code;
}

doInitAggInfoSup(pAggSup, pSup->pCtx, numOfCols, keyBufSize, pkey);
code = doInitAggInfoSup(pAggSup, pSup->pCtx, numOfCols, keyBufSize, pkey);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

for (int32_t i = 0; i < numOfCols; ++i) {
pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
}
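initAggInfo now checks the code returned by doInitAggInfoSup instead of discarding it, so a failed result-buffer creation surfaces to the caller. A tiny illustration of that early-return style follows; the helpers and the 0/-1 codes are placeholders, not TDengine's error constants.

```c
// Propagate an init helper's error code instead of ignoring it (sketch).
#include <stdio.h>

static int demoInitBuffer(int wantFailure) {
  return wantFailure ? -1 : 0;  // pretend buffer creation can fail
}

static int demoInitAgg(int wantFailure) {
  int code = demoInitBuffer(wantFailure);
  if (code != 0) {
    return code;  // propagate instead of silently continuing
  }
  // ... continue wiring contexts only after the buffer exists ...
  return 0;
}

int main(void) {
  printf("ok path:   %d\n", demoInitAgg(0));
  printf("fail path: %d\n", demoInitAgg(1));
  return 0;
}
```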
@@ -3498,17 +3514,16 @@ void cleanupExprSupp(SExprSupp* pSupp) {

SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo) {
SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}

int32_t numOfRows = 1024;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;

initResultSizeInfo(&pOperator->resultInfo, numOfRows);
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

@@ -3520,6 +3535,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}

pInfo->binfo.mergeResultBlock = mergeResult;
pInfo->groupId = UINT64_MAX;
pInfo->pCondition = pCondition;
pOperator->name = "TableAggregate";

@@ -3601,12 +3617,15 @@ void doDestroyExchangeOperatorInfo(void* param) {

taosArrayDestroy(pExInfo->pSources);
taosArrayDestroy(pExInfo->pSourceDataInfo);
if (pExInfo->pResult != NULL) {
pExInfo->pResult = blockDataDestroy(pExInfo->pResult);

if (pExInfo->pResultBlockList != NULL) {
taosArrayDestroyEx(pExInfo->pResultBlockList, freeBlock);
pExInfo->pResultBlockList = NULL;
}

tsem_destroy(&pExInfo->ready);
blockDataDestroy(pExInfo->pDummyBlock);

tsem_destroy(&pExInfo->ready);
taosMemoryFreeClear(param);
}
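The destroy path above releases the buffered block list with a per-element callback (taosArrayDestroyEx paired with freeBlock) before freeing the rest of the operator state. Below is a self-contained sketch of destroying a pointer list through an element-level callback; the tiny array type is written for the example and is not the taosArray API, and allocation checks are omitted for brevity.

```c
// Destroy a list of heap-allocated items via a per-element free callback (sketch).
#include <stdio.h>
#include <stdlib.h>

typedef void (*FreeFn)(void* item);

typedef struct {
  void** items;
  size_t size;
} DemoArray;

// Free every element through the callback, then the array itself.
static void demoArrayDestroyEx(DemoArray* arr, FreeFn fp) {
  if (arr == NULL) {
    return;
  }
  for (size_t i = 0; i < arr->size; ++i) {
    fp(arr->items[i]);
  }
  free(arr->items);
  free(arr);
}

// Element-level destructor, analogous in role to freeBlock() above.
static void demoFreeBlock(void* item) {
  free(item);
}

int main(void) {
  DemoArray* arr = calloc(1, sizeof(DemoArray));
  arr->size = 3;
  arr->items = calloc(arr->size, sizeof(void*));
  for (size_t i = 0; i < arr->size; ++i) {
    arr->items[i] = malloc(16);  // pretend these are decoded blocks
  }
  demoArrayDestroyEx(arr, demoFreeBlock);
  printf("all blocks released\n");
  return 0;
}
```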
@@ -4071,7 +4090,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;

if (pOperator != NULL) {
pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
}

return pOperator;
}

@@ -4110,7 +4133,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
} else {
pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
pScalarExprInfo, numOfScalarExpr, pAggNode->mergeDataBlock, pTaskInfo);
}
} else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;

@@ -4152,7 +4175,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo

int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
pPhyNode->pConditions, pTaskInfo);
pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;

@@ -4167,7 +4190,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};

int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pTaskInfo);
pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);