Merge branch '3.0' into FIX/TD-24182-3.0
commit 3689b41f3a

README-CN.md (16 lines changed)
|
@ -15,7 +15,7 @@
|
|||
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||
|
||||
Simplified Chinese | [English](README.md) | Many positions are open for hiring; see [here](https://www.taosdata.com/cn/careers/)
|
||||
Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hiring; see [here](https://www.taosdata.com/cn/careers/)
|
||||
|
||||
# Introduction to TDengine
|
||||
|
||||
|
@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench
|
|||
### Ubuntu 18.04 and above & Debian:
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
|
||||
```
|
||||
|
||||
#### Install build dependencies for taos-tools
|
||||
|
@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
|||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
```
|
||||
|
||||
### CentOS 8 & Fedora
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools on CentOS
|
||||
|
@ -88,7 +88,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
|||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Rocky Linux
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
|
@ -101,7 +101,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
|
|||
|
||||
If installing powertools fails, try the following instead:
|
||||
```
|
||||
sudo yum config-manager --set-enabled Powertools
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
```
|
||||
|
||||
#### CentOS + devtoolset
|
||||
|
@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
|
|||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone pkgconfig
|
||||
brew install argp-standalone pkgconfig geos
|
||||
```
|
||||
|
||||
### Set up the golang development environment
|
||||
|
|
README.md (12 lines changed)
|
@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
|
|||
### Ubuntu 18.04 and above or Debian
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools
|
||||
|
@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
|||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
```
|
||||
|
||||
### CentOS 8 & Fedora
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools on CentOS
|
||||
|
@ -94,7 +94,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
|||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Rocky Linux
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
|
@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
|
|||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone pkgconfig
|
||||
brew install argp-standalone pkgconfig geos
|
||||
```
|
||||
|
||||
### Setup golang environment
|
||||
|
|
|
@ -123,8 +123,8 @@ ELSE ()
|
|||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ELSE ()
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ENDIF ()
|
||||
|
||||
# disable all assert
|
||||
|
|
|
@ -64,12 +64,25 @@ IF(${TD_WINDOWS})
|
|||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build geos Win32")
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build geos on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
ELSEIF (TD_DARWIN_64)
|
||||
IF(${BUILD_TEST})
|
||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build geos on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SHARED_LIBS
|
||||
""
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.4.1")
|
||||
SET(TD_VER_NUMBER "3.0.5.0")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
|
||||
# geos
|
||||
ExternalProject_Add(geos
|
||||
GIT_REPOSITORY https://github.com/libgeos/geos.git
|
||||
GIT_TAG 3.11.2
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
|
||||
BINARY_DIR ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -2,6 +2,7 @@
|
|||
# stub
|
||||
ExternalProject_Add(stub
|
||||
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
||||
GIT_TAG 5e903b8e
|
||||
GIT_SUBMODULES "src"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG 565ca21
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 4378702
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -134,6 +134,11 @@ if(${BUILD_ADDR2LINE})
|
|||
endif(NOT ${TD_WINDOWS})
|
||||
endif(${BUILD_ADDR2LINE})
|
||||
|
||||
# geos
|
||||
if(${BUILD_GEOS})
|
||||
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# download dependencies
|
||||
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
|
@ -226,11 +231,16 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
if(${TD_LINUX})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
|
||||
endif(${TD_LINUX})
|
||||
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
||||
|
||||
if(${TD_DARWIN})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
if (${TD_DARWIN_ARM64})
|
||||
set(HAS_ARMV8_CRC true)
|
||||
endif(${TD_DARWIN_ARM64})
|
||||
|
||||
if (${TD_WINDOWS})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||
endif(${TD_WINDOWS})
|
||||
|
@ -243,7 +253,7 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
endif(${TD_DARWIN})
|
||||
|
||||
if(${TD_WINDOWS})
|
||||
option(WITH_JNI "" ON)
|
||||
option(WITH_JNI "" OFF)
|
||||
endif(${TD_WINDOWS})
|
||||
|
||||
if(${TD_WINDOWS})
|
||||
|
@ -255,7 +265,7 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
option(WITH_FALLOCATE "" OFF)
|
||||
option(WITH_JEMALLOC "" OFF)
|
||||
option(WITH_GFLAGS "" OFF)
|
||||
option(PORTABLE "" ON)
|
||||
option(PORTABLE "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
option(FAIL_ON_WARNINGS OFF)
|
||||
|
||||
|
@ -263,8 +273,11 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
option(WITH_BENCHMARK_TOOLS "" OFF)
|
||||
option(WITH_TOOLS "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
|
||||
IF (TD_LINUX)
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
ELSE()
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
ENDIF()
|
||||
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
rocksdb
|
||||
|
@ -470,6 +483,15 @@ if(${BUILD_ADDR2LINE})
|
|||
endif(NOT ${TD_WINDOWS})
|
||||
endif(${BUILD_ADDR2LINE})
|
||||
|
||||
# geos
|
||||
if(${BUILD_GEOS})
|
||||
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
||||
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
geos_c
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
||||
)
|
||||
endif(${BUILD_GEOS})
|
||||
|
||||
# ================================================================================================
|
||||
# Build test
|
||||
|
|
|
@ -4,7 +4,7 @@ if(${BUILD_DOCS})
|
|||
find_package(Doxygen)
|
||||
if (DOXYGEN_FOUND)
|
||||
# Build the doc
|
||||
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
|
||||
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
|
||||
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
|
||||
|
||||
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
|
||||
|
|
|
@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -105,6 +105,12 @@ class Consumer:
|
|||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def assignment(self):
|
||||
pass
|
||||
|
||||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
pass
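# Illustrative usage sketch only (not part of this excerpt): a minimal consumption loop
# exercising poll(), assignment() and close(). The import path, configuration keys,
# subscribe() call and topic name are assumptions based on typical taospy usage.
from taos.tmq import Consumer

consumer = Consumer({"group.id": "group1", "td.connect.ip": "localhost"})
consumer.subscribe(["topic_meters"])          # placeholder topic name
try:
    while True:
        message = consumer.poll(timeout=1.0)  # returns one message, or None on timeout
        if message is None:
            continue
        print(consumer.assignment())          # inspect the currently assigned topic partitions
finally:
    consumer.close()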
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ User-defined functions can be scalar functions or aggregate functions. Scalar fu
|
|||
|
||||
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
|
||||
|
||||
## Implement a UDF in C
|
||||
## Implement a UDF in C
|
||||
|
||||
When you create a user-defined function, you must implement standard interface functions:
|
||||
- For scalar functions, implement the `scalarfn` interface function.
|
||||
|
@ -111,13 +111,13 @@ Interface functions return a value that indicates whether the operation was succ
|
|||
For information about the parameters for interface functions, see Data Model
|
||||
|
||||
#### Scalar Interface
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
|
||||
|
||||
The parameters in the function are defined as follows:
|
||||
- inputDataBlock: The data block to input.
|
||||
- resultColumn: The column to output.
|
||||
|
||||
#### Aggregate Interface
|
||||
|
||||
|
@ -197,7 +197,7 @@ The data structure is described as follows:
|
|||
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
|
||||
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
|
||||
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
|
||||
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
|
||||
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
|
||||
- SUdfInterBuf defines the intermediate structure `buffer` and the number of results in the buffer `numOfResult`.
|
||||
|
||||
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
|
||||
|
@ -270,29 +270,95 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
|||
|
||||
## Implement a UDF in Python
|
||||
|
||||
### Prepare Environment
|
||||
|
||||
1. Prepare Python Environment
|
||||
|
||||
Please follow standard procedure of python environment preparation.
|
||||
|
||||
2. Install Python package `taospyudf`
|
||||
|
||||
```shell
|
||||
pip3 install taospyudf
|
||||
```
|
||||
|
||||
During this process, some C++ code needs to be compiled, so `cmake` and `gcc` must be available on your system. The compiled `libtaospyudf.so` is automatically copied to `/usr/local/lib`. If you are not the root user, use `sudo`. After installation is done, check it using the command below.
|
||||
|
||||
```shell
|
||||
root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
|
||||
-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
|
||||
```
|
||||
|
||||
Then execute the command below.
|
||||
|
||||
```shell
|
||||
ldconfig
|
||||
```
|
||||
|
||||
3. If you want to use third-party Python packages in your Python UDF, set the configuration parameter `UdfdLdLibPath` to the value of `PYTHONPATH` before starting `taosd` (see the sketch after this list).
|
||||
|
||||
4. Launch `taosd` service
|
||||
|
||||
Please refer to [Get Started](../../get-started)
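As mentioned in step 3 above, one way to produce a value for `UdfdLdLibPath` is to print the module search path of the Python interpreter that has your third-party packages installed. The snippet below is only a sketch; paste its output into `taos.cfg` manually.

```python
# Run this with the same python3 that you used to install the third-party packages (e.g. via pip3).
# The colon-joined output can be used as the value of UdfdLdLibPath in taos.cfg.
import sys

print(":".join(p for p in sys.path if p))
```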
|
||||
|
||||
### Interface definition
|
||||
|
||||
#### Introduction to Interface
|
||||
|
||||
Implement the specified interface functions when implementing a UDF in Python.
|
||||
- implement `process` function for the scalar UDF.
|
||||
- implement `start`, `reduce`, `finish` for the aggregate UDF.
|
||||
- implement `init` for initialization and `destroy` for termination.
|
||||
|
||||
### Implement a Scalar UDF in Python
|
||||
#### Scalar UDF Interface
|
||||
|
||||
The implementation of a scalar UDF is described as follows:
|
||||
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
|
||||
Description: this function processes the input datablock; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of type output_type
|
||||
|
||||
#### Aggregate UDF Interface
|
||||
|
||||
The implementation of an aggregate function is described as follows:
|
||||
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
Description: first, start() is invoked to generate the initial result `buffer`; then the input data is divided into multiple row blocks, and reduce() is invoked for each block `inputs` together with the current intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate `buf`. The final result may contain only 0 or 1 rows.
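To make the call sequence concrete, here is a minimal sketch (not part of the original interface description) of an aggregate UDF that simply counts input rows. It assumes the intermediate state is serialized with `pickle`, the same approach used by the aggregate examples later in this document.

```Python
import pickle

def init():
    pass

def destroy():
    pass

def start() -> bytes:
    return pickle.dumps(0)               # initial state: count = 0

def reduce(block, buf: bytes) -> bytes:
    count = pickle.loads(buf)            # deserialize the current intermediate state
    rows, _ = block.shape()
    return pickle.dumps(count + rows)    # add the rows of this block

def finish(buf: bytes):
    return pickle.loads(buf)             # a single value: the total row count
```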
|
||||
|
||||
#### Initialization and Cleanup Interface
|
||||
|
||||
```python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
||||
Description: init() does the work of initialization before processing any data; destroy() does the work of cleanup after the data is processed.
|
||||
|
||||
### Python UDF Template
|
||||
|
||||
#### Scalar Template
|
||||
|
||||
```Python
|
||||
def init():
|
||||
# initialization
|
||||
def destroy():
|
||||
# destroy
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
# process input datablock,
|
||||
# process input datablock,
|
||||
# datablock.data(row, col) is to access the python object in location(row,col)
|
||||
# return tuple object consisted of object of type outputtype
|
||||
# return tuple object consisted of object of type outputtype
|
||||
```
|
||||
|
||||
### Implement an Aggregate UDF in Python
|
||||
Note: process() must be implemented; init() and destroy() must also be defined, even if they do nothing.
|
||||
|
||||
The implementation of an aggregate function is described as follows:
|
||||
#### Aggregate Template
|
||||
|
||||
```Python
|
||||
def init():
|
||||
|
@ -303,41 +369,15 @@ def start() -> bytes:
|
|||
#return serialize(init_state)
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
# deserialize buf to state
|
||||
# reduce the inputs and state into new_state.
|
||||
# use inputs.data(i,j) to access python ojbect of location(i,j)
|
||||
# reduce the inputs and state into new_state.
|
||||
# use inputs.data(i,j) to access python object of location(i,j)
|
||||
# serialize new_state into new_state_bytes
|
||||
return new_state_bytes
|
||||
return new_state_bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
#return obj of type outputtype
|
||||
#return obj of type outputtype
|
||||
```
|
||||
|
||||
### Python UDF Interface Definition
|
||||
|
||||
#### Scalar interface
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
- `input` is a data block two-dimension matrix-like object, of which method `data(row, col)` returns the Python object located at location (`row`, `col`)
|
||||
- return a Python tuple object, of which each item is a Python object of type `output_type`
|
||||
|
||||
#### Aggregate Interface
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(input: datablock, buf: bytes) -> bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
- first `start()` is called to return the initial result in type `bytes`
|
||||
- then the input data are divided into multiple data blocks and for each block `input`, `reduce` is called with the data block `input` and the current result `buf` bytes and generates a new intermediate result buffer.
|
||||
- finally, the `finish` function is called on the intermediate result `buf` and outputs 0 or 1 data of type `output_type`
|
||||
|
||||
|
||||
#### Initialization and Cleanup Interface
|
||||
```Python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
Implement `init` for initialization and `destroy` for termination.
|
||||
Note: an aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result buffer; the input data is then divided into multiple row data blocks, reduce() is invoked for each data block `inputs` together with the intermediate `buf`, and finally finish() is invoked to generate the final result from the intermediate result `buf`.
|
||||
|
||||
### Data Mapping between TDengine SQL and Python UDF
|
||||
|
||||
|
@ -353,15 +393,463 @@ The following table describes the mapping between TDengine SQL data type and Pyt
|
|||
|TIMESTAMP | int |
|
||||
|JSON and other types | Not Supported |
|
||||
|
||||
### Installing Python UDF
|
||||
1. Install Python package `taospyudf` that executes Python UDF
|
||||
```bash
|
||||
sudo pip install taospyudf
|
||||
ldconfig
|
||||
### Development Guide
|
||||
|
||||
In this section we demonstrate several examples of developing UDFs in Python, progressing from simple to more advanced cases. The examples include:
1. A scalar function which accepts a single integer as input and outputs ln(n^2 + 1).
2. A scalar function which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each input and its sequence number, i.e. x1 + 2 * x2 + ... + n * xn.
3. A scalar function which accepts a timestamp and outputs the next closest Sunday. In this case, we demonstrate how to use the third-party library `moment`.
4. An aggregate function which calculates the difference between the maximum and the minimum of a specific column, i.e. the same functionality as the built-in spread().
|
||||
|
||||
In the guide, some debugging skills of using Python UDF will be explained too.
|
||||
|
||||
We assume you are using a Linux system and already have TDengine 3.0.4.0+ and Python 3.x installed.
|
||||
|
||||
Note: **You can't use the print() function to output logs inside a UDF; write logs to a file or use Python's logging module instead.**
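For example, a UDF can route its diagnostics through Python's standard logging module to a file of its own; the path, format and pass-through logic below are arbitrary illustrations, not framework requirements.

```python
import logging

# Any path writable by the taosd/udfd process will do; this one is only an example.
logging.basicConfig(filename="/var/log/taos/myudf.log",
                    level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")

def init():
    logging.info("UDF initialized")

def process(block):
    rows, _ = block.shape()
    logging.debug("processing a block of %d rows", rows)
    return [block.data(i, 0) for i in range(rows)]   # identity pass-through, just for illustration

def destroy():
    logging.info("UDF destroyed")
```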
|
||||
|
||||
#### Sample 1: Simplest UDF
|
||||
|
||||
This scalar UDF accepts an integer as input and outputs ln(n^2 + 1).
|
||||
|
||||
First, create a Python source file on your system and save it, e.g. as `/root/udf/myfun.py`, with the code below.
|
||||
|
||||
```python
|
||||
from math import log
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
def process(block):
|
||||
rows, _ = block.shape()
|
||||
return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
2. If PYTHONPATH is needed to find Python packages when the Python UDF executes, include the PYTHONPATH contents into the udfdLdLibPath variable of the taos.cfg configuration file
|
||||
|
||||
### Python UDF Sample Code
|
||||
|
||||
This program consists of 3 functions. init() and destroy() do nothing, but they must still be defined because they are required parts of a Python UDF. The most important function is process(), which accepts a data block; the data block object has two methods:
1. shape() returns the number of rows and the number of columns of the data block
2. data(i, j) returns the value at (i, j) in the block

The process() function of a scalar UDF returns exactly as many values as there are input rows. We ignore the number of columns here because we only want to compute on the first column.
|
||||
|
||||
Then, we create the UDF using the SQL command below.
|
||||
|
||||
```sql
|
||||
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
|
||||
```
|
||||
|
||||
Here is an example of the output; it may differ slightly depending on the version you are using.
|
||||
|
||||
```shell
|
||||
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
Create OK, 0 row(s) affected (0.005202s)
|
||||
```
|
||||
|
||||
Then use the `show functions` command to verify that the UDF was created successfully.
|
||||
|
||||
```text
|
||||
taos> show functions;
|
||||
name |
|
||||
=================================
|
||||
myfun |
|
||||
Query OK, 1 row(s) in set (0.005767s)
|
||||
```
|
||||
|
||||
Next, we can try to test the function. Before executing the UDF, we need to prepare some data using the command below in TDengine CLI.
|
||||
|
||||
```sql
|
||||
create database test;
|
||||
create table t(ts timestamp, v1 int, v2 int, v3 int);
|
||||
insert into t values('2023-05-01 12:13:14', 1, 2, 3);
|
||||
insert into t values('2023-05-03 08:09:10', 2, 3, 4);
|
||||
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
|
||||
```
|
||||
|
||||
Execute the UDF to test it:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
Unfortunately, the UDF execution failed. We need to check the log of the `udfd` daemon to find out why.
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
```
|
||||
|
||||
Below is the output.
|
||||
|
||||
```text
|
||||
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
|
||||
05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
|
||||
```
|
||||
|
||||
From the error message we can find out that `libtaospyudf.so` was not loaded successfully. Please refer to the [Prepare Environment] section.
|
||||
|
||||
After correcting environment issues, execute the UDF:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1) from t;
|
||||
myfun(v1) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
Now we have finished our first UDF in Python and learned some basic debugging skills.
|
||||
|
||||
#### Sample 2: Abnormal Processing
|
||||
|
||||
The `myfun` UDF example in sample 1 has passed, but it has two drawbacks.
|
||||
|
||||
1. The program accepts only one column of data as input, but it doesn't throw an exception if you pass multiple columns.
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
myfun(v1, v2) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
2. `null` values are not handled; we want the function to return `null` when the input is `null` instead of failing.
|
||||
|
||||
So, we try to optimize the process() function as below.
|
||||
|
||||
```python
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception(f"require 1 parameter but given {cols}")
|
||||
return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
|
||||
Then update the UDF with the command below.
|
||||
|
||||
```sql
|
||||
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
At this time, if we pass two arguments to `myfun`, the execution would fail.
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.014643s)
|
||||
```
|
||||
|
||||
However, the exception is not shown to end user, but displayed in the log file `/var/log/taos/taospyudf.log`
|
||||
|
||||
```text
|
||||
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
|
||||
|
||||
At:
|
||||
/var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
|
||||
|
||||
```
|
||||
|
||||
Now, we have learned how to update a UDF and check the log of a UDF.
|
||||
|
||||
Note: prior to TDengine 3.0.5.0, updating a UDF required restarting the `taosd` service. Since 3.0.5.0, restarting is no longer required.
|
||||
|
||||
#### Sample 3: UDF with n arguments
|
||||
|
||||
A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is a `null` in the input, the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and processes each column. Assume the program is saved as /root/udf/nsum.py:
|
||||
|
||||
```python
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
result = []
|
||||
for i in range(rows):
|
||||
total = 0
|
||||
for j in range(cols):
|
||||
v = block.data(i, j)
|
||||
if v is None:
|
||||
total = None
|
||||
break
|
||||
total += (j + 1) * block.data(i, j)
|
||||
result.append(total)
|
||||
return result
|
||||
```
|
||||
|
||||
Create and test the UDF:
|
||||
|
||||
```sql
|
||||
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
```sql
|
||||
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
|
||||
Insert OK, 1 row(s) affected (0.003675s)
|
||||
|
||||
taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
|
||||
ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
|
||||
================================================================================================
|
||||
2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
|
||||
2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
|
||||
2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
|
||||
2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
|
||||
Query OK, 4 row(s) in set (0.010653s)
|
||||
```
|
||||
|
||||
#### Sample 4: Utilize 3rd party package
|
||||
|
||||
A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`; install it first.
|
||||
|
||||
```shell
|
||||
pip3 install moment
|
||||
```
|
||||
|
||||
Then compose the Python code in /root/udf/nextsunday.py
|
||||
|
||||
```python
|
||||
import moment
|
||||
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception("require only 1 parameter")
|
||||
if not type(block.data(0, 0)) is int:
|
||||
raise Exception("type error")
|
||||
return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
|
||||
for i in range(rows)]
|
||||
```
|
||||
|
||||
The UDF framework maps the TDengine TIMESTAMP type to the Python int type, so this function only accepts an integer representing a timestamp in milliseconds. process() first validates the parameters, then uses `moment` to shift the time to the following Sunday, formats the result, and returns it.
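If you want to sanity-check this logic outside the UDF framework, you can run the same `moment` call chain in an ordinary Python shell; the timestamp literal below is an arbitrary example.

```python
import moment

ts_ms = 1684899194000  # an arbitrary millisecond timestamp (2023-05-24 UTC)
# Same call chain as in nextsunday.py: jump to the Sunday of that week and format it.
print(moment.unix(ts_ms).replace(weekday=7).format('YYYY-MM-DD'))
# expected output: the date of the following Sunday, e.g. '2023-05-28'
```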
|
||||
|
||||
Create and test the UDF.
|
||||
|
||||
```sql
|
||||
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
|
||||
```
|
||||
|
||||
If your `taosd` is started using `systemd`, you may encounter the error below. Next we will show how to debug.
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
|
||||
DB error: udf function execution failure (1.123615s)
|
||||
```
|
||||
|
||||
```shell
|
||||
tail -20 taospyudf.log
|
||||
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
|
||||
```
|
||||
|
||||
This is because `moment` doesn't exist in the default library search path of the Python UDF plugin; check the log file `taospyudf.log` for details.
|
||||
|
||||
```shell
|
||||
grep 'sys path' taospyudf.log | tail -1
|
||||
```
|
||||
|
||||
```text
|
||||
2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
|
||||
```
|
||||
|
||||
You may find that the default library search path is `/lib/python3/dist-packages` (just an example; it may differ on your system), while `moment` is installed to `/usr/local/lib/python3.8/dist-packages` (again, it may differ on your system). Now we change the library search path of the Python UDF plugin.
|
||||
|
||||
Check `sys.path`, which must include the packages you install with pip3 command previously, as shown below:
|
||||
|
||||
```python
|
||||
>>> import sys
|
||||
>>> ":".join(sys.path)
|
||||
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
|
||||
```
|
||||
|
||||
Copy the output and edit /etc/taos/taos.cfg to add the configuration parameter below.
|
||||
|
||||
```shell
|
||||
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
|
||||
```
|
||||
|
||||
Save the file, restart `taosd` with `systemctl restart taosd`, and test again; it will succeed this time.
|
||||
|
||||
Note: If your cluster consists of multiple `taosd` instances, you have to repeat the same process on each of them.
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
ts | nextsunday(ts) |
|
||||
===========================================
|
||||
2023-05-01 12:13:14.000 | 2023-05-07 |
|
||||
2023-05-03 08:09:10.000 | 2023-05-07 |
|
||||
2023-05-10 07:06:05.000 | 2023-05-14 |
|
||||
2023-05-25 09:09:15.000 | 2023-05-28 |
|
||||
Query OK, 4 row(s) in set (1.011474s)
|
||||
```
|
||||
|
||||
#### Sample 5: Aggregate Function
|
||||
|
||||
An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one value. The execution of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(); it takes two arguments: the data to be processed and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
|
||||
|
||||
```python
|
||||
import io
|
||||
import math
|
||||
import pickle
|
||||
|
||||
LOG_FILE: io.TextIOBase = None
|
||||
|
||||
|
||||
def init():
|
||||
global LOG_FILE
|
||||
LOG_FILE = open("/var/log/taos/spread.log", "wt")
|
||||
log("init function myspead success")
|
||||
|
||||
|
||||
def log(o):
|
||||
LOG_FILE.write(str(o) + '\n')
|
||||
|
||||
|
||||
def destroy():
|
||||
log("close log file: spread.log")
|
||||
LOG_FILE.close()
|
||||
|
||||
|
||||
def start():
|
||||
return pickle.dumps((-math.inf, math.inf))
|
||||
|
||||
|
||||
def reduce(block, buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
log(f"initial max_number={max_number}, min_number={min_number}")
|
||||
rows, _ = block.shape()
|
||||
for i in range(rows):
|
||||
v = block.data(i, 0)
|
||||
if v > max_number:
|
||||
log(f"max_number={v}")
|
||||
max_number = v
|
||||
if v < min_number:
|
||||
log(f"min_number={v}")
|
||||
min_number = v
|
||||
return pickle.dumps((max_number, min_number))
|
||||
|
||||
|
||||
def finish(buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
return max_number - min_number
|
||||
```
|
||||
|
||||
In this example, we implemented an aggregate function and added some logging.
1. init() opens a file for logging
2. log() is the logging helper; it converts the input object to a string and writes it followed by a newline
3. destroy() closes the log file
4. start() returns the initial buffer for storing the intermediate result
5. reduce() processes each data block and aggregates the result
6. finish() converts the final buffer into the final result
|
||||
|
||||
Create the UDF.
|
||||
|
||||
```sql
|
||||
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
|
||||
```
|
||||
|
||||
This SQL statement differs from the one used to create a scalar UDF in two important ways:
1. the keyword `aggregate` is used
2. the keyword `bufsize` specifies the memory size for storing the intermediate result. In this example, the intermediate result occupies 32 bytes, but we specified 128 bytes for `bufsize`. You can use the `python` CLI to print the actual size:
|
||||
|
||||
```python
|
||||
>>> len(pickle.dumps((12345.6789, 23456789.9877)))
|
||||
32
|
||||
```
|
||||
|
||||
Test this function; you can see the result is the same as the built-in spread() function.
|
||||
|
||||
```sql
|
||||
taos> select myspread(v1) from t;
|
||||
myspread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.013486s)
|
||||
|
||||
taos> select spread(v1) from t;
|
||||
spread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.005501s)
|
||||
```
|
||||
|
||||
Finally, check the log file. We can see that the reduce() function was executed 3 times; during these calls the max value was updated several times while the min value was updated only once.
|
||||
|
||||
```shell
|
||||
root@slave11 /var/log/taos $ cat spread.log
|
||||
init function myspead success
|
||||
initial max_number=-inf, min_number=inf
|
||||
max_number=1
|
||||
min_number=1
|
||||
initial max_number=1, min_number=1
|
||||
max_number=2
|
||||
max_number=3
|
||||
initial max_number=3, min_number=1
|
||||
max_number=6
|
||||
close log file: spread.log
|
||||
```
|
||||
|
||||
### SQL Commands
|
||||
|
||||
1. Create Scalar UDF
|
||||
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
2. Create Aggregate UDF
|
||||
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
3. Update Scalar UDF
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
4. Update Aggregate UDF
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
Note: if the keyword `AGGREGATE` is used, the UDF is treated as an aggregate UDF regardless of what it was before; similarly, if the keyword is absent, the UDF is treated as a scalar function regardless of what it was before.
|
||||
|
||||
5. Show the UDF
|
||||
|
||||
The version of a UDF is increased by one every time it's updated.
|
||||
|
||||
```sql
|
||||
select * from ins_functions \G;
|
||||
```
|
||||
|
||||
6. Show and Drop existing UDF
|
||||
|
||||
```sql
|
||||
SHOW functions;
|
||||
DROP FUNCTION function_name;
|
||||
```
|
||||
|
||||
### More Python UDF Samples
|
||||
|
||||
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
||||
|
||||
The `pybitand` function implements the bitwise AND operation across multiple columns. If there is only one column, that column is returned. The `pybitand` function ignores null values.
|
||||
|
@ -377,7 +865,7 @@ The `pybitand` function implements bitwise addition for multiple columns. If the
|
|||
|
||||
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
||||
|
||||
The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
|
||||
The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
|
||||
<details>
|
||||
<summary>pyl2norm.py</summary>
|
||||
|
||||
|
@ -387,5 +875,16 @@ The `pyl2norm` function finds the second-order norm for all data in the input co
|
|||
|
||||
</details>
|
||||
|
||||
#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||
|
||||
The `pycumsum` function finds the cumulative sum for all data in the input columns.
|
||||
<details>
|
||||
<summary>pycumsum.py</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/pycumsum.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
## Manage and Use UDF
|
||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
||||
|
|
|
@ -42,11 +42,10 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
|||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
|
||||
:::note
|
||||
|
||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||
- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
- The length of BINARY can be up to 16,374(data column is 65,517 and tag column is 16,382 since version 3.0.5.0) bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||
|
||||
:::
|
||||
|
|
|
@ -45,7 +45,7 @@ table_option: {
|
|||
|
||||
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
|
||||
2. The maximum length of the table name is 192 bytes.
|
||||
3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
|
||||
3. The maximum length of each row is 48k(64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
|
||||
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
|
||||
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
|
||||
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
|
||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
|||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
|
|
@ -889,9 +889,10 @@ ignore_null_values: {
|
|||
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
||||
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
||||
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
|
||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
|
||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
|
||||
|
@ -902,7 +903,7 @@ ignore_null_values: {
|
|||
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
|
||||
|
||||
```sql
|
||||
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
|
||||
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
|
||||
```
|
||||
|
||||
### LAST
|
||||
|
@ -1008,8 +1009,7 @@ SAMPLE(expr, k)
|
|||
|
||||
**More explanations**:
|
||||
|
||||
This function cannot be used in expression calculation.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||
- This function cannot be used in expression calculation.
|
||||
|
||||
|
||||
### TAIL
|
||||
|
@ -1088,7 +1088,6 @@ CSUM(expr)
|
|||
|
||||
- Arithmetic operation can't be performed on the result of `csum` function
|
||||
- Can only be used with aggregate functions This function can be used with supertables and standard tables.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||
|
||||
|
||||
### DERIVATIVE
|
||||
|
@ -1112,7 +1111,6 @@ ignore_negative: {
|
|||
|
||||
**More explanation**:
|
||||
|
||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
||||
|
||||
### DIFF
|
||||
|
@ -1175,7 +1173,6 @@ MAVG(expr, k)
|
|||
|
||||
- Arithmetic operation can't be performed on the result of `MAVG`.
|
||||
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
||||
|
||||
|
||||
### STATECOUNT
|
||||
|
@ -1201,7 +1198,6 @@ STATECOUNT(expr, oper, val)
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
|
||||
- Can't be used with window operation, like interval/state_window/session_window
|
||||
|
||||
|
||||
|
@ -1229,7 +1225,6 @@ STATEDURATION(expr, oper, val, unit)
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
|
||||
- Can't be used with window operation, like interval/state_window/session_window
|
||||
|
||||
|
||||
|
@ -1247,7 +1242,6 @@ TWA(expr)
|
|||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` to force the result into each single timeline.
|
||||
|
||||
|
||||
## System Information Functions
|
||||
|
|
|
@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
|
|||
|
||||
- Maximum length of database name is 64 bytes
|
||||
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
|
||||
- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
||||
- Maximum length of each data row is 48K(64K since version 3.0.5.0) bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
||||
- The maximum length of a column name is 64 bytes.
|
||||
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
|
||||
- The maximum length of a tag name is 64 bytes
|
||||
|
|
|
@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
|
|||
Native connections are supported on the same platforms as the TDengine client driver.
|
||||
REST connection supports all platforms that can run Java.
|
||||
|
||||
## Version support
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
|
||||
## Recent update logs
|
||||
|
||||
| taos-jdbcdriver version | major changes |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
|
||||
| 3.2.0 | This version has been deprecated |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
|
||||
| 3.0.0 | Support for TDengine 3.0 |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||
| 2.0.37 | Support json tags |
|
||||
| 2.0.36 | Support schemaless writing |
|
||||
| taos-jdbcdriver version | major changes | TDengine version |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||
| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
|
||||
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
|
||||
| 3.2.0 | This version has been deprecated | - |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
|
||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | - |
|
||||
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection | - |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function | - |
|
||||
| 2.0.37 | Support json tags | - |
|
||||
| 2.0.36 | Support schemaless writing | - |
|
||||
|
||||
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
|
||||
|
||||
|
@ -102,6 +99,8 @@ For specific error codes, please refer to.
|
|||
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
||||
| 0x231a | password is required | Password information is missing when creating a connection |
|
||||
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
||||
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
||||
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
||||
| 0x2350 | unknown error | Unknown exception, please return to the developer on github. |
|
||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
||||
|
@ -117,8 +116,8 @@ For specific error codes, please refer to.
|
|||
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
||||
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
||||
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
||||
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
||||
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter |
|
||||
| 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.1</version>
|
||||
<version>3.2.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -913,14 +912,15 @@ public class SchemalessWsTest {
|
|||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
init(connection);
|
||||
try(Connection connection = DriverManager.getConnection(url)){
|
||||
init(connection);
|
||||
|
||||
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
System.exit(0);
|
||||
try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
|
@ -959,6 +959,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
|
|||
|
||||
```java
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
||||
|
@ -966,12 +967,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
|
|||
TaosConsumer consumer = new TaosConsumer<>(config);
|
||||
```
|
||||
|
||||
- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if the WebSocket connection is used.
- enable.auto.commit: Specifies whether to commit automatically.
- group.id: Specifies the group that the consumer belongs to.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the type of connection to TDengine, `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeout: WebSocket connection timeout in milliseconds. The default value is 5000 ms. It only takes effect when using the WebSocket type.
- messageWaitTimeout: Socket timeout in milliseconds. The default value is 10000 ms. It only takes effect when using the WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using the WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
||||
|
||||
#### Subscribe to consume data
|
||||
|
@ -988,6 +991,17 @@ while(true) {
|
|||
|
||||
`poll` obtains one message each time it is run.

#### Assignment and seek of subscription offsets

```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;

void seek(TopicPartition partition, long offset) throws SQLException;
```
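A hedged usage sketch (not part of the original example; the topic name is a placeholder) that combines `beginningOffsets` and `seek` to rewind a consumer to the start of every partition of a topic:

```java
// Rewind the consumer to the earliest offset of each partition of "topic_speed",
// using only the signatures listed above.
Map<TopicPartition, Long> begin = consumer.beginningOffsets("topic_speed");
for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
    consumer.seek(entry.getKey(), entry.getValue());
}
```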
|
||||
|
||||
#### Close subscriptions
|
||||
|
||||
```java
|
||||
|
@ -1015,10 +1029,20 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("td.connect.type", "jni");
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1090,12 +1114,19 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.type", "ws");
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group2");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1236,6 +1267,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
|
||||
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
|
||||
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
|
||||
- consumer-demo: consumer TDengine data example, the consumption rate can be controlled by parameters.
|
||||
|
||||
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
|
|||
|
||||
## Version support
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
Please refer to [version support list](https://github.com/taosdata/driver-go#remind)
|
||||
|
||||
## Supported features
|
||||
|
||||
|
@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
|
|||
|
||||
Commit information.
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
Note: `ignoredTimeoutMs` is reserved for compatibility purpose
|
||||
|
||||
Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
Unsubscribe.
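A hedged sketch (not from the original docs) of combining `Assignment` and `Seek` to rewind a consumer; it assumes `tmq.TopicPartition` carries an `Offset` field and that offset 0 denotes the earliest position:

```go
// Rewind every assigned partition to offset 0.
partitions, err := consumer.Assignment()
if err != nil {
    panic(err)
}
for _, partition := range partitions {
    partition.Offset = 0 // assumption: 0 is the earliest offset
    // The second argument is ignored and kept only for compatibility.
    if err := consumer.Seek(partition, 0); err != nil {
        panic(err)
    }
}
```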
|
||||
|
@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
|
|||
|
||||
Commit information.
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
Note: `ignoredTimeoutMs` is reserved for compatibility purpose
|
||||
|
||||
Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
Unsubscribe.
|
||||
|
@ -476,7 +494,7 @@ Unsubscribe.
|
|||
|
||||
Close consumer.
|
||||
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
|
||||
|
||||
### parameter binding via WebSocket
|
||||
|
||||
|
@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv
|
|||
|
||||
Closes the parameter binding.
|
||||
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
|
||||
|
||||
## API Reference
|
||||
|
||||
|
|
|
@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
|
|||
Native connections are supported on the same platforms as the TDengine client driver.
|
||||
WebSocket connections are supported on all platforms that can run Rust.
|
||||
|
||||
## Version support
|
||||
## Version history
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
| connector-rust version | TDengine version | major features |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
|
||||
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
|
||||
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
|
||||
| v0.6.0 | 3.0.0.0 | Base features. |
|
||||
|
||||
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
|
||||
|
||||
|
@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
|
|||
}
|
||||
```
|
||||
|
||||
Get assignments:
|
||||
|
||||
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
let assignments = consumer.assignments().await.unwrap();
|
||||
```
|
||||
|
||||
Seek offset:
|
||||
|
||||
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
consumer.offset_seek(topic, vgroup_id, offset).await;
|
||||
```
|
||||
|
||||
Unsubscribe:
|
||||
|
||||
```rust
|
||||
|
@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
|
|||
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
|
||||
- `auto.commit.interval.ms`: Interval for automatic commits.
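A non-authoritative sketch (host, port, topic name, and the use of the `tokio` and `anyhow` crates are assumptions) showing how these parameters could be passed as query items of the TMQ DSN:

```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Hedged sketch: carry the TMQ parameters listed above in the DSN.
    let dsn = "taos://localhost:6030?group.id=group1&client.id=1&auto.offset.reset=earliest&enable.auto.commit=true&auto.commit.interval.ms=1000";
    let mut consumer = TmqBuilder::from_dsn(dsn)?.build().await?;
    consumer.subscribe(["topic_speed"]).await?;
    // ... poll messages here, then unsubscribe when done ...
    consumer.unsubscribe().await;
    Ok(())
}
```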
|
||||
|
||||
For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
|
||||
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
|
||||
|
||||
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
|
||||
|
||||
|
|
|
@ -362,7 +362,7 @@ By using the optional req_id parameter, you can specify a request ID that can be
|
|||
|
||||
##### TaosConnection class
|
||||
|
||||
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
|
||||
Use the same connection method described above, but with an additional `req_id` argument.
|
||||
|
||||
```python title="execute method"
|
||||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
|
||||
|
@ -372,13 +372,9 @@ The `TaosConnection` class contains both an implementation of the PEP249 Connect
|
|||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
|
||||
```
|
||||
|
||||
:::tip
|
||||
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
|
||||
:::
|
||||
|
||||
##### Use of TaosResult class
|
||||
|
||||
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
|
||||
Fetch data in the same way as described above, but with an additional `req_id` argument.
|
||||
|
||||
```python title="blocks_iter method"
|
||||
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
|
||||
|
@ -391,17 +387,12 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
|
|||
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
|
||||
```
|
||||
|
||||
:::note
|
||||
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
|
||||
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
##### Use of TaosRestCursor class
|
||||
|
||||
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
|
||||
Use the same connection method described above, but with an additional `req_id` argument.
|
||||
|
||||
```python title="Use of TaosRestCursor"
|
||||
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
|
||||
|
@ -421,8 +412,11 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
|
|||
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
Use the same connection method described above, but with an additional `req_id` argument.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
|
||||
```
|
||||
|
@ -459,6 +453,170 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Subscription
|
||||
|
||||
The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
The `Consumer` class in the connector provides the subscription API.
|
||||
|
||||
#### Create Consumer
|
||||
|
||||
The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
|
||||
|
||||
```python
|
||||
from taos.tmq import Consumer
|
||||
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
```
|
||||
|
||||
#### Subscribe topics
|
||||
|
||||
The `subscribe` function is used to subscribe to a list of topics.
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
#### Consume
|
||||
|
||||
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
|
||||
|
||||
```python
while True:
    res = consumer.poll(1)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    val = res.value()

    for block in val:
        print(block.fetchall())
```
|
||||
|
||||
#### assignment
|
||||
|
||||
The `assignment` function is used to get the assignment of the topic.
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
#### Seek
|
||||
|
||||
The `seek` function is used to reset the consumption offset of a topic partition.
|
||||
|
||||
```python
from taos.tmq import TopicPartition

tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```
|
||||
|
||||
#### After consuming data
|
||||
|
||||
You should unsubscribe from the topics and close the consumer after consuming.
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
#### Tmq subscription example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_example.py}}
|
||||
```
|
||||
|
||||
#### assignment and seek example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
In addition to native connections, the connector also supports subscriptions via websockets.
|
||||
|
||||
#### Create Consumer
|
||||
|
||||
The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
|
||||
|
||||
```python
import taosws

consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
|
||||
|
||||
#### subscribe topics
|
||||
|
||||
The `subscribe` function is used to subscribe to a list of topics.
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
#### Consume
|
||||
|
||||
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
|
||||
|
||||
```python
while True:
    res = consumer.poll(timeout=1.0)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    for block in res:
        for row in block:
            print(row)
```
|
||||
|
||||
#### assignment
|
||||
|
||||
The `assignment` function is used to get the assignment of the topic.
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
#### Seek
|
||||
|
||||
The `seek` function is used to reset the consumption offset of a topic partition.
|
||||
|
||||
```python
|
||||
consumer.seek(topic='topic1', partition=0, offset=0)
|
||||
```
|
||||
|
||||
#### After consuming data
|
||||
|
||||
You should unsubscribe from the topics and close the consumer after consuming.
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
#### Subscription example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_example.py}}
|
||||
```
|
||||
|
||||
#### Assignment and seek example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Schemaless Insert
|
||||
|
||||
The connector supports schemaless insert.
|
||||
|
@ -513,7 +671,8 @@ Insert with req_id argument
|
|||
|
||||
| Example program links                                                                                    | Example program content                       |
| -------------------------------------------------------------------------------------------------------- | --------------------------------------------- |
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py)       | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py)           | parameter binding, bind one row at a time     |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py)   | InfluxDB line protocol writing                |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py)           | Use JSON type tags                            |
|
||||
|
|
|
@ -62,7 +62,7 @@ The different database framework specifications for various programming language
|
|||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||
| **Subscription (TMQ) ** | Supported | Support | Support | Not Supported | Not Supported | Support |
|
||||
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
|
|
|
@ -111,7 +111,7 @@ The parameters described in this document by the effect that they have on the sy
|
|||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------------------- |
|
||||
| Applicable | Client/Server |
|
||||
| Meaning | The maximum waiting time to get avaliable conn |
|
||||
| Meaning | The maximum waiting time to get available conn |
|
||||
| Value Range | 10-50000000(ms) |
|
||||
| Default Value | 500000 |
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
|
|||
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
|
||||
|
||||
:::tip
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
:::
|
||||
|
||||
## Time resolution recognition
|
||||
|
|
|
@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
|
|||
|
||||

|
||||
|
||||
## What is Confluent?
|
||||
|
||||
[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:
|
||||
|
||||
1. Schema Registry
|
||||
2. REST Proxy
|
||||
3. Non-Java Clients
|
||||
4. Many packaged Kafka Connect plugins
|
||||
5. GUI for managing and monitoring Kafka - Confluent Control Center
|
||||
|
||||
Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
|
||||

|
||||
|
||||
Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Linux operating system
|
||||
2. Java 8 and Maven installed
|
||||
3. Git is installed
|
||||
3. Git, curl, and vi are installed
|
||||
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
|
||||
|
||||
## Install Confluent
|
||||
|
||||
Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
|
||||
## Install Kafka
|
||||
|
||||
Execute in any directory:
|
||||
|
||||
````
|
||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
|
||||
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
|
||||
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
|
||||
````
|
||||
|
||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
||||
Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
|
||||
|
||||
```title=".profile"
|
||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||
export KAFKA_HOME=/opt/kafka
|
||||
export PATH=$PATH:$KAFKA_HOME/bin
|
||||
```
|
||||
|
||||
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
||||
|
||||
After the installation is complete, you can enter `confluent version` for simple verification:
|
||||
|
||||
```
|
||||
# confluent version
|
||||
confluent - Confluent CLI
|
||||
|
||||
Version: v2.6.1
|
||||
Git Ref: 6d920590
|
||||
Build Date: 2022-02-18T06:14:21Z
|
||||
Go Version: go1.17.6 (linux/amd64)
|
||||
Development: false
|
||||
```
|
||||
|
||||
## Install TDengine Connector plugin
|
||||
|
||||
### Install from source code
|
||||
|
||||
```
|
||||
```shell
|
||||
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
||||
cd kafka-connect-tdengine
|
||||
mvn clean package
|
||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
```
|
||||
|
||||
The above script first clones the project source code and then compiles and packages it with Maven. After packaging is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a built-in plugin path.
|
||||
The above script first clones the project source code and then compiles and packages it with Maven. After packaging is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.
|
||||
|
||||
### Install with confluent-hub
|
||||
### Add configuration file
|
||||
|
||||
[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
|
||||
**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
|
||||
add kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
|
||||
|
||||
## Start Confluent
|
||||
|
||||
```
|
||||
confluent local services start
|
||||
```properties
|
||||
plugin.path=/usr/share/java,/opt/kafka/components
|
||||
```
|
||||
|
||||
:::note
|
||||
Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
|
||||
:::
|
||||
## Start Kafka Services
|
||||
|
||||
:::tip
|
||||
If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
|
||||
Use the commands below to start all services:
|
||||
|
||||
```title="Console output log" {1}
|
||||
Using CONFLUENT_CURRENT: /tmp/confluent.106668
|
||||
Starting ZooKeeper
|
||||
ZooKeeper is [UP]
|
||||
Starting Kafka
|
||||
Kafka is [UP]
|
||||
Starting Schema Registry
|
||||
Schema Registry is [UP]
|
||||
Starting Kafka REST
|
||||
Kafka REST is [UP]
|
||||
Starting Connect
|
||||
Connect is [UP]
|
||||
Starting ksqlDB Server
|
||||
ksqlDB Server is [UP]
|
||||
Starting Control Center
|
||||
Control Center is [UP]
|
||||
```
|
||||
```shell
|
||||
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
|
||||
|
||||
To clear data, execute `rm -rf /tmp/confluent.106668`.
|
||||
:::
|
||||
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
|
||||
|
||||
### Check Confluent Services Status
|
||||
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
|
||||
|
||||
Use command bellow to check the status of all service:
|
||||
|
||||
```
|
||||
confluent local services status
|
||||
```
|
||||
|
||||
The expected output is:
|
||||
```
|
||||
Connect is [UP]
|
||||
Control Center is [UP]
|
||||
Kafka is [UP]
|
||||
Kafka REST is [UP]
|
||||
ksqlDB Server is [UP]
|
||||
Schema Registry is [UP]
|
||||
ZooKeeper is [UP]
|
||||
```
|
||||
|
||||
### Check Successfully Loaded Plugin
|
||||
|
||||
After Kafka Connect has started completely, you can use the command below to check whether the plugins are installed successfully:
|
||||
```
|
||||
confluent local services connect plugin list
|
||||
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
|
||||
The output is as below:
|
||||
|
||||
```txt
|
||||
[]
|
||||
```
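The list is empty until a connector instance has been created. To additionally confirm that the TDengine connector classes were picked up from `plugin.path`, you can query Kafka Connect's standard plugin-listing REST endpoint (this endpoint belongs to Kafka Connect itself and is offered here only as a suggestion):

```shell
curl http://localhost:8083/connector-plugins
```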
|
||||
Available Connect Plugins:
|
||||
[
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"type": "sink",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"type": "source",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
......
|
||||
```
|
||||
|
||||
If not, please check the log file of Kafka Connect. To view the log file path, please execute:
|
||||
|
||||
```
|
||||
echo `cat /tmp/confluent.current`/connect/connect.stdout
|
||||
```
|
||||
It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
|
||||
|
||||
Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
|
||||
|
||||
## The use of TDengine Sink Connector
|
||||
|
||||
|
@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
|
|||
|
||||
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
|
||||
|
||||
### Add configuration file
|
||||
### Add Sink Connector configuration file
|
||||
|
||||
```
|
||||
```shell
|
||||
mkdir ~/test
|
||||
cd ~/test
|
||||
vi sink-demo.properties
|
||||
vi sink-demo.json
|
||||
```
|
||||
|
||||
sink-demo.properties' content is following:
|
||||
The content of `sink-demo.json` is as follows:
|
||||
|
||||
```ini title="sink-demo.properties"
|
||||
name=TDengineSinkConnector
|
||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
||||
tasks.max=1
|
||||
topics=meters
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.user=root
|
||||
connection.password=taosdata
|
||||
connection.database=power
|
||||
db.schemaless=line
|
||||
data.precision=ns
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="sink-demo.json"
|
||||
{
|
||||
"name": "TDengineSinkConnector",
|
||||
"config": {
|
||||
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.user": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "power",
|
||||
"db.schemaless": "line",
|
||||
"data.precision": "ns",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Key configuration instructions:
|
||||
|
||||
1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
|
||||
2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
|
||||
1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
|
||||
2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
|
||||
|
||||
### Create Connector instance
|
||||
### Create Sink Connector instance
|
||||
|
||||
````
|
||||
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
|
||||
````shell
|
||||
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
````
|
||||
|
||||
If the above command is executed successfully, the output is as follows:
|
||||
|
@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
|
|||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"name": "TDengineSinkConnector"
|
||||
"name": "TDengineSinkConnector",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": "1",
|
||||
},
|
||||
"tasks": [],
|
||||
"type": "sink"
|
||||
|
@ -258,7 +182,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
|
|||
Use kafka-console-producer to write test data to the topic `meters`.
|
||||
|
||||
```
|
||||
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
|
||||
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
|
||||
```
|
||||
|
||||
:::note
|
||||
|
@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
|
|||
|
||||
Use the TDengine CLI to verify that the sync was successful.
|
||||
|
||||
```
|
||||
```sql
|
||||
taos> use power;
|
||||
Database changed.
|
||||
|
||||
taos> select * from meters;
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
_ts | current | voltage | phase | groupid | location |
|
||||
===============================================================================================================================================================
|
||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||
|
@ -293,29 +217,36 @@ TDengine Source Connector will convert the data in TDengine data table into [Inf
|
|||
|
||||
The following sample program synchronizes the data in the database test to the topic tdengine-source-test-meters.
|
||||
|
||||
### Add configuration file
|
||||
### Add Source Connector configuration file
|
||||
|
||||
```
|
||||
vi source-demo.properties
|
||||
```shell
|
||||
vi source-demo.json
|
||||
```
|
||||
|
||||
Enter the following content:
|
||||
|
||||
```ini title="source-demo.properties"
|
||||
name=TDengineSourceConnector
|
||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
||||
tasks.max=1
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.username=root
|
||||
connection.password=taosdata
|
||||
connection.database=test
|
||||
connection.attempts=3
|
||||
connection.backoff.ms=5000
|
||||
topic.prefix=tdengine-source-
|
||||
poll.interval.ms=1000
|
||||
fetch.max.rows=100
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="source-demo.json"
|
||||
{
|
||||
"name":"TDengineSourceConnector",
|
||||
"config":{
|
||||
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"tasks.max": 1,
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.username": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "test",
|
||||
"connection.attempts": 3,
|
||||
"connection.backoff.ms": 5000,
|
||||
"topic.prefix": "tdengine-source",
|
||||
"poll.interval.ms": 1000,
|
||||
"fetch.max.rows": 100,
|
||||
"topic.per.stable": true,
|
||||
"topic.ignore.db": false,
|
||||
"out.format": "line",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Prepare test data
|
||||
|
@ -340,40 +271,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
|
|||
|
||||
Use TDengine CLI to execute SQL script
|
||||
|
||||
```
|
||||
```shell
|
||||
taos -f prepare-source-data.sql
|
||||
```
|
||||
|
||||
### Create Connector instance
|
||||
|
||||
````
|
||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
||||
````
|
||||
```shell
|
||||
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### View topic data
|
||||
|
||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test-meters. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
|
||||
|
||||
````
|
||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||
````shell
|
||||
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
|
||||
````
|
||||
|
||||
output:
|
||||
|
||||
````
|
||||
```txt
|
||||
......
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||
......
|
||||
````
|
||||
```
|
||||
|
||||
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
||||
|
||||
````
|
||||
```sql
|
||||
USE test;
|
||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||
````
|
||||
```
|
||||
|
||||
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
|
||||
|
||||
|
@ -383,16 +314,16 @@ After testing, use the unload command to stop the loaded connector.
|
|||
|
||||
View currently active connectors:
|
||||
|
||||
````
|
||||
confluent local services connect connector status
|
||||
````
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||
|
||||
````
|
||||
confluent local services connect connector unload TDengineSinkConnector
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
````
|
||||
```shell
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||
```
|
||||
|
||||
## Configuration reference
|
||||
|
||||
|
@ -427,22 +358,19 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
|||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
|
||||
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
|
||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
||||
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
|
||||
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
|
||||
|
||||
|
||||
6. `query.interval.ms`: The time range for reading data from TDengine each time, in milliseconds. It should be adjusted according to the rate at which data flows in. The default value is 0, which means reading all data up to the current time.
|
||||
7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
|
||||
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
|
||||
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix>-<stable.name>`, false indicates that the rule is `<topic.prefix>-<connection.database>-<stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
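For example, with the `source-demo.json` settings above (`topic.prefix` set to `tdengine-source`, `connection.database` set to `test`, and a super table named `meters`), the naming rules yield:

```txt
topic.per.stable=true,  topic.ignore.db=false  ->  tdengine-source-test-meters
topic.per.stable=true,  topic.ignore.db=true   ->  tdengine-source-meters
topic.per.stable=false                         ->  tdengine-source-test
```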
|
||||
|
||||
## Other notes
|
||||
|
||||
1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
|
||||
2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
|
||||
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
|
||||
|
||||
## Feedback
|
||||
|
||||
https://github.com/taosdata/kafka-connect-tdengine/issues
|
||||
<https://github.com/taosdata/kafka-connect-tdengine/issues>
|
||||
|
||||
## Reference
|
||||
|
||||
1. https://www.confluent.io/what-is-apache-kafka
|
||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
||||
3. https://docs.confluent.io/platform/current/platform.html
|
||||
1. For more information, see <https://kafka.apache.org/documentation/>
|
||||
|
|
|
@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.5.0
|
||||
|
||||
<Release type="tdengine" version="3.0.5.0" />
|
||||
|
||||
## 3.0.4.2
|
||||
|
||||
<Release type="tdengine" version="3.0.4.2" />
|
||||
|
||||
## 3.0.4.1
|
||||
|
||||
<Release type="tdengine" version="3.0.4.1" />
|
||||
|
|
|
@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.5.1
|
||||
|
||||
<Release type="tools" version="2.5.1" />
|
||||
|
||||
## 2.5.0
|
||||
|
||||
<Release type="tools" version="2.5.0" />
|
||||
|
|
|
@ -78,7 +78,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
|||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
memcpy(str + len, row[i], charLen);
|
||||
len += charLen;
|
||||
|
|
|
@ -76,7 +76,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
|||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
memcpy(str + len, row[i], charLen);
|
||||
len += charLen;
|
||||
|
|
|
@ -6,39 +6,32 @@ import java.sql.Connection;
|
|||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.time.LocalDateTime;
|
||||
import java.time.ZoneOffset;
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class StmtInsertExample {
|
||||
private static ArrayList<Long> tsToLongArray(String ts) {
|
||||
ArrayList<Long> result = new ArrayList<>();
|
||||
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
|
||||
LocalDateTime localDateTime = LocalDateTime.parse(ts, formatter);
|
||||
result.add(localDateTime.toInstant(ZoneOffset.of("+8")).toEpochMilli());
|
||||
return result;
|
||||
}
|
||||
private static String datePattern = "yyyy-MM-dd HH:mm:ss.SSS";
|
||||
private static DateTimeFormatter formatter = DateTimeFormatter.ofPattern(datePattern);
|
||||
|
||||
private static <T> ArrayList<T> toArray(T v) {
|
||||
ArrayList<T> result = new ArrayList<>();
|
||||
result.add(v);
|
||||
return result;
|
||||
}
|
||||
|
||||
private static List<String> getRawData() {
|
||||
return Arrays.asList(
|
||||
"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
|
||||
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
|
||||
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
|
||||
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
|
||||
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
|
||||
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
|
||||
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
|
||||
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
|
||||
);
|
||||
private static List<String> getRawData(int size) {
|
||||
SimpleDateFormat format = new SimpleDateFormat(datePattern);
|
||||
List<String> result = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
Random random = new Random();
|
||||
for (int i = 0; i < size; i++) {
|
||||
String time = format.format(current + i);
|
||||
int id = random.nextInt(10);
|
||||
result.add("d" + id + "," + time + ",10.30000,219,0.31000,California.SanFrancisco,2");
|
||||
}
|
||||
return result.stream()
|
||||
.sorted(Comparator.comparing(s -> s.split(",")[0])).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private static Connection getConnection() throws SQLException {
|
||||
|
@ -48,9 +41,9 @@ public class StmtInsertExample {
|
|||
|
||||
private static void createTable(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("CREATE DATABASE power KEEP 3650");
|
||||
stmt.executeUpdate("USE power");
|
||||
stmt.execute("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
|
||||
stmt.execute("CREATE DATABASE if not exists power KEEP 3650");
|
||||
stmt.executeUpdate("use power");
|
||||
stmt.execute("CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
|
||||
"TAGS (location BINARY(64), groupId INT)");
|
||||
}
|
||||
}
|
||||
|
@ -58,21 +51,54 @@ public class StmtInsertExample {
|
|||
private static void insertData() throws SQLException {
|
||||
try (Connection conn = getConnection()) {
|
||||
createTable(conn);
|
||||
String psql = "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
|
||||
String psql = "INSERT INTO ? USING power.meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
|
||||
try (TSDBPreparedStatement pst = (TSDBPreparedStatement) conn.prepareStatement(psql)) {
|
||||
for (String line : getRawData()) {
|
||||
String tableName = null;
|
||||
ArrayList<Long> ts = new ArrayList<>();
|
||||
ArrayList<Float> current = new ArrayList<>();
|
||||
ArrayList<Integer> voltage = new ArrayList<>();
|
||||
ArrayList<Float> phase = new ArrayList<>();
|
||||
for (String line : getRawData(100000)) {
|
||||
String[] ps = line.split(",");
|
||||
// bind table name and tags
|
||||
pst.setTableName(ps[0]);
|
||||
pst.setTagString(0, ps[5]);
|
||||
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
||||
if (tableName == null) {
|
||||
// bind table name and tags
|
||||
tableName = "power." + ps[0];
|
||||
pst.setTableName(ps[0]);
|
||||
pst.setTagString(0, ps[5]);
|
||||
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
||||
} else {
|
||||
if (!tableName.equals(ps[0])) {
|
||||
pst.setTimestamp(0, ts);
|
||||
pst.setFloat(1, current);
|
||||
pst.setInt(2, voltage);
|
||||
pst.setFloat(3, phase);
|
||||
pst.columnDataAddBatch();
|
||||
pst.columnDataExecuteBatch();
|
||||
|
||||
// bind table name and tags
|
||||
tableName = ps[0];
|
||||
pst.setTableName(ps[0]);
|
||||
pst.setTagString(0, ps[5]);
|
||||
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
||||
ts.clear();
|
||||
current.clear();
|
||||
voltage.clear();
|
||||
phase.clear();
|
||||
}
|
||||
}
|
||||
// bind values
|
||||
pst.setTimestamp(0, tsToLongArray(ps[1])); //ps[1] looks like: 2018-10-03 14:38:05.000
|
||||
pst.setFloat(1, toArray(Float.valueOf(ps[2])));
|
||||
pst.setInt(2, toArray(Integer.valueOf(ps[3])));
|
||||
pst.setFloat(3, toArray(Float.valueOf(ps[4])));
|
||||
pst.columnDataAddBatch();
|
||||
// ps[1] looks like: 2018-10-03 14:38:05.000
|
||||
LocalDateTime localDateTime = LocalDateTime.parse(ps[1], formatter);
|
||||
ts.add(localDateTime.toInstant(ZoneOffset.of("+8")).toEpochMilli());
|
||||
current.add(Float.valueOf(ps[2]));
|
||||
voltage.add(Integer.valueOf(ps[3]));
|
||||
phase.add(Float.valueOf(ps[4]));
|
||||
}
|
||||
pst.setTimestamp(0, ts);
|
||||
pst.setFloat(1, current);
|
||||
pst.setInt(2, voltage);
|
||||
pst.setFloat(3, phase);
|
||||
pst.columnDataAddBatch();
|
||||
pst.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,20 +53,28 @@ public class SubscribeDemo {
|
|||
|
||||
// create consumer
|
||||
Properties properties = new Properties();
|
||||
properties.getProperty(TMQConstants.CONNECT_TYPE, "jni");
|
||||
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
|
||||
properties.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||
properties.setProperty(TMQConstants.GROUP_ID, "test");
|
||||
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
|
||||
properties.setProperty(TMQConstants.GROUP_ID, "test1");
|
||||
properties.setProperty(TMQConstants.CLIENT_ID, "1");
|
||||
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||
"com.taos.example.MetersDeserializer");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||
|
||||
// poll data
|
||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||
for (ConsumerRecord<Meters> recode : meters) {
|
||||
Meters meter = recode.value();
|
||||
for (ConsumerRecord<Meters> r : meters) {
|
||||
Meters meter = r.value();
|
||||
System.out.println(meter);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||
|
@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo {
|
|||
Properties properties = new Properties();
|
||||
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
|
||||
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
|
||||
properties.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
|
||||
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||
properties.setProperty(TMQConstants.GROUP_ID, "test");
|
||||
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
|
||||
properties.setProperty(TMQConstants.GROUP_ID, "test2");
|
||||
properties.setProperty(TMQConstants.CLIENT_ID, "1");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||
"com.taos.example.MetersDeserializer");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||
|
||||
// poll data
|
||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||
for (Meters meter : meters) {
|
||||
for (ConsumerRecord<Meters> r : meters) {
|
||||
Meters meter = (Meters) r.value();
|
||||
System.out.println(meter);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
import taos
|
||||
from taos.tmq import Consumer
|
||||
import taosws
|
||||
|
||||
|
||||
def prepare():
|
||||
conn = taos.connect()
|
||||
conn.execute("drop topic if exists tmq_assignment_demo_topic")
|
||||
conn.execute("drop database if exists tmq_assignment_demo_db")
|
||||
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
|
||||
conn.select_db("tmq_assignment_demo_db")
|
||||
conn.execute(
|
||||
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
|
||||
conn.execute(
|
||||
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
|
||||
|
||||
|
||||
def taos_get_assignment_and_seek_demo():
|
||||
prepare()
|
||||
consumer = Consumer(
|
||||
{
|
||||
"group.id": "0",
|
||||
# should disable snapshot,
|
||||
# otherwise it will cause invalid params error
|
||||
"experimental.snapshot.enable": "false",
|
||||
}
|
||||
)
|
||||
consumer.subscribe(["tmq_assignment_demo_topic"])
|
||||
|
||||
# get topic assignment
|
||||
assignments = consumer.assignment()
|
||||
for assignment in assignments:
|
||||
print(assignment)
|
||||
|
||||
# poll
|
||||
consumer.poll(1)
|
||||
consumer.poll(1)
|
||||
|
||||
# get topic assignment again
|
||||
after_poll_assignments = consumer.assignment()
for assignment in after_poll_assignments:
|
||||
print(assignment)
|
||||
|
||||
# seek to the beginning
|
||||
for assignment in assignments:
|
||||
consumer.seek(assignment)
|
||||
|
||||
# now the assignment should be the same as before poll
|
||||
assignments = consumer.assignment()
|
||||
for assignment in assignments:
|
||||
print(assignment)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
taos_get_assignment_and_seek_demo()
|
|
@ -0,0 +1,57 @@
|
|||
import taos
|
||||
import taosws
|
||||
|
||||
|
||||
def prepare():
|
||||
conn = taos.connect()
|
||||
conn.execute("drop topic if exists tmq_assignment_demo_topic")
|
||||
conn.execute("drop database if exists tmq_assignment_demo_db")
|
||||
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
|
||||
conn.select_db("tmq_assignment_demo_db")
|
||||
conn.execute(
|
||||
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
|
||||
conn.execute(
|
||||
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
|
||||
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
|
||||
|
||||
|
||||
def taosws_get_assignment_and_seek_demo():
|
||||
prepare()
|
||||
consumer = taosws.Consumer(conf={
|
||||
"td.connect.websocket.scheme": "ws",
|
||||
# should disable snapshot,
|
||||
# otherwise it will cause invalid params error
|
||||
"experimental.snapshot.enable": "false",
|
||||
"group.id": "0",
|
||||
})
|
||||
consumer.subscribe(["tmq_assignment_demo_topic"])
|
||||
|
||||
# get topic assignment
|
||||
assignments = consumer.assignment()
|
||||
for assignment in assignments:
|
||||
print(assignment.to_string())
|
||||
|
||||
# poll
|
||||
consumer.poll(1)
|
||||
consumer.poll(1)
|
||||
|
||||
# get topic assignment again
|
||||
after_poll_assignments = consumer.assignment()
|
||||
for assignment in after_poll_assignments:
|
||||
print(assignment.to_string())
|
||||
|
||||
# seek to the beginning
|
||||
for assignment in assignments:
|
||||
for a in assignment.assignments():
|
||||
consumer.seek(assignment.topic(), a.vg_id(), a.offset())
|
||||
|
||||
# now the assignment should be the same as before poll
|
||||
assignments = consumer.assignment()
|
||||
for assignment in assignments:
|
||||
print(assignment.to_string())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
taosws_get_assignment_and_seek_demo()
|
|
@ -82,7 +82,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -105,6 +105,12 @@ class Consumer:
|
|||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def assignment(self):
|
||||
pass
|
||||
|
||||
def seek(self, partition):
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
|
|
|
@ -271,26 +271,90 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
|||
|
||||
## 用 Python 语言实现 UDF
|
||||
|
||||
### 准备环境
|
||||
|
||||
1. 准备好 Python 运行环境
|
||||
|
||||
2. 安装 Python 包 `taospyudf`
|
||||
|
||||
```shell
|
||||
pip3 install taospyudf
|
||||
```
|
||||
|
||||
安装过程中会编译 C++ 源码,因此系统上要有 cmake 和 gcc。编译生成的 libtaospyudf.so 文件会被自动复制到 /usr/local/lib/ 目录,因此如果是非 root 用户,安装时需加 sudo。安装完成后可以检查这个目录下是否有了这个文件:
|
||||
|
||||
```shell
|
||||
root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
|
||||
-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
|
||||
```
|
||||
|
||||
然后执行命令
|
||||
```shell
|
||||
ldconfig
|
||||
```
|
||||
|
||||
3. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为 PYTHONPATH 的内容
|
||||
|
||||
4. 启动 `taosd` 服务
|
||||
细节请参考 [快速开始](../../get-started)
|
||||
|
||||
### 接口定义
|
||||
|
||||
#### 接口概述
|
||||
|
||||
使用 Python 语言实现 UDF 时,需要实现规定的接口函数:
|
||||
- 标量函数需要实现标量接口函数 process。
|
||||
- 聚合函数需要实现聚合接口函数 start、reduce、finish。
|
||||
- 如果需要初始化,实现 init;如果需要清理工作,实现 destroy。
|
||||
|
||||
### 用 Python 实现标量函数
|
||||
#### 标量函数接口
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
|
||||
说明:
|
||||
- input:datablock 类似二维矩阵,通过成员方法 data(row,col)返回位于 row 行,col 列的 python 对象
|
||||
- 返回值是一个 Python 对象元组,每个元素类型为输出类型。
|
||||
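例如,下面是一个最小示意(并非正式实现,输出类型假设为 DOUBLE):process 对输入第一列的每个值取平方,返回的元素个数与输入数据块的行数一致。

```Python
# 示意代码:对第一列的每个值取平方
def process(input):
    rows, _ = input.shape()    # shape() 返回数据块的行数和列数
    return [input.data(i, 0) * input.data(i, 0) for i in range(rows)]
```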
|
||||
#### 聚合函数接口
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
说明:
|
||||
- 首先调用 start 生成最初结果 buffer
|
||||
- 然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果
|
||||
- 最后再调用 finish 从中间结果 buf 产生最终输出,最终输出只能含 0 或 1 条数据。
|
||||
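下面给出一个最小示意(非正式实现,输出类型假设为 BIGINT):统计某一列中非 NULL 值的个数,用 pickle 序列化中间结果,展示 start、reduce、finish 的配合方式。完整模板见后文。

```Python
import pickle

def start():
    return pickle.dumps(0)                    # 初始中间结果:计数为 0

def reduce(inputs, buf):
    count = pickle.loads(buf)                 # 反序列化当前中间结果
    rows, _ = inputs.shape()
    for i in range(rows):
        if inputs.data(i, 0) is not None:     # 只统计非 NULL 值
            count += 1
    return pickle.dumps(count)                # 序列化新的中间结果

def finish(buf):
    return pickle.loads(buf)                  # 由中间结果产生最终输出
```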
|
||||
#### 初始化和销毁接口
|
||||
```Python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
||||
说明:
|
||||
- init 完成初始化工作
|
||||
- destroy 完成清理工作
|
||||
|
||||
### Python UDF 函数模板
|
||||
|
||||
#### 标量函数实现模板
|
||||
|
||||
标量函数实现模板如下
|
||||
|
||||
```Python
|
||||
def init():
|
||||
# initialization
|
||||
def destroy():
|
||||
# destroy
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
# process input datablock,
|
||||
# datablock.data(row, col) is to access the python object in location(row,col)
|
||||
# return tuple object consisted of object of type outputtype
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
|
||||
### 用 Python 实现聚合函数
|
||||
注意:定义标量函数最重要的是要实现 process 函数,同时必须定义 init 和 destroy 函数,即使它们什么都不做
|
||||
|
||||
#### 聚合函数实现模板
|
||||
|
||||
聚合函数实现模板如下
|
||||
```Python
|
||||
|
@ -303,41 +367,16 @@ def start() -> bytes:
|
|||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
# deserialize buf to state
|
||||
# reduce the inputs and state into new_state.
|
||||
# use inputs.data(i,j) to access python ojbect of location(i,j)
|
||||
# use inputs.data(i,j) to access python object of location(i,j)
|
||||
# serialize new_state into new_state_bytes
|
||||
return new_state_bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
#return obj of type outputtype
|
||||
```
|
||||
|
||||
### Python UDF 接口函数定义
|
||||
注意:定义聚合函数最重要是要实现 start, reduce 和 finish,且必须定义 init 和 destroy 函数。start 生成最初结果 buffer,然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果,最后再调用 finish 从中间结果 buf 产生最终输出。
|
||||
|
||||
#### 标量函数接口
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
- input:datablock 类似二维矩阵,通过成员方法 data(row,col)返回位于 row 行,col 列的 python 对象
|
||||
- 返回值是一个 Python 对象元组,每个元素类型为输出类型。
|
||||
|
||||
#### 聚合函数接口
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
首先调用 start 生成最初结果 buffer,然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果,最后再调用 finish 从中间结果 buf 产生最终输出,最终输出只能含 0 或 1 条数据。
|
||||
|
||||
|
||||
#### 初始化和销毁接口
|
||||
```Python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
||||
其中 init 完成初始化工作。 destroy 完成清理工作。如果没有初始化工作,无需定义 init 函数。如果没有清理工作,无需定义 destroy 函数。
|
||||
|
||||
### Python 和 TDengine之间的数据类型映射
|
||||
### 数据类型映射
|
||||
|
||||
下表描述了 TDengine SQL 数据类型和 Python 数据类型的映射。任何类型的 NULL 值都映射成 Python 的 None 值。
|
||||
|
||||
|
@ -351,15 +390,461 @@ def destroy()
|
|||
|TIMESTAMP | int |
|
||||
|JSON and other types | 不支持 |
|
||||
|
||||
### Python UDF 环境的安装
|
||||
1. 安装 taospyudf 包。此包执行Python UDF程序。
|
||||
```bash
|
||||
sudo pip install taospyudf
|
||||
ldconfig
|
||||
### 开发指南
|
||||
|
||||
本文内容由浅入深包括 4 个示例程序:
|
||||
1. 定义一个只接收一个整数的标量函数: 输入 n, 输出 ln(n^2 + 1)。
|
||||
2. 定义一个接收 n 个整数的标量函数, 输入 (x1, x2, ..., xn), 输出每个值和它们的序号的乘积的和: 1 * x1 + 2 * x2 + ... + n * xn。
|
||||
3. 定义一个标量函数,输入一个时间戳,输出距离这个时间最近的下一个周日。完成这个函数要用到第三方库 moment。我们在这个示例中讲解使用第三方库的注意事项。
|
||||
4. 定义一个聚合函数,计算某一列最大值和最小值的差,也就是实现 TDengine 内置的 spread 函数。
|
||||
同时也包含大量实用的 debug 技巧。
|
||||
本文假设你用的是 Linux 系统,且已安装好了 TDengine 3.0.4.0+ 和 Python 3.x。
|
||||
|
||||
注意:**UDF 内无法通过 print 函数输出日志,需要自己写文件或用 python 内置的 logging 库写文件**。
|
||||
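例如,可以参考下面的示意写法(日志文件路径为假设值,需保证 udfd 进程可写):在 init 中用 logging 模块配置输出到文件,之后在各接口函数中记录日志。

```python
import logging

def init():
    # 假设 /var/log/taos/ 目录对 udfd 进程可写
    logging.basicConfig(filename="/var/log/taos/myudf.log",
                        level=logging.DEBUG,
                        format="%(asctime)s %(levelname)s %(message)s")
    logging.info("udf init")

def destroy():
    logging.info("udf destroy")

def process(block):
    rows, _ = block.shape()
    logging.debug("process called, rows=%d", rows)
    return [block.data(i, 0) for i in range(rows)]
```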
|
||||
#### 最简单的 UDF
|
||||
|
||||
编写一个只接收一个整数的 UDF 函数: 输入 n, 输出 ln(n^2 + 1)。
|
||||
首先编写一个 Python 文件,存放在系统某个目录,比如 /root/udf/myfun.py,内容如下
|
||||
|
||||
```python
|
||||
from math import log
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
def process(block):
|
||||
rows, _ = block.shape()
|
||||
return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
2. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
|
||||
|
||||
这个文件包含 3 个函数, init 和 destroy 都是空函数,它们是 UDF 的生命周期函数,即使什么都不做也要定义。最关键的是 process 函数, 它接受一个数据块,这个数据块对象有两个方法:
|
||||
1. shape() 返回数据块的行数和列数
|
||||
2. data(i, j) 返回 i 行 j 列的数据
|
||||
标量函数的 process 方法传入的数据块有多少行,就需要返回多少个数据。上述代码中我们忽略了列数,因为我们只想对每行的第一个数做计算。
|
||||
接下来我们创建对应的 UDF 函数,在 TDengine CLI 中执行下面语句:
|
||||
|
||||
```sql
|
||||
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
|
||||
```
|
||||
其输出如下
|
||||
|
||||
```shell
|
||||
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
Create OK, 0 row(s) affected (0.005202s)
|
||||
```
|
||||
|
||||
看起来很顺利,接下来 show 一下系统中所有的自定义函数,确认创建成功:
|
||||
|
||||
```text
|
||||
taos> show functions;
|
||||
name |
|
||||
=================================
|
||||
myfun |
|
||||
Query OK, 1 row(s) in set (0.005767s)
|
||||
```
|
||||
|
||||
接下来就来测试一下这个函数,测试之前先执行下面的 SQL 命令,制造些测试数据,在 TDengine CLI 中执行下述命令
|
||||
|
||||
```sql
|
||||
create database test;
|
||||
create table t(ts timestamp, v1 int, v2 int, v3 int);
|
||||
insert into t values('2023-05-01 12:13:14', 1, 2, 3);
|
||||
insert into t values('2023-05-03 08:09:10', 2, 3, 4);
|
||||
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
|
||||
```
|
||||
|
||||
测试 myfun 函数:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
不幸的是执行失败了,什么原因呢?
|
||||
查看 udfd 进程的日志
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
```
|
||||
|
||||
发现以下错误信息:
|
||||
|
||||
```text
|
||||
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
|
||||
05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
|
||||
```
|
||||
|
||||
错误很明确:没有加载到 Python 插件 libtaospyudf.so,如果遇到此错误,请参考前面的准备环境一节。
|
||||
|
||||
修复环境错误后再次执行,如下:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1) from t;
|
||||
myfun(v1) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
至此,我们完成了第一个 UDF 😊,并学会了简单的 debug 方法。
|
||||
|
||||
#### 示例二:异常处理
|
||||
|
||||
上面的 myfun 虽然测试通过了,但是有两个缺点:
|
||||
|
||||
1. 这个标量函数只接受 1 列数据作为输入,如果用户传入了多列也不会抛异常。
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
myfun(v1, v2) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
2. 没有处理 null 值。我们期望如果输入有 null,则会抛异常终止执行。
|
||||
因此 process 函数改进如下:
|
||||
|
||||
```python
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception(f"require 1 parameter but given {cols}")
|
||||
return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
|
||||
然后执行下面的语句更新已有的 UDF:
|
||||
|
||||
```sql
|
||||
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
再传入 myfun 两个参数,就会执行失败了
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.014643s)
|
||||
```
|
||||
|
||||
但遗憾的是我们自定义的异常信息没有展示给用户,而是在插件的日志文件 /var/log/taos/taospyudf.log 中:
|
||||
|
||||
```text
|
||||
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
|
||||
|
||||
At:
|
||||
/var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
|
||||
|
||||
```
|
||||
|
||||
至此,我们学会了如何更新 UDF,并查看 UDF 输出的错误日志。
|
||||
(注:如果 UDF 更新后未生效,在 TDengine 3.0.5.0 以前(不含)的版本中需要重启 taosd,在 3.0.5.0 及之后的版本中不需要重启 taosd 即可生效。)
|
||||
|
||||
#### 示例三: 接收 n 个参数的 UDF
|
||||
|
||||
编写一个 UDF:输入(x1, x2, ..., xn), 输出每个值和它们的序号的乘积的和: 1 * x1 + 2 * x2 + ... + n * xn。如果 x1 至 xn 中包含 null,则结果为 null。
|
||||
这个示例与示例一的区别是,可以接受任意多列作为输入,且要处理每一列的值。编写 UDF 文件 /root/udf/nsum.py:
|
||||
|
||||
```python
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
result = []
|
||||
for i in range(rows):
|
||||
total = 0
|
||||
for j in range(cols):
|
||||
v = block.data(i, j)
|
||||
if v is None:
|
||||
total = None
|
||||
break
|
||||
total += (j + 1) * block.data(i, j)
|
||||
result.append(total)
|
||||
return result
|
||||
```
|
||||
|
||||
创建 UDF:
|
||||
|
||||
```sql
|
||||
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
测试 UDF:
|
||||
|
||||
```sql
|
||||
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
|
||||
Insert OK, 1 row(s) affected (0.003675s)
|
||||
|
||||
taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
|
||||
ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
|
||||
================================================================================================
|
||||
2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
|
||||
2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
|
||||
2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
|
||||
2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
|
||||
Query OK, 4 row(s) in set (0.010653s)
|
||||
```
|
||||
|
||||
#### 示例四:使用第三方库
|
||||
|
||||
编写一个 UDF,输入一个时间戳,输出距离这个时间最近的下一个周日。比如今天是 2023-05-25, 则下一个周日是 2023-05-28。
|
||||
完成这个函数要用到第三方库 moment。先安装这个库:
|
||||
|
||||
```shell
|
||||
pip3 install moment
|
||||
```
|
||||
|
||||
然后编写 UDF 文件 /root/udf/nextsunday.py
|
||||
|
||||
```python
|
||||
import moment
|
||||
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception("require only 1 parameter")
|
||||
if not type(block.data(0, 0)) is int:
|
||||
raise Exception("type error")
|
||||
return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
|
||||
for i in range(rows)]
|
||||
```
|
||||
|
||||
UDF 框架会将 TDengine 的 timestamp 类型映射为 Python 的 int 类型,所以这个函数只接受一个表示毫秒数的整数。process 方法先做参数检查,然后用 moment 包把时间的星期替换为星期日,最后格式化输出。输出的字符串长度固定为 10 个字符,因此可以这样创建 UDF 函数:
|
||||
|
||||
```sql
|
||||
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
|
||||
```
|
||||
|
||||
此时测试函数,如果你是用 systemctl 启动的 taosd,肯定会遇到错误:
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
|
||||
DB error: udf function execution failure (1.123615s)
|
||||
```
|
||||
|
||||
```shell
|
||||
tail -20 taospyudf.log
|
||||
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
|
||||
```
|
||||
|
||||
这是因为 “moment” 所在位置不在 python udf 插件默认的库搜索路径中。怎么确认这一点呢?通过以下命令搜索 taospyudf.log:
|
||||
|
||||
```shell
|
||||
grep 'sys path' taospyudf.log | tail -1
|
||||
```
|
||||
|
||||
输出如下
|
||||
|
||||
```text
|
||||
2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
|
||||
```
|
||||
|
||||
发现 python udf 插件默认搜索的第三方库安装路径是: /lib/python3/dist-packages,而 moment 默认安装到了 /usr/local/lib/python3.8/dist-packages。下面我们修改 python udf 插件默认的库搜索路径。
|
||||
先打开 python3 命令行,查看当前的 sys.path
|
||||
|
||||
```python
|
||||
>>> import sys
|
||||
>>> ":".join(sys.path)
|
||||
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
|
||||
```
|
||||
|
||||
复制上面脚本输出的字符串,然后编辑 /etc/taos/taos.cfg 加入以下配置:
|
||||
|
||||
```shell
|
||||
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
|
||||
```
|
||||
|
||||
保存后执行 systemctl restart taosd, 再测试就不报错了:
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
ts | nextsunday(ts) |
|
||||
===========================================
|
||||
2023-05-01 12:13:14.000 | 2023-05-07 |
|
||||
2023-05-03 08:09:10.000 | 2023-05-07 |
|
||||
2023-05-10 07:06:05.000 | 2023-05-14 |
|
||||
2023-05-25 09:09:15.000 | 2023-05-28 |
|
||||
Query OK, 4 row(s) in set (1.011474s)
|
||||
```
|
||||
|
||||
#### 示例五:聚合函数
|
||||
|
||||
编写一个聚合函数,计算某一列最大值和最小值的差。
|
||||
聚合函数与标量函数的区别是:标量函数是多行输入对应多个输出,聚合函数是多行输入对应一个输出。聚合函数的执行过程有点像经典的 map-reduce 框架的执行过程,框架把数据分成若干块,每个 mapper 处理一个块,reducer 再把 mapper 的结果做聚合。不一样的地方在于,TDengine Python UDF 中的 reduce 函数既有 map 的功能又有 reduce 的功能。reduce 函数接受两个参数:一个是自己要处理的数据,一个是别的任务执行 reduce 函数的处理结果。如下面的示例 /root/udf/myspread.py:
|
||||
|
||||
```python
|
||||
import io
|
||||
import math
|
||||
import pickle
|
||||
|
||||
LOG_FILE: io.TextIOBase = None
|
||||
|
||||
|
||||
def init():
|
||||
global LOG_FILE
|
||||
LOG_FILE = open("/var/log/taos/spread.log", "wt")
|
||||
log("init function myspead success")
|
||||
|
||||
|
||||
def log(o):
|
||||
LOG_FILE.write(str(o) + '\n')
|
||||
|
||||
|
||||
def destroy():
|
||||
log("close log file: spread.log")
|
||||
LOG_FILE.close()
|
||||
|
||||
|
||||
def start():
|
||||
return pickle.dumps((-math.inf, math.inf))
|
||||
|
||||
|
||||
def reduce(block, buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
log(f"initial max_number={max_number}, min_number={min_number}")
|
||||
rows, _ = block.shape()
|
||||
for i in range(rows):
|
||||
v = block.data(i, 0)
|
||||
if v > max_number:
|
||||
log(f"max_number={v}")
|
||||
max_number = v
|
||||
if v < min_number:
|
||||
log(f"min_number={v}")
|
||||
min_number = v
|
||||
return pickle.dumps((max_number, min_number))
|
||||
|
||||
|
||||
def finish(buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
return max_number - min_number
|
||||
```
|
||||
|
||||
在这个示例中我们不光定义了一个聚合函数,还添加了记录执行日志的功能,讲解如下:
|
||||
1. init 函数不再是空函数,而是打开了一个文件用于写执行日志
|
||||
2. log 函数是记录日志的工具,自动将传入的对象转成字符串,加换行符输出
|
||||
3. destroy 函数用来在执行结束关闭文件
|
||||
4. start 返回了初始的 buffer,用来存聚合函数的中间结果,我们把最大值初始化为负无穷大,最小值初始化为正无穷大
|
||||
5. reduce 处理每个数据块并聚合结果
|
||||
6. finish 函数将最终的 buffer 转换成最终的输出
|
||||
执行下面的 SQL语句创建对应的 UDF:
|
||||
|
||||
```sql
|
||||
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
|
||||
```
|
||||
|
||||
这个 SQL 语句与创建标量函数的 SQL 语句有两个重要区别:
|
||||
1. 增加了 aggregate 关键字
|
||||
2. 增加了 bufsize 关键字,用来指定存储中间结果的内存大小,这个数值可以大于实际使用的数值。本例中间结果是两个浮点数组成的 tuple,序列化后实际占用大小只有 32 个字节,但指定的 bufsize 是128,可以用 python 命令行打印实际占用的字节数
|
||||
|
||||
```python
|
||||
>>> len(pickle.dumps((12345.6789, 23456789.9877)))
|
||||
32
|
||||
```
|
||||
|
||||
测试这个函数,可以看到 myspread 的输出结果和内置的 spread 函数的输出结果是一致的。
|
||||
|
||||
```sql
|
||||
taos> select myspread(v1) from t;
|
||||
myspread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.013486s)
|
||||
|
||||
taos> select spread(v1) from t;
|
||||
spread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.005501s)
|
||||
```
|
||||
|
||||
最后,查看我们自己打印的执行日志,从日志可以看出,reduce 函数被执行了 3 次。执行过程中 max 值被更新了 4 次, min 值只被更新 1 次。
|
||||
|
||||
```shell
|
||||
root@slave11 /var/log/taos $ cat spread.log
|
||||
init function myspread success
|
||||
initial max_number=-inf, min_number=inf
|
||||
max_number=1
|
||||
min_number=1
|
||||
initial max_number=1, min_number=1
|
||||
max_number=2
|
||||
max_number=3
|
||||
initial max_number=3, min_number=1
|
||||
max_number=6
|
||||
close log file: spread.log
|
||||
```
|
||||
|
||||
通过这个示例,我们学会了如何定义聚合函数,并打印自定义的日志信息。
|
||||
|
||||
### SQL 命令
|
||||
|
||||
1. 创建标量函数的语法
|
||||
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
2. 创建聚合函数的语法
|
||||
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
3. 更新标量函数
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
4. 更新聚合函数
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
注意:如果加了 “AGGREGATE” 关键字,更新之后函数将被当作聚合函数,无论之前是什么类型的函数。相反,如果没有加 “AGGREGATE” 关键字,更新之后的函数将被当作标量函数,无论之前是什么类型的函数。
|
||||
|
||||
5. 查看函数信息
|
||||
|
||||
同名的 UDF 每更新一次,版本号会增加 1。
|
||||
|
||||
```sql
|
||||
select * from ins_functions \G;
|
||||
```
|
||||
|
||||
6. 查看和删除已有的 UDF
|
||||
|
||||
```sql
|
||||
SHOW functions;
|
||||
DROP FUNCTION function_name;
|
||||
```
|
||||
|
||||
|
||||
上面的命令可以查看 UDF 的完整信息
|
||||
|
||||
### Python UDF 示例代码
|
||||
### 更多 Python UDF 示例代码
|
||||
#### 标量函数示例 [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
||||
|
||||
pybitand 实现多列的按位与功能。如果只有一列,返回这一列。pybitand 忽略空值。
|
||||
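其核心逻辑大致如下(仅为示意,并非仓库中 pybitand.py 的原文):

```python
def init():
    pass

def destroy():
    pass

def process(block):
    # 对每一行:将该行所有非 NULL 的列做按位与;若整行都是 NULL,结果为 NULL
    rows, cols = block.shape()
    result = []
    for i in range(rows):
        acc = None
        for j in range(cols):
            v = block.data(i, j)
            if v is None:
                continue                      # 忽略空值
            acc = v if acc is None else (acc & v)
        result.append(acc)
    return result
```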
|
@ -386,6 +871,17 @@ pyl2norm 实现了输入列的所有数据的二阶范数,即对每个数据
|
|||
|
||||
</details>
|
||||
|
||||
#### 聚合函数示例 [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||
|
||||
pycumsum 使用 numpy 计算输入列所有数据的累积和。
|
||||
<details>
|
||||
<summary>pycumsum.py</summary>
|
||||
|
||||
```python
|
||||
{{#include tests/script/sh/pycumsum.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
## 管理和使用 UDF
|
||||
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
|
||||
|
||||
|
|
|
@ -32,25 +32,22 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
|||
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||
REST 连接支持所有能运行 Java 的平台。
|
||||
|
||||
## 版本支持
|
||||
## 版本历史
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
|
||||
## 最近更新记录
|
||||
|
||||
| taos-jdbcdriver 版本 | 主要变化 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||
| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 |
|
||||
| 3.2.0 | 存在连接问题,不推荐使用 |
|
||||
| 3.1.0 | WebSocket 连接支持订阅功能 |
|
||||
| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
|
||||
| 3.0.0 | 支持 TDengine 3.0 |
|
||||
| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 |
|
||||
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
|
||||
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
|
||||
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
|
||||
| 2.0.37 | 增加对 json tag 支持 |
|
||||
| 2.0.36 | 增加对 schemaless 写入支持 |
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
||||
| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | 3.0.3.0 及更高版本 |
|
||||
| 3.2.0 | 存在连接问题,不推荐使用 | - |
|
||||
| 3.1.0 | WebSocket 连接支持订阅功能 | - |
|
||||
| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 | - |
|
||||
| 3.0.0 | 支持 TDengine 3.0 | 3.0.0.0 及更高版本 |
|
||||
| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 | - |
|
||||
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 | - |
|
||||
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | - |
|
||||
| 2.0.38 | JDBC REST 连接增加批量拉取功能 | - |
|
||||
| 2.0.37 | 增加对 json tag 支持 | - |
|
||||
| 2.0.36 | 增加对 schemaless 写入支持 | - |
|
||||
|
||||
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
|
||||
|
||||
|
@ -80,45 +77,47 @@ JDBC 连接器可能报错的错误码包括 4 种:
|
|||
|
||||
具体的错误码请参考:
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
|
||||
| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
|
||||
| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
|
||||
| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
|
||||
| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
|
||||
| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
|
||||
| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
||||
| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
|
||||
| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
|
||||
| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
|
||||
| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
|
||||
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
||||
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
||||
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
||||
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
||||
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
||||
| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
|
||||
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
||||
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
||||
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
||||
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
||||
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
||||
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
||||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||
| - | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| - | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
|
||||
| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
|
||||
| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
|
||||
| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
|
||||
| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
|
||||
| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
|
||||
| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
||||
| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
|
||||
| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
|
||||
| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
|
||||
| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
|
||||
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
||||
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
||||
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
||||
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
||||
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
||||
| 0x231d | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| 0x231e | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
|
||||
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
||||
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
||||
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
||||
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
||||
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
||||
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
||||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||
| 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 |
|
||||
| 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -169,7 +168,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.1</version>
|
||||
<version>3.2.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -916,14 +915,15 @@ public class SchemalessWsTest {
|
|||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
init(connection);
|
||||
try(Connection connection = DriverManager.getConnection(url)){
|
||||
init(connection);
|
||||
|
||||
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
System.exit(0);
|
||||
try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
|
@ -962,6 +962,7 @@ statement.executeUpdate("create topic if not exists topic_speed as select ts, sp
|
|||
|
||||
```java
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
||||
|
@ -969,12 +970,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
|
|||
TaosConsumer consumer = new TaosConsumer<>(config);
|
||||
```
|
||||
|
||||
- bootstrap.servers: TDengine 服务端所在的`ip:port`,如果使用 WebSocket 连接,则为 taosAdapter 所在的`ip:port`。
|
||||
- enable.auto.commit: 是否允许自动提交。
|
||||
- group.id: consumer 所在的 group。
|
||||
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
|
||||
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
|
||||
- httpConnectTimeout:创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout:数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
|
||||
#### 订阅消费数据
|
||||
|
@ -991,6 +994,17 @@ while(true) {
|
|||
|
||||
`poll` 每次调用获取一个消息。
|
||||
|
||||
#### 指定订阅 Offset
|
||||
|
||||
```
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
```
|
||||
|
||||
#### 关闭订阅
|
||||
|
||||
```java
|
||||
|
@ -1016,10 +1030,19 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("td.connect.type", "jni");
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1093,12 +1116,19 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.type", "ws");
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group2");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1239,6 +1269,7 @@ public static void main(String[] args) throws Exception {
|
|||
- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
|
||||
- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
|
||||
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。
|
||||
- consumer-demo:Consumer 消费 TDengine 数据示例,可通过参数控制消费速度。
|
||||
|
||||
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
|
|||
|
||||
## 版本支持
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
|
||||
|
||||
## 支持的功能特性
|
||||
|
||||
|
@ -383,6 +383,15 @@ func main() {
|
|||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
|
||||
|
||||
按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭连接。
|
||||
|
@ -468,11 +477,20 @@ func main() {
|
|||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
|
||||
|
||||
按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭连接。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
|
||||
|
||||
### 通过 WebSocket 进行参数绑定
|
||||
|
||||
|
@ -520,7 +538,7 @@ func main() {
|
|||
|
||||
结束参数绑定。
|
||||
|
||||
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
|
||||
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
|
||||
|
||||
## API 参考
|
||||
|
||||
|
|
|
@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
|
|||
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||
Websocket 连接支持所有能运行 Rust 的平台。
|
||||
|
||||
## 版本支持
|
||||
## 版本历史
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
| Rust 连接器版本 | TDengine 版本 | 主要功能 |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.8.10 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
|
||||
| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
|
||||
| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
|
||||
| v0.6.0 | 3.0.0.0 | 基础功能。 |
|
||||
|
||||
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
|
||||
|
||||
|
@ -65,6 +70,13 @@ taos = "*"
|
|||
taos = { version = "*", default-features = false, features = ["ws"] }
|
||||
```
|
||||
|
||||
当仅启用 `ws` 特性时,可同时指定 `r2d2` 使得在同步(blocking/sync)模式下使用 [r2d2] 作为连接池:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
taos = { version = "*", default-features = false, features = ["r2d2", "ws"] }
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="native" label="仅原生连接">
|
||||
|
@ -257,26 +269,24 @@ let conn: Taos = cfg.build();
|
|||
|
||||
### 连接池
|
||||
|
||||
在复杂应用中,建议启用连接池。[taos] 的连接池使用 [r2d2] 实现。
|
||||
在复杂应用中,建议启用连接池。[taos] 的连接池默认(异步模式)使用 [deadpool] 实现。
|
||||
|
||||
如下,可以生成一个默认参数的连接池。
|
||||
|
||||
```rust
|
||||
let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
|
||||
let pool: Pool<TaosBuilder> = TaosBuilder::from_dsn("taos:///")
|
||||
.unwrap()
|
||||
.pool()
|
||||
.unwrap();
|
||||
```
|
||||
|
||||
同样可以使用连接池的构造器,对连接池参数进行设置:
|
||||
|
||||
```rust
|
||||
let dsn = "taos://localhost:6030";
|
||||
|
||||
let opts = PoolBuilder::new()
|
||||
.max_size(5000) // max connections
|
||||
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
|
||||
.min_idle(Some(1000)) // minimal idle connections
|
||||
.connection_timeout(Duration::from_secs(2));
|
||||
|
||||
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
|
||||
let pool: Pool<TaosBuilder> = Pool::builder(Manager::from_dsn(dsn).unwrap().0)
|
||||
.max_size(88) // 最大连接数
|
||||
.build()
|
||||
.unwrap();
|
||||
```
|
||||
|
||||
在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。
|
||||
|
@ -497,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
|
|||
}
|
||||
```
|
||||
|
||||
获取消费进度:
|
||||
|
||||
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
let assignments = consumer.assignments().await.unwrap();
|
||||
```
|
||||
|
||||
按照指定的进度消费:
|
||||
|
||||
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
consumer.offset_seek(topic, vgroup_id, offset).await;
|
||||
```
|
||||
|
||||
停止订阅:
|
||||
|
||||
```rust
|
||||
|
@ -511,11 +537,12 @@ consumer.unsubscribe().await;
|
|||
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
|
||||
- `auto.commit.interval.ms`: 自动标记的时间间隔。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
|
||||
|
||||
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。
|
||||
|
||||
[taos]: https://github.com/taosdata/rust-connector-taos
|
||||
[deadpool]: https://crates.io/crates/deadpool
|
||||
[r2d2]: https://crates.io/crates/r2d2
|
||||
[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
|
||||
[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
|
||||
|
|
|
@ -362,7 +362,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
|
||||
##### TaosConnection 类的使用
|
||||
|
||||
`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor` 方法和 `close` 方法),也包含很多扩展功能(如:`execute`、`query`、`schemaless_insert` 和 `subscribe` 方法)。
|
||||
类似上文介绍的使用方法,增加 `req_id` 参数。
|
||||
|
||||
```python title="execute 方法"
|
||||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
|
||||
|
@ -372,13 +372,9 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
|
||||
```
|
||||
|
||||
:::tip
|
||||
查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。
|
||||
:::
|
||||
|
||||
##### TaosResult 类的使用
|
||||
|
||||
上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。
|
||||
类似上文介绍的使用方法,增加 `req_id` 参数。
|
||||
|
||||
```python title="blocks_iter 方法"
|
||||
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
|
||||
|
@ -391,14 +387,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
|
||||
```
|
||||
|
||||
:::note
|
||||
TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
|
||||
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST 连接">
|
||||
|
||||
类似上文介绍的使用方法,增加 `req_id` 参数。
|
||||
|
||||
##### TaosRestCursor 类的使用
|
||||
|
||||
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
|
||||
|
@ -420,8 +413,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
|
||||
对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket 连接">
|
||||
|
||||
类似上文介绍的使用方法,增加 `req_id` 参数。
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
|
||||
```
|
||||
|
@ -460,27 +456,169 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
|
|||
|
||||
### 数据订阅
|
||||
|
||||
连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅](../../develop/tmq/)。
|
||||
连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅文档](../../develop/tmq/)。
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="原生连接">
|
||||
|
||||
`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API,相关 API 定义请参考 [数据订阅文档](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api)。
|
||||
`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API。
|
||||
|
||||
#### 创建 Consumer
|
||||
|
||||
创建 Consumer 语法为 `consumer = Consumer(configs)`,参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
|
||||
|
||||
```python
|
||||
from taos.tmq import Consumer
|
||||
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
```
|
||||
|
||||
#### 订阅 topics
|
||||
|
||||
Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
#### 消费数据
|
||||
|
||||
Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
|
||||
|
||||
```python
|
||||
while True:
|
||||
res = consumer.poll(1)
|
||||
if not res:
|
||||
continue
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
val = res.value()
|
||||
|
||||
for block in val:
|
||||
print(block.fetchall())
|
||||
```
|
||||
|
||||
#### 获取消费进度
|
||||
|
||||
Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
#### 重置消费进度
|
||||
|
||||
Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置,方法参数类型为 TopicPartition。
|
||||
|
||||
```python
|
||||
tp = TopicPartition(topic='topic1', partition=0, offset=0)
|
||||
consumer.seek(tp)
|
||||
```
|
||||
|
||||
#### 结束消费
|
||||
|
||||
消费结束后,应当取消订阅,并关闭 Consumer。
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
#### tmq 订阅示例代码
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_example.py}}
|
||||
```
|
||||
|
||||
#### 获取和重置消费进度示例代码
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket 连接">
|
||||
|
||||
除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。
|
||||
除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据,使用 websocket 方式订阅 TMQ 数据需要安装 `taos-ws-py`。
|
||||
|
||||
taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。
|
||||
|
||||
#### 创建 Consumer
|
||||
|
||||
创建 Consumer 语法为 `consumer = Consumer(conf=configs)`,使用时需要指定 `td.connect.websocket.scheme` 参数值为 "ws",参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
|
||||
|
||||
```python
|
||||
import taosws
|
||||
|
||||
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
|
||||
```
|
||||
|
||||
#### 订阅 topics
|
||||
|
||||
Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
#### 消费数据
|
||||
|
||||
Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
|
||||
|
||||
```python
|
||||
while True:
|
||||
res = consumer.poll(timeout=1.0)
|
||||
if not res:
|
||||
continue
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
for block in res:
|
||||
for row in block:
|
||||
print(row)
|
||||
```
|
||||
|
||||
#### 获取消费进度
|
||||
|
||||
Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
#### 重置消费进度
|
||||
|
||||
Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置。
|
||||
|
||||
```python
|
||||
consumer.seek(topic='topic1', partition=0, offset=0)
|
||||
```
|
||||
|
||||
#### 结束消费
|
||||
|
||||
消费结束后,应当取消订阅,并关闭 Consumer。
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
#### tmq 订阅示例代码
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_example.py}}
|
||||
```
|
||||
|
||||
连接器提供了 `assignment` 接口,用于获取订阅的 topic 的消费进度;同时提供 `seek` 接口,用于重置 topic 的消费进度。
|
||||
|
||||
#### 获取和重置消费进度示例代码
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
||||
|
|
|
@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
|
||||
:::note
|
||||
|
||||
- 表的每行长度不能超过 48KB(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
|
||||
- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
|
||||
- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382) 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
|
||||
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
|
||||
|
||||
:::
|
||||
|
|
|
@ -121,6 +121,8 @@ alter_database_option: {
|
|||
| WAL_LEVEL value
|
||||
| WAL_FSYNC_PERIOD value
|
||||
| KEEP value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ table_option: {
|
|||
|
||||
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
|
||||
2. 表名最大长度为 192;
|
||||
3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
3. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
|
||||
5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
|
||||
6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
|
||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
|||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
|
|
@ -890,9 +890,10 @@ ignore_null_values: {
|
|||
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
|
||||
- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
|
||||
- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。
|
||||
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
|
||||
- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
|
||||
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值.
|
||||
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
|
||||
- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
|
||||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
|
||||
|
@ -1001,7 +1002,6 @@ SAMPLE(expr, k)
|
|||
**使用说明**:
|
||||
|
||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### TAIL
|
||||
|
@ -1080,7 +1080,6 @@ CSUM(expr)
|
|||
|
||||
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
||||
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### DERIVATIVE
|
||||
|
@ -1104,7 +1103,6 @@ ignore_negative: {
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
||||
|
||||
### DIFF
|
||||
|
@ -1167,7 +1165,6 @@ MAVG(expr, k)
|
|||
|
||||
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
||||
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### STATECOUNT
|
||||
|
@ -1193,7 +1190,6 @@ STATECOUNT(expr, oper, val)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
|
||||
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
|
||||
|
||||
|
||||
|
@ -1221,7 +1217,6 @@ STATEDURATION(expr, oper, val, unit)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
|
||||
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
|
||||
|
||||
|
||||
|
@ -1239,8 +1234,6 @@ TWA(expr)
|
|||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
|
||||
|
||||
## 系统信息函数
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则
|
|||
|
||||
- 数据库名最大长度为 64 字节
|
||||
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
|
||||
- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 列名最大长度为 64 字节
|
||||
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
|
||||
- 标签名最大长度为 64 字节
|
||||
|
|
|
@ -91,11 +91,30 @@ taos --dump-config
|
|||
### maxShellConns
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------- |
|
||||
| --------| ----------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 一个 dnode 容许的连接数 |
|
||||
| 含义 | 一个 dnode 容许的连接数 |
|
||||
| 取值范围 | 10-50000000 |
|
||||
| 缺省值 | 5000 |
|
||||
| 缺省值 | 5000 |
|
||||
|
||||
### numOfRpcSessions
|
||||
|
||||
| 属性 | 说明 |
|
||||
| --------| ---------------------- |
|
||||
| 适用范围 | 客户端和服务端都适用 |
|
||||
| 含义 | 一个客户端能创建的最大连接数|
|
||||
| 取值范围 | 100-100000 |
|
||||
| 缺省值 | 10000 |
|
||||
|
||||
### timeToGetAvailableConn
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------|
|
||||
| 适用范围 | 客户端和服务端都适用 |
|
||||
| 含义 |获得可用连接的最长等待时间|
|
||||
| 取值范围 | 10-50000000(单位为毫秒)|
|
||||
| 缺省值 | 500000 |
|
||||
|
||||
|
||||
### numOfRpcSessions
|
||||
|
||||
|
|
|
@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
|
||||
:::tip
|
||||
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
|
||||
48KB,标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
|
||||
48KB(从 3.0.5.0 版本开始为 64KB),标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
|
|||
|
||||

|
||||
|
||||
## 什么是 Confluent?
|
||||
|
||||
[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
|
||||
|
||||
1. Schema Registry
|
||||
2. REST 代理
|
||||
3. 非 Java 客户端
|
||||
4. 很多打包好的 Kafka Connect 插件
|
||||
5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
|
||||
|
||||
这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
|
||||

|
||||
|
||||
Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
|
||||
|
||||
## 前置条件
|
||||
|
||||
运行本教程中示例的前提条件。
|
||||
|
||||
1. Linux 操作系统
|
||||
2. 已安装 Java 8 和 Maven
|
||||
3. 已安装 Git
|
||||
3. 已安装 Git、curl、vi
|
||||
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
|
||||
|
||||
## 安装 Confluent
|
||||
|
||||
Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
|
||||
## 安装 Kafka
|
||||
|
||||
在任意目录下执行:
|
||||
|
||||
```
|
||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||
```shell
|
||||
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
|
||||
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
|
||||
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
|
||||
```
|
||||
|
||||
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
|
||||
然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
|
||||
|
||||
```title=".profile"
|
||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||
export KAFKA_HOME=/opt/kafka
|
||||
export PATH=$PATH:$KAFKA_HOME/bin
|
||||
```
|
||||
|
||||
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
|
||||
|
||||
安装完成之后,可以输入`confluent version`做简单验证:
|
||||
|
||||
```
|
||||
# confluent version
|
||||
confluent - Confluent CLI
|
||||
|
||||
Version: v2.6.1
|
||||
Git Ref: 6d920590
|
||||
Build Date: 2022-02-18T06:14:21Z
|
||||
Go Version: go1.17.6 (linux/amd64)
|
||||
Development: false
|
||||
```
|
||||
|
||||
## 安装 TDengine Connector 插件
|
||||
|
||||
### 从源码安装
|
||||
### 编译插件
|
||||
|
||||
```
|
||||
```shell
|
||||
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
||||
cd kafka-connect-tdengine
|
||||
mvn clean package
|
||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
```
|
||||
|
||||
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
|
||||
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`。
|
||||
|
||||
### 用 confluent-hub 安装
|
||||
### 配置插件
|
||||
|
||||
[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。
|
||||
**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。
|
||||
将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
|
||||
|
||||
## 启动 Confluent
|
||||
|
||||
```
|
||||
confluent local services start
|
||||
```properties
|
||||
plugin.path=/usr/share/java,/opt/kafka/components
|
||||
```
|
||||
|
||||
:::note
|
||||
一定要先安装插件再启动 Confluent, 否则加载插件会失败。
|
||||
:::
|
||||
## 启动 Kafka
|
||||
|
||||
:::tip
|
||||
若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 :
|
||||
```shell
|
||||
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
|
||||
|
||||
```title="控制台输出日志" {1}
|
||||
Using CONFLUENT_CURRENT: /tmp/confluent.106668
|
||||
Starting ZooKeeper
|
||||
ZooKeeper is [UP]
|
||||
Starting Kafka
|
||||
Kafka is [UP]
|
||||
Starting Schema Registry
|
||||
Schema Registry is [UP]
|
||||
Starting Kafka REST
|
||||
Kafka REST is [UP]
|
||||
Starting Connect
|
||||
Connect is [UP]
|
||||
Starting ksqlDB Server
|
||||
ksqlDB Server is [UP]
|
||||
Starting Control Center
|
||||
Control Center is [UP]
|
||||
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
|
||||
|
||||
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
|
||||
```
|
||||
|
||||
清空数据可执行 `rm -rf /tmp/confluent.106668`。
|
||||
:::
|
||||
|
||||
### 验证各个组件是否启动成功
|
||||
### 验证 kafka Connect 是否启动成功
|
||||
|
||||
输入命令:
|
||||
|
||||
```
|
||||
confluent local services status
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
如果各组件都启动成功,会得到如下输出:
|
||||
|
||||
```txt
|
||||
[]
|
||||
```
|
||||
Connect is [UP]
|
||||
Control Center is [UP]
|
||||
Kafka is [UP]
|
||||
Kafka REST is [UP]
|
||||
ksqlDB Server is [UP]
|
||||
Schema Registry is [UP]
|
||||
ZooKeeper is [UP]
|
||||
```
|
||||
|
||||
### 验证插件是否安装成功
|
||||
|
||||
在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
|
||||
|
||||
```
|
||||
confluent local services connect plugin list
|
||||
```
|
||||
|
||||
如果成功安装,会输出如下:
|
||||
|
||||
```txt {4,9}
|
||||
Available Connect Plugins:
|
||||
[
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"type": "sink",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"type": "source",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
......
|
||||
```
|
||||
|
||||
如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
|
||||
```
|
||||
echo `cat /tmp/confluent.current`/connect/connect.stdout
|
||||
```
|
||||
该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
|
||||
|
||||
与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
|
||||
|
||||
|
||||
## TDengine Sink Connector 的使用
|
||||
|
||||
|
@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
|
|||
|
||||
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
|
||||
|
||||
### 添加配置文件
|
||||
### 添加 Sink Connector 配置文件
|
||||
|
||||
```
|
||||
```shell
|
||||
mkdir ~/test
|
||||
cd ~/test
|
||||
vi sink-demo.properties
|
||||
vi sink-demo.json
|
||||
```
|
||||
|
||||
sink-demo.properties 内容如下:
|
||||
sink-demo.json 内容如下:
|
||||
|
||||
```ini title="sink-demo.properties"
|
||||
name=TDengineSinkConnector
|
||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
||||
tasks.max=1
|
||||
topics=meters
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.user=root
|
||||
connection.password=taosdata
|
||||
connection.database=power
|
||||
db.schemaless=line
|
||||
data.precision=ns
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="sink-demo.json"
|
||||
{
|
||||
"name": "TDengineSinkConnector",
|
||||
"config": {
|
||||
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.user": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "power",
|
||||
"db.schemaless": "line",
|
||||
"data.precision": "ns",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
关键配置说明:
|
||||
|
||||
1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。
|
||||
2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。
|
||||
1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
|
||||
2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
|
||||
|
||||
### 创建 Connector 实例
|
||||
### 创建 Sink Connector 实例
|
||||
|
||||
```
|
||||
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
|
||||
```shell
|
||||
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
若以上命令执行成功,则有如下输出:
|
||||
|
@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
|
|||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"name": "TDengineSinkConnector"
|
||||
"name": "TDengineSinkConnector",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": "1",
|
||||
},
|
||||
"tasks": [],
|
||||
"type": "sink"
|
||||
|
@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
|
|||
|
||||
使用 kafka-console-producer 向主题 meters 添加测试数据。
|
||||
|
||||
```
|
||||
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
|
||||
```shell
|
||||
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
|
||||
```
|
||||
|
||||
:::note
|
||||
|
@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
|
|||
|
||||
使用 TDengine CLI 验证同步是否成功。
|
||||
|
||||
```
|
||||
```sql
|
||||
taos> use power;
|
||||
Database changed.
|
||||
|
||||
taos> select * from meters;
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
_ts | current | voltage | phase | groupid | location |
|
||||
===============================================================================================================================================================
|
||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||
|
@ -297,29 +216,36 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx
|
|||
|
||||
下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
|
||||
|
||||
### 添加配置文件
|
||||
### 添加 Source Connector 配置文件
|
||||
|
||||
```
|
||||
vi source-demo.properties
|
||||
```shell
|
||||
vi source-demo.json
|
||||
```
|
||||
|
||||
输入以下内容:
|
||||
|
||||
```ini title="source-demo.properties"
|
||||
name=TDengineSourceConnector
|
||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
||||
tasks.max=1
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.username=root
|
||||
connection.password=taosdata
|
||||
connection.database=test
|
||||
connection.attempts=3
|
||||
connection.backoff.ms=5000
|
||||
topic.prefix=tdengine-source-
|
||||
poll.interval.ms=1000
|
||||
fetch.max.rows=100
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="source-demo.json"
|
||||
{
|
||||
"name":"TDengineSourceConnector",
|
||||
"config":{
|
||||
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"tasks.max": 1,
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.username": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "test",
|
||||
"connection.attempts": 3,
|
||||
"connection.backoff.ms": 5000,
|
||||
"topic.prefix": "tdengine-source",
|
||||
"poll.interval.ms": 1000,
|
||||
"fetch.max.rows": 100,
|
||||
"topic.per.stable": true,
|
||||
"topic.ignore.db": false,
|
||||
"out.format": "line",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 准备测试数据
|
||||
|
@ -344,27 +270,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
|
|||
|
||||
使用 TDengine CLI, 执行 SQL 文件。
|
||||
|
||||
```
|
||||
```shell
|
||||
taos -f prepare-source-data.sql
|
||||
```
|
||||
|
||||
### 创建 Connector 实例
|
||||
### 创建 Source Connector 实例
|
||||
|
||||
```
|
||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
||||
```shell
|
||||
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### 查看 topic 数据
|
||||
|
||||
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据,往 TDengine 插入两条新的数据之后,kafka-console-consumer 也会立即输出新增的两条数据。输出数据为 InfluxDB line protocol 格式。
|
||||
|
||||
```
|
||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||
```shell
|
||||
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
|
||||
```
|
||||
|
||||
输出:
|
||||
|
||||
```
|
||||
```txt
|
||||
......
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||
|
@ -373,7 +299,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
|
|||
|
||||
此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:
|
||||
|
||||
```
|
||||
```sql
|
||||
USE test;
|
||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||
|
@ -387,15 +313,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
|||
|
||||
查看当前活跃的 connector:
|
||||
|
||||
```
|
||||
confluent local services connect connector status
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
|
||||
|
||||
```
|
||||
confluent local services connect connector unload TDengineSinkConnector
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
```shell
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||
```
|
||||
|
||||
## 配置参考
|
||||
|
@ -437,20 +363,19 @@ confluent local services connect connector unload TDengineSourceConnector
|
|||
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
|
||||
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
|
||||
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
||||
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
|
||||
7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`
|
||||
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 0,即获取到当前最新时间的所有数据。
|
||||
7. `out.format` : 结果集输出格式。`line` 表示输出格式为 InfluxDB Line 协议格式,`json` 表示输出格式是 json。默认为 line。
|
||||
8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`
|
||||
9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 `<topic.prefix>-<stable.name>`,false 表示规则为 `<topic.prefix>-<connection.database>-<stable.name>`,默认 false。在 `topic.per.stable` 设置为 false 时不生效。
|
||||
|
||||
## 其他说明
|
||||
|
||||
1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
|
||||
2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
|
||||
1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>。
|
||||
|
||||
## 问题反馈
|
||||
|
||||
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
|
||||
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>。
|
||||
|
||||
## 参考
|
||||
|
||||
1. https://www.confluent.io/what-is-apache-kafka
|
||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
||||
3. https://docs.confluent.io/platform/current/platform.html
|
||||
1. <https://kafka.apache.org/documentation/>
|
||||
|
|
|
@ -247,10 +247,17 @@ launchctl limit maxfiles
|
|||
该提示表示创建 db 所需的 vnode 数量不足,需要的 vnode 数量不能超过 dnode 中 vnode 的上限。系统默认一个 dnode 中的 vnode 数量为 CPU 核数的两倍,也可以通过配置文件中的参数 supportVnodes 控制。
|
||||
通常调大 taos.cfg 中的 supportVnodes 参数即可,可先按下面的示例确认当前的 vnode 使用情况。
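以下示例可查看各 dnode 已用的 vnode 数与可支持的 vnode 上限(输出列名以实际版本为准):

```sql
-- 查看各 dnode 的 vnode 使用情况
SHOW DNODES;
```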
|
||||
|
||||
### 21 【查询】在服务器上的使用 tao-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
|
||||
### 21 在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
|
||||
这种情况是由客户端与服务器设置的时区不一致导致的,将客户端与服务器的时区调整为一致即可解决,可按下面的示例检查两端生效的配置。
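以下示例用于分别检查客户端与服务端生效的配置(含 timezone);此处假设当前版本支持 SHOW LOCAL VARIABLES 与 SHOW DNODE x VARIABLES 语法,具体以实际版本为准:

```sql
-- 查看客户端生效的配置(含 timezone)
SHOW LOCAL VARIABLES;
-- 查看指定 dnode(此处假设 id 为 1)的服务端配置
SHOW DNODE 1 VARIABLES;
```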
|
||||
|
||||
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
|
||||
### 22 表名确认是存在的,但在写入或查询时返回表名不存在,什么原因?
|
||||
TDengine 中的所有名称(包括数据库名、表名等)都是区分大小写的。如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来,即使输入的是大写,引擎也会转化成小写来使用;如果名称前后加上了反引号,引擎就不会再转化成小写,而是保持原样使用,示例见下。
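以下示例演示加与不加反引号时名称的处理差异(表名仅为示意):

```sql
-- 不加反引号:名称被统一转为小写,实际创建的表名为 mytable
CREATE TABLE MyTable (ts TIMESTAMP, v INT);
SELECT * FROM mytable;
-- 加反引号:名称保持原样,后续使用也必须带反引号
CREATE TABLE `MyTable2` (ts TIMESTAMP, v INT);
SELECT * FROM `MyTable2`;
```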
|
||||
|
||||
### 23 在 taos-CLI 中查询,字段内容不能完全显示出来怎么办?
|
||||
可以使用 \G 参数来竖式显示,如 show databases\G;(为了输入方便,在 \ 后按 TAB 键,会自动补全后面的内容),示例见下。
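以下是在 taos-CLI 中使用 \G 竖式显示的示例(meters 为假设的示例表):

```sql
show databases\G;
select * from meters limit 1\G;
```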
|
||||
|
||||
### 24 使用 taosBenchmark 测试工具写入数据查询很快,为什么我写入的数据查询非常慢?
|
||||
TDengine 在写入数据时如果存在严重的乱序写入问题,会严重影响查询性能,所以需要在写入前解决乱序问题。如果业务是从 Kafka 消费后写入,请合理设计消费者,尽可能让一个子表的数据由一个消费者去消费并写入,避免因设计产生乱序。
|
||||
|
||||
### 25 我想统计下前后两条写入记录之间的时间差值是多少?
|
||||
使用 DIFF 函数可以查看时间列或数值列前后两条记录的差值,非常方便,详细说明见 SQL 手册 -> 函数 -> DIFF,示例见下。
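以下给出一个使用 DIFF 的示例草图,其中子表 d1001 与 current 列为假设的示例结构:

```sql
-- 相邻两条记录的时间戳差值
SELECT DIFF(ts) FROM d1001;
-- 相邻两条记录的数值差值
SELECT DIFF(current) FROM d1001;
```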
|
||||
|
|
|
@ -10,6 +10,14 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.5.0
|
||||
|
||||
<Release type="tdengine" version="3.0.5.0" />
|
||||
|
||||
## 3.0.4.2
|
||||
|
||||
<Release type="tdengine" version="3.0.4.2" />
|
||||
|
||||
## 3.0.4.1
|
||||
|
||||
<Release type="tdengine" version="3.0.4.1" />
|
||||
|
|
|
@ -10,9 +10,9 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.5.0
|
||||
## 2.5.1
|
||||
|
||||
<Release type="tools" version="2.5.0" />
|
||||
<Release type="tools" version="2.5.1" />
|
||||
|
||||
## 2.5.0
|
||||
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>com.taosdata</groupId>
|
||||
<artifactId>consumer</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
|
||||
<properties>
|
||||
<maven.compiler.source>8</maven.compiler.source>
|
||||
<maven.compiler.target>8</maven.compiler.target>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>30.1.1-jre</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<version>3.3.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>ConsumerDemo</id>
|
||||
<configuration>
|
||||
<finalName>ConsumerDemo</finalName>
|
||||
<archive>
|
||||
<manifest>
|
||||
<mainClass>com.taosdata.ConsumerDemo</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
<descriptorRefs>
|
||||
<descriptorRef>jar-with-dependencies</descriptorRef>
|
||||
</descriptorRefs>
|
||||
</configuration>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>single</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
<encoding>UTF-8</encoding>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
|
@ -0,0 +1,52 @@
|
|||
# How to Run the Consumer Demo Code On Linux OS
|
||||
TDengine's Consumer demo project is organized as a Maven project so that users can easily compile, package and run it. If you don't have Maven on your server, you may install it using
|
||||
```
|
||||
sudo apt-get install maven
|
||||
```
|
||||
|
||||
## Install TDengine Client and TaosAdapter
|
||||
Make sure you have already installed the TDengine client in your current development environment.
|
||||
Download the TDengine package from our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client.
|
||||
|
||||
## Run Consumer Demo using mvn plugin
|
||||
Run the following command:
|
||||
```
|
||||
mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
|
||||
```
|
||||
|
||||
## Custom configuration
|
||||
```shell
|
||||
# the host of TDengine server
|
||||
export TAOS_HOST="127.0.0.1"
|
||||
|
||||
# the port of TDengine server
|
||||
export TAOS_PORT="6041"
|
||||
|
||||
# the consumer type, can be "ws" or "jni"
|
||||
export TAOS_TYPE="ws"
|
||||
|
||||
# the number of consumers
|
||||
export TAOS_JDBC_CONSUMER_NUM="1"
|
||||
|
||||
# the number of processors to consume
|
||||
export TAOS_JDBC_PROCESSOR_NUM="2"
|
||||
|
||||
# the number of records to be consumed per processor per second
|
||||
export TAOS_JDBC_RATE_PER_PROCESSOR="1000"
|
||||
|
||||
# poll wait time in ms
|
||||
export TAOS_JDBC_POLL_SLEEP="100"
|
||||
```
|
||||
|
||||
## Run Consumer Demo using jar
|
||||
|
||||
To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
|
||||
```
|
||||
mvn clean package assembly:single
|
||||
```
|
||||
|
||||
To run ConsumerDemo.jar, go to ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
|
||||
```
|
||||
java -jar target/ConsumerDemo-jar-with-dependencies.jar
|
||||
```
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
package com.taosdata;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
public class Bean {
|
||||
private Timestamp ts;
|
||||
private Integer c1;
|
||||
private String c2;
|
||||
|
||||
public Timestamp getTs() {
|
||||
return ts;
|
||||
}
|
||||
|
||||
public void setTs(Timestamp ts) {
|
||||
this.ts = ts;
|
||||
}
|
||||
|
||||
public Integer getC1() {
|
||||
return c1;
|
||||
}
|
||||
|
||||
public void setC1(Integer c1) {
|
||||
this.c1 = c1;
|
||||
}
|
||||
|
||||
public String getC2() {
|
||||
return c2;
|
||||
}
|
||||
|
||||
public void setC2(String c2) {
|
||||
this.c2 = c2;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("Bean {");
|
||||
sb.append("ts=").append(ts);
|
||||
sb.append(", c1=").append(c1);
|
||||
sb.append(", c2='").append(c2).append('\'');
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
|
||||
|
||||
public class BeanDeserializer extends ReferenceDeserializer<Bean> {
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
package com.taosdata;
|
||||
|
||||
public class Config {
|
||||
public static final String TOPIC = "test_consumer";
|
||||
public static final String TAOS_HOST = "127.0.0.1";
|
||||
public static final String TAOS_PORT = "6041";
|
||||
public static final String TAOS_TYPE = "ws";
|
||||
public static final int TAOS_JDBC_CONSUMER_NUM = 1;
|
||||
public static final int TAOS_JDBC_PROCESSOR_NUM = 2;
|
||||
public static final int TAOS_JDBC_RATE_PER_PROCESSOR = 1000;
|
||||
public static final int TAOS_JDBC_POLL_SLEEP = 100;
|
||||
|
||||
private final int consumerNum;
|
||||
private final int processCapacity;
|
||||
private final int rate;
|
||||
private final int pollSleep;
|
||||
private final String type;
|
||||
private final String host;
|
||||
private final String port;
|
||||
|
||||
public Config(String type, String host, String port, int consumerNum, int processCapacity, int rate, int pollSleep) {
|
||||
this.type = type;
|
||||
this.consumerNum = consumerNum;
|
||||
this.processCapacity = processCapacity;
|
||||
this.rate = rate;
|
||||
this.pollSleep = pollSleep;
|
||||
this.host = host;
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public int getConsumerNum() {
|
||||
return consumerNum;
|
||||
}
|
||||
|
||||
public int getProcessCapacity() {
|
||||
return processCapacity;
|
||||
}
|
||||
|
||||
public int getRate() {
|
||||
return rate;
|
||||
}
|
||||
|
||||
public int getPollSleep() {
|
||||
return pollSleep;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public String getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public static Config getFromENV() {
|
||||
String host = System.getenv("TAOS_HOST") != null ? System.getenv("TAOS_HOST") : TAOS_HOST;
|
||||
String port = System.getenv("TAOS_PORT") != null ? System.getenv("TAOS_PORT") : TAOS_PORT;
|
||||
String type = System.getenv("TAOS_TYPE") != null ? System.getenv("TAOS_TYPE") : TAOS_TYPE;
|
||||
|
||||
String c = System.getenv("TAOS_JDBC_CONSUMER_NUM");
|
||||
int num = c != null ? Integer.parseInt(c) : TAOS_JDBC_CONSUMER_NUM;
|
||||
|
||||
String p = System.getenv("TAOS_JDBC_PROCESSOR_NUM");
|
||||
int capacity = p != null ? Integer.parseInt(p) : TAOS_JDBC_PROCESSOR_NUM;
|
||||
|
||||
String r = System.getenv("TAOS_JDBC_RATE_PER_PROCESSOR");
|
||||
int rate = r != null ? Integer.parseInt(r) : TAOS_JDBC_RATE_PER_PROCESSOR;
|
||||
|
||||
String s = System.getenv("TAOS_JDBC_POLL_SLEEP");
|
||||
int sleep = s != null ? Integer.parseInt(s) : TAOS_JDBC_POLL_SLEEP;
|
||||
|
||||
return new Config(type, host, port, num, capacity, rate, sleep);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static com.taosdata.Config.*;
|
||||
|
||||
public class ConsumerDemo {
|
||||
public static void main(String[] args) throws SQLException {
|
||||
// Config
|
||||
Config config = Config.getFromENV();
|
||||
// Generated data
|
||||
mockData();
|
||||
|
||||
Properties prop = new Properties();
|
||||
prop.setProperty(TMQConstants.CONNECT_TYPE, config.getType());
|
||||
prop.setProperty(TMQConstants.BOOTSTRAP_SERVERS, config.getHost() + ":" + config.getPort());
|
||||
prop.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||
prop.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||
prop.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||
prop.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||
prop.setProperty(TMQConstants.GROUP_ID, "gId");
|
||||
prop.setProperty(TMQConstants.VALUE_DESERIALIZER, "com.taosdata.BeanDeserializer");
|
||||
for (int i = 0; i < config.getConsumerNum() - 1; i++) {
|
||||
new Thread(new Worker(prop, config)).start();
|
||||
}
|
||||
new Worker(prop, config).run();
|
||||
}
|
||||
|
||||
public static void mockData() throws SQLException {
|
||||
String dbName = "test_consumer";
|
||||
String tableName = "st";
|
||||
String url = "jdbc:TAOS-RS://" + TAOS_HOST + ":" + TAOS_PORT + "/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
Statement statement = connection.createStatement();
|
||||
statement.executeUpdate("create database if not exists " + dbName + " WAL_RETENTION_PERIOD 3650");
|
||||
statement.executeUpdate("use " + dbName);
|
||||
statement.executeUpdate("create table if not exists " + tableName + " (ts timestamp, c1 int, c2 nchar(100)) ");
|
||||
statement.executeUpdate("create topic if not exists " + TOPIC + " as select ts, c1, c2 from " + tableName);
|
||||
|
||||
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> {
|
||||
Thread t = new Thread(r);
|
||||
t.setName("mock-data-thread-" + t.getId());
|
||||
return t;
|
||||
});
|
||||
AtomicInteger atomic = new AtomicInteger();
|
||||
scheduledExecutorService.scheduleWithFixedDelay(() -> {
|
||||
int i = atomic.getAndIncrement();
|
||||
try {
|
||||
statement.executeUpdate("insert into " + tableName + " values(now, " + i + ",'" + i + "')");
|
||||
} catch (SQLException e) {
|
||||
// ignore
|
||||
}
|
||||
}, 0, 10, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||
|
||||
import java.sql.SQLException;
|
||||
import java.time.Duration;
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.Collections;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ForkJoinPool;
|
||||
import java.util.concurrent.Semaphore;
|
||||
|
||||
public class Worker implements Runnable {
|
||||
|
||||
int sleepTime;
|
||||
int rate;
|
||||
|
||||
ForkJoinPool pool = new ForkJoinPool();
|
||||
Semaphore semaphore;
|
||||
|
||||
TaosConsumer<Bean> consumer;
|
||||
|
||||
public Worker(Properties prop, Config config) throws SQLException {
|
||||
consumer = new TaosConsumer<>(prop);
|
||||
consumer.subscribe(Collections.singletonList(Config.TOPIC));
|
||||
semaphore = new Semaphore(config.getProcessCapacity());
|
||||
sleepTime = config.getPollSleep();
|
||||
rate = config.getRate();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
while (!Thread.interrupted()) {
|
||||
try {
|
||||
// 控制请求频率
|
||||
if (semaphore.tryAcquire()) {
|
||||
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
|
||||
pool.submit(() -> {
|
||||
RateLimiter limiter = RateLimiter.create(rate);
|
||||
try {
|
||||
for (ConsumerRecord<Bean> record : records) {
|
||||
// 流量控制
|
||||
limiter.acquire();
|
||||
// 业务处理数据
|
||||
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
|
||||
}
|
||||
} finally {
|
||||
semaphore.release();
|
||||
}
|
||||
});
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,7 +5,7 @@
|
|||
#spring.datasource.password=taosdata
|
||||
# datasource config - JDBC-RESTful
|
||||
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
|
||||
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
|
||||
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
|
||||
spring.datasource.username=root
|
||||
spring.datasource.password=taosdata
|
||||
spring.datasource.druid.initial-size=5
|
||||
|
|
|
@ -42,27 +42,27 @@ IF (TD_LINUX)
|
|||
)
|
||||
|
||||
target_link_libraries(tmq
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(stream_demo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(schemaless
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(prepare
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(demo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(asyncdemo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
|
||||
|
|
|
@ -162,6 +162,7 @@ static int l_query(lua_State *L){
|
|||
case TSDB_DATA_TYPE_JSON:
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY:
|
||||
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
||||
lua_pushlstring(L,(char *)row[i], length[i]);
|
||||
break;
|
||||
|
|
|
@ -161,6 +161,7 @@ static int l_query(lua_State *L){
|
|||
case TSDB_DATA_TYPE_JSON:
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY:
|
||||
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
||||
lua_pushlstring(L,(char *)row[i], length[i]);
|
||||
break;
|
||||
|
|
|
@ -51,7 +51,8 @@ typedef void TAOS_SUB;
|
|||
#define TSDB_DATA_TYPE_BLOB 18 // binary
|
||||
#define TSDB_DATA_TYPE_MEDIUMBLOB 19
|
||||
#define TSDB_DATA_TYPE_BINARY TSDB_DATA_TYPE_VARCHAR // string
|
||||
#define TSDB_DATA_TYPE_MAX 20
|
||||
#define TSDB_DATA_TYPE_GEOMETRY 20 // geometry
|
||||
#define TSDB_DATA_TYPE_MAX 21
|
||||
|
||||
typedef enum {
|
||||
TSDB_OPTION_LOCALE,
|
||||
|
|
|
@ -37,6 +37,13 @@ extern "C" {
|
|||
)
|
||||
// clang-format on
|
||||
|
||||
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
|
||||
|
||||
typedef struct STableKeyInfo {
|
||||
uint64_t uid;
|
||||
uint64_t groupId;
|
||||
} STableKeyInfo;
|
||||
|
||||
typedef struct SWinKey {
|
||||
uint64_t groupId;
|
||||
TSKEY ts;
|
||||
|
@ -224,6 +231,7 @@ typedef struct SColumnInfoData {
|
|||
};
|
||||
SColumnInfo info; // column info
|
||||
bool hasNull; // if current column data has null value.
|
||||
bool reassigned; // if current column data is reassigned.
|
||||
} SColumnInfoData;
|
||||
|
||||
typedef struct SQueryTableDataCond {
|
||||
|
|
|
@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data);
|
|||
|
||||
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
|
||||
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
||||
const SColumnInfoData* pSource, int32_t numOfRow2);
|
||||
|
@ -247,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
|
|||
tb_uid_t suid);
|
||||
|
||||
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
|
||||
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
|
||||
|
||||
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
|
||||
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
|
||||
|
|
|
@ -145,7 +145,7 @@ int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMall
|
|||
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
|
||||
|
||||
// for stmt bind
|
||||
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
|
||||
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen);
|
||||
void tColDataSortMerge(SArray *colDataArr);
|
||||
|
||||
// for raw block
|
||||
|
|
|
@ -29,7 +29,6 @@ extern "C" {
|
|||
#define SLOW_LOG_TYPE_OTHERS 0x4
|
||||
#define SLOW_LOG_TYPE_ALL 0xFFFFFFFF
|
||||
|
||||
|
||||
// cluster
|
||||
extern char tsFirst[];
|
||||
extern char tsSecond[];
|
||||
|
@ -83,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
|
|||
// mnode
|
||||
extern int64_t tsMndSdbWriteDelta;
|
||||
extern int64_t tsMndLogRetention;
|
||||
extern bool tsMndSkipGrant;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
|
@ -131,7 +131,7 @@ extern int32_t tsSlowLogScope;
|
|||
// client
|
||||
extern int32_t tsMinSlidingTime;
|
||||
extern int32_t tsMinIntervalTime;
|
||||
extern int32_t tsMaxMemUsedByInsert;
|
||||
extern int32_t tsMaxInsertBatchRows;
|
||||
|
||||
// build info
|
||||
extern char version[];
|
||||
|
@ -180,6 +180,8 @@ extern int32_t tsRpcRetryInterval;
|
|||
extern bool tsDisableStream;
|
||||
extern int64_t tsStreamBufferSize;
|
||||
extern int64_t tsCheckpointInterval;
|
||||
extern bool tsFilterScalarMode;
|
||||
extern int32_t tsMaxStreamBackendCache;
|
||||
|
||||
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||
|
||||
|
|
|
@ -2009,10 +2009,8 @@ typedef struct {
|
|||
int8_t withMeta;
|
||||
char* sql;
|
||||
char subDbName[TSDB_DB_FNAME_LEN];
|
||||
union {
|
||||
char* ast;
|
||||
char subStbName[TSDB_TABLE_FNAME_LEN];
|
||||
};
|
||||
char* ast;
|
||||
char subStbName[TSDB_TABLE_FNAME_LEN];
|
||||
} SCMCreateTopicReq;
|
||||
|
||||
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
|
||||
|
@ -2809,37 +2807,49 @@ typedef struct {
|
|||
int64_t suid;
|
||||
} SMqRebVgReq;
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI64(buf, pReq->leftForVer);
|
||||
tlen += taosEncodeFixedI32(buf, pReq->vgId);
|
||||
tlen += taosEncodeFixedI64(buf, pReq->oldConsumerId);
|
||||
tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
|
||||
tlen += taosEncodeString(buf, pReq->subKey);
|
||||
tlen += taosEncodeFixedI8(buf, pReq->subType);
|
||||
tlen += taosEncodeFixedI8(buf, pReq->withMeta);
|
||||
static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder *pCoder, const SMqRebVgReq* pReq) {
|
||||
if (tStartEncode(pCoder) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pReq->leftForVer) < 0) return -1;
|
||||
if (tEncodeI32(pCoder, pReq->vgId) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pReq->oldConsumerId) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pReq->newConsumerId) < 0) return -1;
|
||||
if (tEncodeCStr(pCoder, pReq->subKey) < 0) return -1;
|
||||
if (tEncodeI8(pCoder, pReq->subType) < 0) return -1;
|
||||
if (tEncodeI8(pCoder, pReq->withMeta) < 0) return -1;
|
||||
|
||||
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
tlen += taosEncodeString(buf, pReq->qmsg);
|
||||
if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
|
||||
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
tlen += taosEncodeFixedI64(buf, pReq->suid);
|
||||
if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
|
||||
if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
|
||||
}
|
||||
return tlen;
|
||||
tEndEncode(pCoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq) {
|
||||
buf = taosDecodeFixedI64(buf, &pReq->leftForVer);
|
||||
buf = taosDecodeFixedI32(buf, &pReq->vgId);
|
||||
buf = taosDecodeFixedI64(buf, &pReq->oldConsumerId);
|
||||
buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
|
||||
buf = taosDecodeStringTo(buf, pReq->subKey);
|
||||
buf = taosDecodeFixedI8(buf, &pReq->subType);
|
||||
buf = taosDecodeFixedI8(buf, &pReq->withMeta);
|
||||
static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder *pCoder, SMqRebVgReq* pReq) {
|
||||
if (tStartDecode(pCoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI64(pCoder, &pReq->leftForVer) < 0) return -1;
|
||||
|
||||
if (tDecodeI32(pCoder, &pReq->vgId) < 0) return -1;
|
||||
if (tDecodeI64(pCoder, &pReq->oldConsumerId) < 0) return -1;
|
||||
if (tDecodeI64(pCoder, &pReq->newConsumerId) < 0) return -1;
|
||||
if (tDecodeCStrTo(pCoder, pReq->subKey) < 0) return -1;
|
||||
if (tDecodeI8(pCoder, &pReq->subType) < 0) return -1;
|
||||
if (tDecodeI8(pCoder, &pReq->withMeta) < 0) return -1;
|
||||
|
||||
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
buf = taosDecodeString(buf, &pReq->qmsg);
|
||||
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
||||
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
buf = taosDecodeFixedI64(buf, &pReq->suid);
|
||||
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
|
||||
if (!tDecodeIsEnd(pCoder)){
|
||||
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
|
||||
}
|
||||
}
|
||||
return (void*)buf;
|
||||
|
||||
tEndDecode(pCoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define TIME_IS_VAR_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y')
|
||||
#define IS_CALENDAR_TIME_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y')
|
||||
|
||||
#define TIME_UNIT_NANOSECOND 'b'
|
||||
#define TIME_UNIT_MICROSECOND 'u'
|
||||
|
@ -74,7 +74,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
|
|||
|
||||
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
|
||||
|
||||
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
|
||||
int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
|
||||
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
|
||||
|
||||
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
|
||||
|
|
|
@ -146,211 +146,212 @@
|
|||
#define TK_MEDIUMBLOB 128
|
||||
#define TK_BLOB 129
|
||||
#define TK_VARBINARY 130
|
||||
#define TK_DECIMAL 131
|
||||
#define TK_COMMENT 132
|
||||
#define TK_MAX_DELAY 133
|
||||
#define TK_WATERMARK 134
|
||||
#define TK_ROLLUP 135
|
||||
#define TK_TTL 136
|
||||
#define TK_SMA 137
|
||||
#define TK_DELETE_MARK 138
|
||||
#define TK_FIRST 139
|
||||
#define TK_LAST 140
|
||||
#define TK_SHOW 141
|
||||
#define TK_PRIVILEGES 142
|
||||
#define TK_DATABASES 143
|
||||
#define TK_TABLES 144
|
||||
#define TK_STABLES 145
|
||||
#define TK_MNODES 146
|
||||
#define TK_QNODES 147
|
||||
#define TK_FUNCTIONS 148
|
||||
#define TK_INDEXES 149
|
||||
#define TK_ACCOUNTS 150
|
||||
#define TK_APPS 151
|
||||
#define TK_CONNECTIONS 152
|
||||
#define TK_LICENCES 153
|
||||
#define TK_GRANTS 154
|
||||
#define TK_QUERIES 155
|
||||
#define TK_SCORES 156
|
||||
#define TK_TOPICS 157
|
||||
#define TK_VARIABLES 158
|
||||
#define TK_CLUSTER 159
|
||||
#define TK_BNODES 160
|
||||
#define TK_SNODES 161
|
||||
#define TK_TRANSACTIONS 162
|
||||
#define TK_DISTRIBUTED 163
|
||||
#define TK_CONSUMERS 164
|
||||
#define TK_SUBSCRIPTIONS 165
|
||||
#define TK_VNODES 166
|
||||
#define TK_ALIVE 167
|
||||
#define TK_LIKE 168
|
||||
#define TK_TBNAME 169
|
||||
#define TK_QTAGS 170
|
||||
#define TK_AS 171
|
||||
#define TK_INDEX 172
|
||||
#define TK_FUNCTION 173
|
||||
#define TK_INTERVAL 174
|
||||
#define TK_COUNT 175
|
||||
#define TK_LAST_ROW 176
|
||||
#define TK_TOPIC 177
|
||||
#define TK_META 178
|
||||
#define TK_CONSUMER 179
|
||||
#define TK_GROUP 180
|
||||
#define TK_DESC 181
|
||||
#define TK_DESCRIBE 182
|
||||
#define TK_RESET 183
|
||||
#define TK_QUERY 184
|
||||
#define TK_CACHE 185
|
||||
#define TK_EXPLAIN 186
|
||||
#define TK_ANALYZE 187
|
||||
#define TK_VERBOSE 188
|
||||
#define TK_NK_BOOL 189
|
||||
#define TK_RATIO 190
|
||||
#define TK_NK_FLOAT 191
|
||||
#define TK_OUTPUTTYPE 192
|
||||
#define TK_AGGREGATE 193
|
||||
#define TK_BUFSIZE 194
|
||||
#define TK_LANGUAGE 195
|
||||
#define TK_REPLACE 196
|
||||
#define TK_STREAM 197
|
||||
#define TK_INTO 198
|
||||
#define TK_PAUSE 199
|
||||
#define TK_RESUME 200
|
||||
#define TK_TRIGGER 201
|
||||
#define TK_AT_ONCE 202
|
||||
#define TK_WINDOW_CLOSE 203
|
||||
#define TK_IGNORE 204
|
||||
#define TK_EXPIRED 205
|
||||
#define TK_FILL_HISTORY 206
|
||||
#define TK_UPDATE 207
|
||||
#define TK_SUBTABLE 208
|
||||
#define TK_UNTREATED 209
|
||||
#define TK_KILL 210
|
||||
#define TK_CONNECTION 211
|
||||
#define TK_TRANSACTION 212
|
||||
#define TK_BALANCE 213
|
||||
#define TK_VGROUP 214
|
||||
#define TK_LEADER 215
|
||||
#define TK_MERGE 216
|
||||
#define TK_REDISTRIBUTE 217
|
||||
#define TK_SPLIT 218
|
||||
#define TK_DELETE 219
|
||||
#define TK_INSERT 220
|
||||
#define TK_NULL 221
|
||||
#define TK_NK_QUESTION 222
|
||||
#define TK_NK_ARROW 223
|
||||
#define TK_ROWTS 224
|
||||
#define TK_QSTART 225
|
||||
#define TK_QEND 226
|
||||
#define TK_QDURATION 227
|
||||
#define TK_WSTART 228
|
||||
#define TK_WEND 229
|
||||
#define TK_WDURATION 230
|
||||
#define TK_IROWTS 231
|
||||
#define TK_ISFILLED 232
|
||||
#define TK_CAST 233
|
||||
#define TK_NOW 234
|
||||
#define TK_TODAY 235
|
||||
#define TK_TIMEZONE 236
|
||||
#define TK_CLIENT_VERSION 237
|
||||
#define TK_SERVER_VERSION 238
|
||||
#define TK_SERVER_STATUS 239
|
||||
#define TK_CURRENT_USER 240
|
||||
#define TK_CASE 241
|
||||
#define TK_WHEN 242
|
||||
#define TK_THEN 243
|
||||
#define TK_ELSE 244
|
||||
#define TK_BETWEEN 245
|
||||
#define TK_IS 246
|
||||
#define TK_NK_LT 247
|
||||
#define TK_NK_GT 248
|
||||
#define TK_NK_LE 249
|
||||
#define TK_NK_GE 250
|
||||
#define TK_NK_NE 251
|
||||
#define TK_MATCH 252
|
||||
#define TK_NMATCH 253
|
||||
#define TK_CONTAINS 254
|
||||
#define TK_IN 255
|
||||
#define TK_JOIN 256
|
||||
#define TK_INNER 257
|
||||
#define TK_SELECT 258
|
||||
#define TK_DISTINCT 259
|
||||
#define TK_WHERE 260
|
||||
#define TK_PARTITION 261
|
||||
#define TK_BY 262
|
||||
#define TK_SESSION 263
|
||||
#define TK_STATE_WINDOW 264
|
||||
#define TK_EVENT_WINDOW 265
|
||||
#define TK_SLIDING 266
|
||||
#define TK_FILL 267
|
||||
#define TK_VALUE 268
|
||||
#define TK_VALUE_F 269
|
||||
#define TK_NONE 270
|
||||
#define TK_PREV 271
|
||||
#define TK_NULL_F 272
|
||||
#define TK_LINEAR 273
|
||||
#define TK_NEXT 274
|
||||
#define TK_HAVING 275
|
||||
#define TK_RANGE 276
|
||||
#define TK_EVERY 277
|
||||
#define TK_ORDER 278
|
||||
#define TK_SLIMIT 279
|
||||
#define TK_SOFFSET 280
|
||||
#define TK_LIMIT 281
|
||||
#define TK_OFFSET 282
|
||||
#define TK_ASC 283
|
||||
#define TK_NULLS 284
|
||||
#define TK_ABORT 285
|
||||
#define TK_AFTER 286
|
||||
#define TK_ATTACH 287
|
||||
#define TK_BEFORE 288
|
||||
#define TK_BEGIN 289
|
||||
#define TK_BITAND 290
|
||||
#define TK_BITNOT 291
|
||||
#define TK_BITOR 292
|
||||
#define TK_BLOCKS 293
|
||||
#define TK_CHANGE 294
|
||||
#define TK_COMMA 295
|
||||
#define TK_CONCAT 296
|
||||
#define TK_CONFLICT 297
|
||||
#define TK_COPY 298
|
||||
#define TK_DEFERRED 299
|
||||
#define TK_DELIMITERS 300
|
||||
#define TK_DETACH 301
|
||||
#define TK_DIVIDE 302
|
||||
#define TK_DOT 303
|
||||
#define TK_EACH 304
|
||||
#define TK_FAIL 305
|
||||
#define TK_FILE 306
|
||||
#define TK_FOR 307
|
||||
#define TK_GLOB 308
|
||||
#define TK_ID 309
|
||||
#define TK_IMMEDIATE 310
|
||||
#define TK_IMPORT 311
|
||||
#define TK_INITIALLY 312
|
||||
#define TK_INSTEAD 313
|
||||
#define TK_ISNULL 314
|
||||
#define TK_KEY 315
|
||||
#define TK_MODULES 316
|
||||
#define TK_NK_BITNOT 317
|
||||
#define TK_NK_SEMI 318
|
||||
#define TK_NOTNULL 319
|
||||
#define TK_OF 320
|
||||
#define TK_PLUS 321
|
||||
#define TK_PRIVILEGE 322
|
||||
#define TK_RAISE 323
|
||||
#define TK_RESTRICT 324
|
||||
#define TK_ROW 325
|
||||
#define TK_SEMI 326
|
||||
#define TK_STAR 327
|
||||
#define TK_STATEMENT 328
|
||||
#define TK_STRICT 329
|
||||
#define TK_STRING 330
|
||||
#define TK_TIMES 331
|
||||
#define TK_VALUES 332
|
||||
#define TK_VARIABLE 333
|
||||
#define TK_VIEW 334
|
||||
#define TK_WAL 335
|
||||
#define TK_GEOMETRY 131
|
||||
#define TK_DECIMAL 132
|
||||
#define TK_COMMENT 133
|
||||
#define TK_MAX_DELAY 134
|
||||
#define TK_WATERMARK 135
|
||||
#define TK_ROLLUP 136
|
||||
#define TK_TTL 137
|
||||
#define TK_SMA 138
|
||||
#define TK_DELETE_MARK 139
|
||||
#define TK_FIRST 140
|
||||
#define TK_LAST 141
|
||||
#define TK_SHOW 142
|
||||
#define TK_PRIVILEGES 143
|
||||
#define TK_DATABASES 144
|
||||
#define TK_TABLES 145
|
||||
#define TK_STABLES 146
|
||||
#define TK_MNODES 147
|
||||
#define TK_QNODES 148
|
||||
#define TK_FUNCTIONS 149
|
||||
#define TK_INDEXES 150
|
||||
#define TK_ACCOUNTS 151
|
||||
#define TK_APPS 152
|
||||
#define TK_CONNECTIONS 153
|
||||
#define TK_LICENCES 154
|
||||
#define TK_GRANTS 155
|
||||
#define TK_QUERIES 156
|
||||
#define TK_SCORES 157
|
||||
#define TK_TOPICS 158
|
||||
#define TK_VARIABLES 159
|
||||
#define TK_CLUSTER 160
|
||||
#define TK_BNODES 161
|
||||
#define TK_SNODES 162
|
||||
#define TK_TRANSACTIONS 163
|
||||
#define TK_DISTRIBUTED 164
|
||||
#define TK_CONSUMERS 165
|
||||
#define TK_SUBSCRIPTIONS 166
|
||||
#define TK_VNODES 167
|
||||
#define TK_ALIVE 168
|
||||
#define TK_LIKE 169
|
||||
#define TK_TBNAME 170
|
||||
#define TK_QTAGS 171
|
||||
#define TK_AS 172
|
||||
#define TK_INDEX 173
|
||||
#define TK_FUNCTION 174
|
||||
#define TK_INTERVAL 175
|
||||
#define TK_COUNT 176
|
||||
#define TK_LAST_ROW 177
|
||||
#define TK_TOPIC 178
|
||||
#define TK_META 179
|
||||
#define TK_CONSUMER 180
|
||||
#define TK_GROUP 181
|
||||
#define TK_DESC 182
|
||||
#define TK_DESCRIBE 183
|
||||
#define TK_RESET 184
|
||||
#define TK_QUERY 185
|
||||
#define TK_CACHE 186
|
||||
#define TK_EXPLAIN 187
|
||||
#define TK_ANALYZE 188
|
||||
#define TK_VERBOSE 189
|
||||
#define TK_NK_BOOL 190
|
||||
#define TK_RATIO 191
|
||||
#define TK_NK_FLOAT 192
|
||||
#define TK_OUTPUTTYPE 193
|
||||
#define TK_AGGREGATE 194
|
||||
#define TK_BUFSIZE 195
|
||||
#define TK_LANGUAGE 196
|
||||
#define TK_REPLACE 197
|
||||
#define TK_STREAM 198
|
||||
#define TK_INTO 199
|
||||
#define TK_PAUSE 200
|
||||
#define TK_RESUME 201
|
||||
#define TK_TRIGGER 202
|
||||
#define TK_AT_ONCE 203
|
||||
#define TK_WINDOW_CLOSE 204
|
||||
#define TK_IGNORE 205
|
||||
#define TK_EXPIRED 206
|
||||
#define TK_FILL_HISTORY 207
|
||||
#define TK_UPDATE 208
|
||||
#define TK_SUBTABLE 209
|
||||
#define TK_UNTREATED 210
|
||||
#define TK_KILL 211
|
||||
#define TK_CONNECTION 212
|
||||
#define TK_TRANSACTION 213
|
||||
#define TK_BALANCE 214
|
||||
#define TK_VGROUP 215
|
||||
#define TK_LEADER 216
|
||||
#define TK_MERGE 217
|
||||
#define TK_REDISTRIBUTE 218
|
||||
#define TK_SPLIT 219
|
||||
#define TK_DELETE 220
|
||||
#define TK_INSERT 221
|
||||
#define TK_NULL 222
|
||||
#define TK_NK_QUESTION 223
|
||||
#define TK_NK_ARROW 224
|
||||
#define TK_ROWTS 225
|
||||
#define TK_QSTART 226
|
||||
#define TK_QEND 227
|
||||
#define TK_QDURATION 228
|
||||
#define TK_WSTART 229
|
||||
#define TK_WEND 230
|
||||
#define TK_WDURATION 231
|
||||
#define TK_IROWTS 232
|
||||
#define TK_ISFILLED 233
|
||||
#define TK_CAST 234
|
||||
#define TK_NOW 235
|
||||
#define TK_TODAY 236
|
||||
#define TK_TIMEZONE 237
|
||||
#define TK_CLIENT_VERSION 238
|
||||
#define TK_SERVER_VERSION 239
|
||||
#define TK_SERVER_STATUS 240
|
||||
#define TK_CURRENT_USER 241
|
||||
#define TK_CASE 242
|
||||
#define TK_WHEN 243
|
||||
#define TK_THEN 244
|
||||
#define TK_ELSE 245
|
||||
#define TK_BETWEEN 246
|
||||
#define TK_IS 247
|
||||
#define TK_NK_LT 248
|
||||
#define TK_NK_GT 249
|
||||
#define TK_NK_LE 250
|
||||
#define TK_NK_GE 251
|
||||
#define TK_NK_NE 252
|
||||
#define TK_MATCH 253
|
||||
#define TK_NMATCH 254
|
||||
#define TK_CONTAINS 255
|
||||
#define TK_IN 256
|
||||
#define TK_JOIN 257
|
||||
#define TK_INNER 258
|
||||
#define TK_SELECT 259
|
||||
#define TK_DISTINCT 260
|
||||
#define TK_WHERE 261
|
||||
#define TK_PARTITION 262
|
||||
#define TK_BY 263
|
||||
#define TK_SESSION 264
|
||||
#define TK_STATE_WINDOW 265
|
||||
#define TK_EVENT_WINDOW 266
|
||||
#define TK_SLIDING 267
|
||||
#define TK_FILL 268
|
||||
#define TK_VALUE 269
|
||||
#define TK_VALUE_F 270
|
||||
#define TK_NONE 271
|
||||
#define TK_PREV 272
|
||||
#define TK_NULL_F 273
|
||||
#define TK_LINEAR 274
|
||||
#define TK_NEXT 275
|
||||
#define TK_HAVING 276
|
||||
#define TK_RANGE 277
|
||||
#define TK_EVERY 278
|
||||
#define TK_ORDER 279
|
||||
#define TK_SLIMIT 280
|
||||
#define TK_SOFFSET 281
|
||||
#define TK_LIMIT 282
|
||||
#define TK_OFFSET 283
|
||||
#define TK_ASC 284
|
||||
#define TK_NULLS 285
|
||||
#define TK_ABORT 286
|
||||
#define TK_AFTER 287
|
||||
#define TK_ATTACH 288
|
||||
#define TK_BEFORE 289
|
||||
#define TK_BEGIN 290
|
||||
#define TK_BITAND 291
|
||||
#define TK_BITNOT 292
|
||||
#define TK_BITOR 293
|
||||
#define TK_BLOCKS 294
|
||||
#define TK_CHANGE 295
|
||||
#define TK_COMMA 296
|
||||
#define TK_CONCAT 297
|
||||
#define TK_CONFLICT 298
|
||||
#define TK_COPY 299
|
||||
#define TK_DEFERRED 300
|
||||
#define TK_DELIMITERS 301
|
||||
#define TK_DETACH 302
|
||||
#define TK_DIVIDE 303
|
||||
#define TK_DOT 304
|
||||
#define TK_EACH 305
|
||||
#define TK_FAIL 306
|
||||
#define TK_FILE 307
|
||||
#define TK_FOR 308
|
||||
#define TK_GLOB 309
|
||||
#define TK_ID 310
|
||||
#define TK_IMMEDIATE 311
|
||||
#define TK_IMPORT 312
|
||||
#define TK_INITIALLY 313
|
||||
#define TK_INSTEAD 314
|
||||
#define TK_ISNULL 315
|
||||
#define TK_KEY 316
|
||||
#define TK_MODULES 317
|
||||
#define TK_NK_BITNOT 318
|
||||
#define TK_NK_SEMI 319
|
||||
#define TK_NOTNULL 320
|
||||
#define TK_OF 321
|
||||
#define TK_PLUS 322
|
||||
#define TK_PRIVILEGE 323
|
||||
#define TK_RAISE 324
|
||||
#define TK_RESTRICT 325
|
||||
#define TK_ROW 326
|
||||
#define TK_SEMI 327
|
||||
#define TK_STAR 328
|
||||
#define TK_STATEMENT 329
|
||||
#define TK_STRICT 330
|
||||
#define TK_STRING 331
|
||||
#define TK_TIMES 332
|
||||
#define TK_VALUES 333
|
||||
#define TK_VARIABLE 334
|
||||
#define TK_VIEW 335
|
||||
#define TK_WAL 336
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -269,7 +269,7 @@ typedef struct {
|
|||
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
||||
|
||||
#define IS_VAR_DATA_TYPE(t) \
|
||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
|
||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
||||
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||
|
||||
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
|
||||
|
@ -316,6 +316,8 @@ static FORCE_INLINE bool isNull(const void *val, int32_t type) {
|
|||
return *(uint32_t *)val == TSDB_DATA_UINT_NULL;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return *(uint64_t *)val == TSDB_DATA_UBIGINT_NULL;
|
||||
case TSDB_DATA_TYPE_GEOMETRY:
|
||||
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *)varDataVal(val) == TSDB_DATA_GEOMETRY_NULL;
|
||||
|
||||
default:
|
||||
return false;
|
||||
|
|
|
@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
|
|||
uint32_t maxDataBlockNumPerQuery;
|
||||
} SDataSinkMgtCfg;
|
||||
|
||||
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg);
|
||||
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
|
||||
|
||||
typedef struct SInputData {
|
||||
const struct SSDataBlock* pData;
|
||||
|
|
|
@ -23,6 +23,7 @@ extern "C" {
|
|||
#include "query.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsgcb.h"
|
||||
#include "storageapi.h"
|
||||
|
||||
typedef void* qTaskInfo_t;
|
||||
typedef void* DataSinkHandle;
|
||||
|
@ -41,7 +42,6 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
void* tqReader;
|
||||
void* meta;
|
||||
void* config;
|
||||
void* vnode;
|
||||
void* mnd;
|
||||
|
@ -51,10 +51,10 @@ typedef struct {
|
|||
bool initTableReader;
|
||||
bool initTqReader;
|
||||
int32_t numOfVgroups;
|
||||
void* sContext; // SSnapContext*
|
||||
|
||||
void* sContext; // SSnapContext*
|
||||
|
||||
void* pStateBackend;
|
||||
void* pStateBackend;
|
||||
struct SStorageAPI api;
|
||||
} SReadHandle;
|
||||
|
||||
// in queue mode, data streams are separated by msg
|
||||
|
@ -82,6 +82,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
|
|||
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols,
|
||||
uint64_t id);
|
||||
|
||||
int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo);
|
||||
|
||||
/**
|
||||
* set the task Id, usually used by message queue process
|
||||
* @param tinfo
|
||||
|
@ -90,6 +92,8 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3
|
|||
*/
|
||||
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
|
||||
|
||||
//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code);
|
||||
|
||||
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
||||
|
||||
// todo refactor
|
||||
|
@ -186,7 +190,17 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
|
|||
|
||||
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
|
||||
|
||||
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
|
||||
void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
|
||||
void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);
|
||||
STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key);
|
||||
/**
|
||||
* return the scan info, in the form of tuple of two items, including table uid and current timestamp
|
||||
* @param tinfo
|
||||
* @param uid
|
||||
* @param ts
|
||||
* @return
|
||||
*/
|
||||
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
|
||||
|
||||
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
|
||||
|
||||
|
|
|
@ -0,0 +1,436 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_STORAGEAPI_H
|
||||
#define TDENGINE_STORAGEAPI_H
|
||||
|
||||
#include "function.h"
|
||||
#include "index.h"
|
||||
#include "taosdef.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tscalablebf.h"
|
||||
#include "tsimplehash.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define TIMEWINDOW_RANGE_CONTAINED 1
|
||||
#define TIMEWINDOW_RANGE_EXTERNAL 2
|
||||
|
||||
#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
|
||||
#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
|
||||
#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
|
||||
#define CACHESCAN_RETRIEVE_LAST 0x8
|
||||
|
||||
#define META_READER_NOLOCK 0x1
|
||||
|
||||
typedef struct SMeta SMeta;
|
||||
typedef TSKEY (*GetTsFun)(void*);
|
||||
|
||||
typedef struct SMetaEntry {
|
||||
int64_t version;
|
||||
int8_t type;
|
||||
int8_t flags; // TODO: need refactor?
|
||||
tb_uid_t uid;
|
||||
char* name;
|
||||
union {
|
||||
struct {
|
||||
SSchemaWrapper schemaRow;
|
||||
SSchemaWrapper schemaTag;
|
||||
SRSmaParam rsmaParam;
|
||||
} stbEntry;
|
||||
struct {
|
||||
int64_t ctime;
|
||||
int32_t ttlDays;
|
||||
int32_t commentLen;
|
||||
char* comment;
|
||||
tb_uid_t suid;
|
||||
uint8_t* pTags;
|
||||
} ctbEntry;
|
||||
struct {
|
||||
int64_t ctime;
|
||||
int32_t ttlDays;
|
||||
int32_t commentLen;
|
||||
char* comment;
|
||||
int32_t ncid; // next column id
|
||||
SSchemaWrapper schemaRow;
|
||||
} ntbEntry;
|
||||
struct {
|
||||
STSma* tsma;
|
||||
} smaEntry;
|
||||
};
|
||||
|
||||
uint8_t* pBuf;
|
||||
} SMetaEntry;
|
||||
|
||||
typedef struct SMetaReader {
|
||||
int32_t flags;
|
||||
void* pMeta;
|
||||
SDecoder coder;
|
||||
SMetaEntry me;
|
||||
void* pBuf;
|
||||
int32_t szBuf;
|
||||
struct SStoreMeta* pAPI;
|
||||
} SMetaReader;
|
||||
|
||||
typedef struct SMTbCursor {
|
||||
void* pMeta;
|
||||
void* pDbc;
|
||||
void* pKey;
|
||||
void* pVal;
|
||||
int32_t kLen;
|
||||
int32_t vLen;
|
||||
SMetaReader mr;
|
||||
int8_t paused;
|
||||
} SMTbCursor;
|
||||
|
||||
typedef struct SRowBuffPos {
|
||||
void* pRowBuff;
|
||||
void* pKey;
|
||||
bool beFlushed;
|
||||
bool beUsed;
|
||||
} SRowBuffPos;
|
||||
|
||||
// tq
|
||||
typedef struct SMetaTableInfo {
|
||||
int64_t suid;
|
||||
int64_t uid;
|
||||
SSchemaWrapper* schema;
|
||||
char tbName[TSDB_TABLE_NAME_LEN];
|
||||
} SMetaTableInfo;
|
||||
|
||||
typedef struct SSnapContext {
|
||||
SMeta* pMeta; // todo remove it
|
||||
int64_t snapVersion;
|
||||
void* pCur;
|
||||
int64_t suid;
|
||||
int8_t subType;
|
||||
SHashObj* idVersion;
|
||||
SHashObj* suidInfo;
|
||||
SArray* idList;
|
||||
int32_t index;
|
||||
bool withMeta;
|
||||
bool queryMeta; // true-get meta, false-get data
|
||||
} SSnapContext;
|
||||
|
||||
typedef struct {
|
||||
int64_t uid;
|
||||
int64_t ctbNum;
|
||||
} SMetaStbStats;
|
||||
|
||||
// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
|
||||
// int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
|
||||
// int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
|
||||
// int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
|
||||
// bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
|
||||
// bool tqCurrentBlockConsumed(const STqReader* pReader);
|
||||
// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
|
||||
// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
|
||||
// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
|
||||
// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
|
||||
// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
|
||||
// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
|
||||
|
||||
// clang-format off
|
||||
/*-------------------------------------------------new api format---------------------------------------------------*/
|
||||
typedef struct TsdReader {
|
||||
int32_t (*tsdReaderOpen)(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
|
||||
SSDataBlock* pResBlock, void** ppReader, const char* idstr, bool countOnly,
|
||||
SHashObj** pIgnoreTables);
|
||||
void (*tsdReaderClose)();
|
||||
void (*tsdSetReaderTaskId)(void *pReader, const char *pId);
|
||||
int32_t (*tsdSetQueryTableList)();
|
||||
int32_t (*tsdNextDataBlock)();
|
||||
|
||||
int32_t (*tsdReaderRetrieveBlockSMAInfo)();
|
||||
SSDataBlock *(*tsdReaderRetrieveDataBlock)();
|
||||
|
||||
void (*tsdReaderReleaseDataBlock)();
|
||||
|
||||
int32_t (*tsdReaderResetStatus)();
|
||||
int32_t (*tsdReaderGetDataBlockDistInfo)();
|
||||
int64_t (*tsdReaderGetNumOfInMemRows)();
|
||||
void (*tsdReaderNotifyClosing)();
|
||||
} TsdReader;
|
||||
|
||||
typedef struct SStoreCacheReader {
|
||||
int32_t (*openReader)(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
|
||||
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
|
||||
void *(*closeReader)(void *pReader);
|
||||
int32_t (*retrieveRows)(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
|
||||
SArray *pTableUidList);
|
||||
int32_t (*reuseReader)(void *pReader, void *pTableIdList, int32_t numOfTables);
|
||||
} SStoreCacheReader;
|
||||
|
||||
// clang-format on
|
||||
|
||||
/*------------------------------------------------------------------------------------------------------------------*/
|
||||
/*
|
||||
void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
|
||||
int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
|
||||
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
|
||||
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
|
||||
bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
|
||||
bool tqCurrentBlockConsumed(const STqReader* pReader);
|
||||
|
||||
int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
|
||||
bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
|
||||
bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
|
||||
|
||||
int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char* idstr);
|
||||
STqReader *tqReaderOpen(void *pVnode);
|
||||
void tqReaderClose(STqReader *);
|
||||
|
||||
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
|
||||
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
|
||||
SWalReader* tqGetWalReader(STqReader* pReader);
|
||||
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
|
||||
*/
|
||||
// todo rename
|
||||
typedef struct SStoreTqReader {
|
||||
struct STqReader* (*tqReaderOpen)();
|
||||
void (*tqReaderClose)();
|
||||
|
||||
int32_t (*tqReaderSeek)();
|
||||
int32_t (*tqRetrieveBlock)();
|
||||
bool (*tqReaderNextBlockInWal)();
|
||||
bool (*tqNextBlockImpl)(); // todo remove it
|
||||
SSDataBlock* (*tqGetResultBlock)();
|
||||
|
||||
void (*tqReaderSetColIdList)();
|
||||
int32_t (*tqReaderSetQueryTableList)();
|
||||
|
||||
int32_t (*tqReaderAddTables)();
|
||||
int32_t (*tqReaderRemoveTables)();
|
||||
|
||||
bool (*tqReaderIsQueriedTable)();
|
||||
bool (*tqReaderCurrentBlockConsumed)();
|
||||
|
||||
struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
|
||||
int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
|
||||
|
||||
int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
|
||||
bool (*tqReaderNextBlockFilterOut)();
|
||||
} SStoreTqReader;
|
||||
|
||||
typedef struct SStoreSnapshotFn {
|
||||
int32_t (*createSnapshot)(SSnapContext* ctx, int64_t uid);
|
||||
int32_t (*destroySnapshot)(SSnapContext* ctx);
|
||||
SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx);
|
||||
int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid);
|
||||
} SStoreSnapshotFn;
|
||||
|
||||
/**
|
||||
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
|
||||
void metaReaderReleaseLock(SMetaReader *pReader);
|
||||
void metaReaderClear(SMetaReader *pReader);
|
||||
int32_t metaReaderGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
|
||||
int32_t metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
|
||||
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
|
||||
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
|
||||
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
|
||||
|
||||
int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid);
|
||||
int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType);
|
||||
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
|
||||
int32_t metaGetCachedTableUidList(SMeta *pMeta, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
|
||||
bool *acquired);
|
||||
int32_t metaUidFilterCachePut(SMeta *pMeta, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
|
||||
int32_t payloadLen, double selectivityRatio);
|
||||
tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
|
||||
int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
|
||||
int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t
|
||||
payloadLen);
|
||||
*/
|
||||
|
||||
typedef struct SStoreMeta {
|
||||
SMTbCursor* (*openTableMetaCursor)(void* pVnode); // metaOpenTbCursor
|
||||
void (*closeTableMetaCursor)(SMTbCursor* pTbCur); // metaCloseTbCursor
|
||||
void (*pauseTableMetaCursor)(SMTbCursor* pTbCur); // metaPauseTbCursor
|
||||
void (*resumeTableMetaCursor)(SMTbCursor* pTbCur, int8_t first); // metaResumeTbCursor
|
||||
int32_t (*cursorNext)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorNext
|
||||
int32_t (*cursorPrev)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorPrev
|
||||
|
||||
int32_t (*getTableTags)(void* pVnode, uint64_t suid, SArray* uidList);
|
||||
int32_t (*getTableTagsByUid)(void* pVnode, int64_t suid, SArray* uidList);
|
||||
const void* (*extractTagVal)(const void* tag, int16_t type, STagVal* tagVal); // todo remove it
|
||||
|
||||
int32_t (*getTableUidByName)(void* pVnode, char* tbName, uint64_t* uid);
|
||||
int32_t (*getTableTypeByName)(void* pVnode, char* tbName, ETableType* tbType);
|
||||
int32_t (*getTableNameByUid)(void* pVnode, uint64_t uid, char* tbName);
|
||||
bool (*isTableExisted)(void* pVnode, tb_uid_t uid);
|
||||
|
||||
int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
|
||||
int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
|
||||
int32_t payloadLen);
|
||||
|
||||
int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
|
||||
bool* acquireRes);
|
||||
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
|
||||
int32_t payloadLen, double selectivityRatio);
|
||||
|
||||
void* (*storeGetIndexInfo)();
|
||||
void* (*getInvertIndex)(void* pVnode);
|
||||
int32_t (*getChildTableList)(
|
||||
void* pVnode, int64_t suid,
|
||||
SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
|
||||
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
|
||||
void* storeGetVersionRange;
|
||||
void* storeGetLastTimestamp;
|
||||
|
||||
int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
|
||||
|
||||
// db name, vgId, numOfTables, numOfSTables
|
||||
int32_t (*getNumOfChildTables)(
|
||||
void* pVnode, int64_t uid,
|
||||
int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
|
||||
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
|
||||
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
|
||||
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
|
||||
|
||||
int64_t (*getNumOfRowsInMem)(void* pVnode);
|
||||
/**
|
||||
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
|
||||
int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
|
||||
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
|
||||
*/
|
||||
} SStoreMeta;
|
||||
|
||||
typedef struct SStoreMetaReader {
|
||||
void (*initReader)(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI);
|
||||
void (*clearReader)(SMetaReader* pReader);
|
||||
void (*readerReleaseLock)(SMetaReader* pReader);
|
||||
int32_t (*getTableEntryByUid)(SMetaReader* pReader, tb_uid_t uid);
|
||||
int32_t (*getTableEntryByName)(SMetaReader* pReader, const char* name);
|
||||
int32_t (*getEntryGetUidCache)(SMetaReader* pReader, tb_uid_t uid);
|
||||
} SStoreMetaReader;
|
||||
|
||||
typedef struct SUpdateInfo {
|
||||
SArray* pTsBuckets;
|
||||
uint64_t numBuckets;
|
||||
SArray* pTsSBFs;
|
||||
uint64_t numSBFs;
|
||||
int64_t interval;
|
||||
int64_t watermark;
|
||||
TSKEY minTS;
|
||||
SScalableBf* pCloseWinSBF;
|
||||
SHashObj* pMap;
|
||||
uint64_t maxDataVersion;
|
||||
} SUpdateInfo;
|
||||
|
||||
typedef struct {
|
||||
void* iter; // rocksdb_iterator_t* iter;
|
||||
void* snapshot; // rocksdb_snapshot_t* snapshot;
|
||||
void* readOpt; // rocksdb_readoptions_t* readOpt;
|
||||
void* db; // rocksdb_t* db;
|
||||
void* pCur;
|
||||
int64_t number;
|
||||
} SStreamStateCur;
|
||||
|
||||
typedef struct SStateStore {
|
||||
int32_t (*streamStatePutParName)(SStreamState* pState, int64_t groupId, const char* tbname);
|
||||
int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal);
|
||||
|
||||
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
|
||||
void (*streamStateFreeVal)(void* val);
|
||||
|
||||
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
|
||||
int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal);
|
||||
int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key);
|
||||
int32_t (*streamStateClear)(SStreamState* pState);
|
||||
void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
|
||||
int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
|
||||
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
|
||||
|
||||
int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
|
||||
|
||||
int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
|
||||
int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
|
||||
|
||||
SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
|
||||
SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
|
||||
void (*streamStateFreeCur)(SStreamStateCur* pCur);
|
||||
|
||||
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
|
||||
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
|
||||
int32_t* pVLen);
|
||||
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
|
||||
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
|
||||
int32_t (*streamStateSessionClear)(SStreamState* pState);
|
||||
int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
|
||||
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
|
||||
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
|
||||
|
||||
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
|
||||
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
|
||||
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
|
||||
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
|
||||
void (*updateInfoDestroy)(SUpdateInfo* pInfo);
|
||||
|
||||
SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
|
||||
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
|
||||
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
|
||||
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
|
||||
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
|
||||
|
||||
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
|
||||
SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
|
||||
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
|
||||
|
||||
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
|
||||
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark);
|
||||
|
||||
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
|
||||
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
|
||||
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
|
||||
|
||||
SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
|
||||
void (*streamStateClose)(SStreamState* pState, bool remove);
|
||||
int32_t (*streamStateBegin)(SStreamState* pState);
|
||||
int32_t (*streamStateCommit)(SStreamState* pState);
|
||||
void (*streamStateDestroy)(SStreamState* pState, bool remove);
|
||||
int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
|
||||
} SStateStore;
|
||||
|
||||
typedef struct SStorageAPI {
|
||||
SStoreMeta metaFn; // todo: refactor
|
||||
TsdReader tsdReader;
|
||||
SStoreMetaReader metaReaderFn;
|
||||
SStoreCacheReader cacheFn;
|
||||
SStoreSnapshotFn snapshotFn;
|
||||
SStoreTqReader tqReaderFn;
|
||||
SStateStore stateStore;
|
||||
SMetaDataFilterAPI metaFilter;
|
||||
SFunctionStateStore functionStore;
|
||||
} SStorageAPI;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_STORAGEAPI_H
|
|
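The new storageapi.h reduces the executor's view of the vnode to a single table of function pointers, SStorageAPI. A hedged sketch of how the storage engine might populate a slice of it before handing it to the query layer; the stub callbacks are assumptions, only the struct and member names come from the header above:

```c
#include <string.h>

// Hypothetical vnode-side callbacks; their signatures mirror the SStoreMeta
// members declared above.
static int32_t stubGetTableSchema(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid) {
  (void)pVnode; (void)uid; (void)pSchema; (void)suid;
  return 0;
}

static bool stubIsTableExisted(void *pVnode, tb_uid_t uid) {
  (void)pVnode; (void)uid;
  return false;
}

// Minimal sketch: the storage engine fills the vtable once, then the executor
// only ever calls through it.
static void initStorageAPISketch(SStorageAPI *pAPI) {
  memset(pAPI, 0, sizeof(*pAPI));
  pAPI->metaFn.getTableSchema = stubGetTableSchema;
  pAPI->metaFn.isTableExisted = stubIsTableExisted;
  // tsdReader, cacheFn, tqReaderFn, stateStore, ... are wired up the same way.
}
```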
@ -21,6 +21,7 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#include "tcommon.h"
|
||||
#include "tsimplehash.h"
|
||||
#include "tvariant.h"
|
||||
|
||||
struct SqlFunctionCtx;
|
||||
|
@ -76,7 +77,7 @@ enum {
|
|||
enum {
|
||||
MAIN_SCAN = 0x0u,
|
||||
REVERSE_SCAN = 0x1u, // todo remove it
|
||||
PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
|
||||
PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
|
||||
};
|
||||
|
||||
typedef struct SPoint1 {
|
||||
|
@ -127,16 +128,59 @@ typedef struct SSerializeDataHandle {
|
|||
void *pState;
|
||||
} SSerializeDataHandle;
|
||||
|
||||
// incremental state storage
|
||||
typedef struct STdbState {
|
||||
void *rocksdb;
|
||||
void **pHandle;
|
||||
void *writeOpts;
|
||||
void *readOpts;
|
||||
void **cfOpts;
|
||||
void *dbOpt;
|
||||
struct SStreamTask *pOwner;
|
||||
void *param;
|
||||
void *env;
|
||||
SListNode *pComparNode;
|
||||
void *pBackend;
|
||||
char idstr[64];
|
||||
void *compactFactory;
|
||||
TdThreadRwlock rwLock;
|
||||
|
||||
void *db;
|
||||
void *pStateDb;
|
||||
void *pFuncStateDb;
|
||||
void *pFillStateDb; // todo refactor
|
||||
void *pSessionStateDb;
|
||||
void *pParNameDb;
|
||||
void *pParTagDb;
|
||||
void *txn;
|
||||
} STdbState;
|
||||
|
||||
typedef struct {
|
||||
STdbState *pTdbState;
|
||||
struct SStreamFileState *pFileState;
|
||||
int32_t number;
|
||||
SSHashObj *parNameMap;
|
||||
int64_t checkPointId;
|
||||
int32_t taskId;
|
||||
int64_t streamId;
|
||||
int64_t streamBackendRid;
|
||||
} SStreamState;
|
||||
|
||||
typedef struct SFunctionStateStore {
|
||||
int32_t (*streamStateFuncPut)(SStreamState *pState, const SWinKey *key, const void *value, int32_t vLen);
|
||||
int32_t (*streamStateFuncGet)(SStreamState *pState, const SWinKey *key, void **ppVal, int32_t *pVLen);
|
||||
} SFunctionStateStore;
|
||||
|
||||
// sql function runtime context
|
||||
typedef struct SqlFunctionCtx {
|
||||
SInputColumnInfoData input;
|
||||
SResultDataInfo resDataInfo;
|
||||
uint32_t order; // data block scanner order: asc|desc
|
||||
uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason]
|
||||
uint8_t isNotNullFunc;// not return null value.
|
||||
uint8_t scanFlag; // record current running step, default: 0
|
||||
int16_t functionId; // function id
|
||||
char *pOutput; // final result output buffer, point to sdata->data
|
||||
uint32_t order; // data block scanner order: asc|desc
|
||||
uint8_t isPseudoFunc; // denote current function is pseudo function or not [added for perf reason]
|
||||
uint8_t isNotNullFunc; // not return null value.
|
||||
uint8_t scanFlag; // record current running step, default: 0
|
||||
int16_t functionId; // function id
|
||||
char *pOutput; // final result output buffer, point to sdata->data
|
||||
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
|
||||
SFunctParam *param;
|
||||
// corresponding output buffer for timestamp of each result, e.g., diff/csum
|
||||
|
@ -155,6 +199,7 @@ typedef struct SqlFunctionCtx {
|
|||
SSerializeDataHandle saveHandle;
|
||||
int32_t exprIdx;
|
||||
char *udfName;
|
||||
SFunctionStateStore *pStore;
|
||||
} SqlFunctionCtx;
|
||||
|
||||
typedef struct tExprNode {
|
||||
|
|
|
@@ -158,6 +158,17 @@ typedef enum EFunctionType {
  FUNCTION_TYPE_STDDEV_PARTIAL,
  FUNCTION_TYPE_STDDEV_MERGE,

  // geometry functions
  FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,
  FUNCTION_TYPE_AS_TEXT,
  FUNCTION_TYPE_MAKE_POINT,
  FUNCTION_TYPE_INTERSECTS,
  FUNCTION_TYPE_EQUALS,
  FUNCTION_TYPE_TOUCHES,
  FUNCTION_TYPE_COVERS,
  FUNCTION_TYPE_CONTAINS,
  FUNCTION_TYPE_CONTAINS_PROPERLY,

  // user defined function
  FUNCTION_TYPE_UDF = 10000
} EFunctionType;
@@ -109,7 +109,7 @@ typedef uint16_t VarDataLenT;  // maxVarDataLen: 65535
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))

static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
@@ -0,0 +1,41 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_GEOM_FUNC_H
#define TDENGINE_GEOM_FUNC_H

#ifdef __cplusplus
extern "C" {
#endif

#include "function.h"

int32_t makePointFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

int32_t geomFromTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t asTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

int32_t intersectsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t equalsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t touchesFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t coversFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t containsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t containsProperlyFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

#ifdef __cplusplus
}
#endif

#endif  // TDENGINE_GEOM_FUNC_H
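All geometry scalar functions declared in geomFunc.h share the (SScalarParam*, int32_t, SScalarParam*) shape used by TDengine scalar functions, so callers can treat them uniformly. A small illustrative grouping of the relation functions (the table itself is not part of the patch):

```c
typedef int32_t (*geomScalarFn)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

// Hypothetical grouping, for illustration only.
static const geomScalarFn kGeomRelationFns[] = {
    intersectsFunction, equalsFunction,   touchesFunction,
    coversFunction,     containsFunction, containsProperlyFunction,
};
```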
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_GEOS_WRAPPER_H
#define TDENGINE_GEOS_WRAPPER_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "os.h"

#include "tgeosctx.h"

void geosFreeBuffer(void *buffer);

int32_t initCtxMakePoint();
int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size);

int32_t initCtxGeomFromText();
int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t *size);

int32_t initCtxAsText();
int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT);

int32_t initCtxRelationFunc();
int32_t doIntersects(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                     bool swapped, char *res);
int32_t doEquals(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                 bool swapped, char *res);
int32_t doTouches(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                  bool swapped, char *res);
int32_t doCovers(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                 bool swapped, char *res);
int32_t doContains(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                   bool swapped, char *res);
int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
                           bool swapped, char *res);

int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom);
void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom);

#ifdef __cplusplus
}
#endif

#endif /*TDENGINE_GEOS_WRAPPER_H*/
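A quick sanity check on how the wrapper is meant to be used: convert WKT into the internal geometry buffer, convert it back, and release both buffers through geosFreeBuffer(). Only the function names come from the header above; the control flow and error handling are a sketch:

```c
#include <stdio.h>

static int32_t wktRoundTripSketch(const char *wkt) {
  unsigned char *geom = NULL;
  size_t         size = 0;
  char          *outWKT = NULL;

  int32_t code = initCtxGeomFromText();
  if (code == 0) code = doGeomFromText(wkt, &geom, &size);  // WKT -> geometry buffer

  if (code == 0) code = initCtxAsText();
  if (code == 0) code = doAsText(geom, size, &outWKT);      // geometry buffer -> WKT
  if (code == 0) printf("round trip: %s\n", outWKT);

  geosFreeBuffer(outWKT);
  geosFreeBuffer(geom);
  return code;
}
```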
@@ -212,13 +212,38 @@ typedef struct SIndexMetaArg {
  void*    idx;
  void*    ivtIdx;
  uint64_t suid;
  int (*metaFilterFunc)(void* metaEx, void* param, SArray* result);
} SIndexMetaArg;

/**
 * the underlying storage module must implement this API to employ the index functions.
 * @param pMeta
 * @param param
 * @param results
 * @return
 */
typedef struct SMetaFltParam {
  uint64_t suid;
  int16_t  cid;
  int16_t  type;
  void    *val;
  bool     reverse;
  bool     equal;
  int (*filterFunc)(void *a, void *b, int16_t type);
} SMetaFltParam;

typedef struct SMetaDataFilterAPI {
  int32_t (*metaFilterTableIds)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
  int32_t (*metaFilterCreateTime)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
  int32_t (*metaFilterTableName)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
  int32_t (*metaFilterTtl)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
} SMetaDataFilterAPI;

typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;

SIdxFltStatus idxGetFltStatus(SNode* pFilterNode);
SIdxFltStatus idxGetFltStatus(SNode* pFilterNode, SMetaDataFilterAPI* pAPI);

int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status);
int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status, SMetaDataFilterAPI* pAPI);

/*
 * init index env
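SMetaFltParam describes a single tag-index predicate, and SMetaDataFilterAPI is the callback table the storage layer supplies to evaluate it. A hedged sketch of filling the parameter and running a table-id filter; the comparator, tag column id and value are invented for illustration:

```c
// Illustrative only; cid and the tag value are placeholders, not from the patch.
static int cmpBigintTag(void *a, void *b, int16_t type) {
  (void)type;
  int64_t x = *(int64_t *)a, y = *(int64_t *)b;
  return (x > y) - (x < y);
}

static int32_t filterTableIdsByTag(void *pVnode, const SMetaDataFilterAPI *pAPI,
                                   uint64_t suid, int64_t tagVal, SArray *pUids) {
  SMetaFltParam param = {
      .suid = suid,
      .cid = 2,                        // hypothetical tag column id
      .type = TSDB_DATA_TYPE_BIGINT,
      .val = &tagVal,
      .reverse = false,
      .equal = true,
      .filterFunc = cmpBigintTag,
  };
  return pAPI->metaFilterTableIds(pVnode, &param, pUids);
}
```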
@ -364,6 +364,7 @@ typedef struct SCreateTopicStmt {
|
|||
bool ignoreExists;
|
||||
bool withMeta;
|
||||
SNode* pQuery;
|
||||
SNode* pWhere;
|
||||
} SCreateTopicStmt;
|
||||
|
||||
typedef struct SDropTopicStmt {
|
||||
|
|
|
@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p
|
|||
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
|
||||
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
|
||||
void nodesDestroyList(SNodeList* pList);
|
||||
bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList);
|
||||
|
||||
// Only clear the linked list structure, without releasing the elements inside
|
||||
void nodesClearList(SNodeList* pList);
|
||||
|
||||
|
@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon
|
|||
void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext);
|
||||
|
||||
bool nodesEqualNode(const SNode* a, const SNode* b);
|
||||
bool nodesMatchNode(const SNode* pSub, const SNode* pNode);
|
||||
|
||||
SNode* nodesCloneNode(const SNode* pNode);
|
||||
SNodeList* nodesCloneList(const SNodeList* pList);
|
||||
|
|
|
@ -112,6 +112,7 @@ typedef struct SJoinLogicNode {
|
|||
SNode* pOnConditions;
|
||||
bool isSingleTableJoin;
|
||||
EOrder inputTsOrder;
|
||||
SNode* pColEqualOnConditions;
|
||||
} SJoinLogicNode;
|
||||
|
||||
typedef struct SAggLogicNode {
|
||||
|
@ -406,6 +407,7 @@ typedef struct SSortMergeJoinPhysiNode {
|
|||
SNode* pOnConditions;
|
||||
SNodeList* pTargets;
|
||||
EOrder inputTsOrder;
|
||||
SNode* pColEqualOnConditions;
|
||||
} SSortMergeJoinPhysiNode;
|
||||
|
||||
typedef struct SAggPhysiNode {
|
||||
|
@ -448,7 +450,7 @@ typedef struct SMergePhysiNode {
|
|||
bool ignoreGroupId;
|
||||
} SMergePhysiNode;
|
||||
|
||||
typedef struct SWinodwPhysiNode {
|
||||
typedef struct SWindowPhysiNode {
|
||||
SPhysiNode node;
|
||||
SNodeList* pExprs; // these are expression list of parameter expression of function
|
||||
SNodeList* pFuncs;
|
||||
|
@ -461,10 +463,10 @@ typedef struct SWinodwPhysiNode {
|
|||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
bool mergeDataBlock;
|
||||
} SWinodwPhysiNode;
|
||||
} SWindowPhysiNode;
|
||||
|
||||
typedef struct SIntervalPhysiNode {
|
||||
SWinodwPhysiNode window;
|
||||
SWindowPhysiNode window;
|
||||
int64_t interval;
|
||||
int64_t offset;
|
||||
int64_t sliding;
|
||||
|
@ -497,7 +499,7 @@ typedef struct SMultiTableIntervalPhysiNode {
|
|||
} SMultiTableIntervalPhysiNode;
|
||||
|
||||
typedef struct SSessionWinodwPhysiNode {
|
||||
SWinodwPhysiNode window;
|
||||
SWindowPhysiNode window;
|
||||
int64_t gap;
|
||||
} SSessionWinodwPhysiNode;
|
||||
|
||||
|
@ -506,14 +508,14 @@ typedef SSessionWinodwPhysiNode SStreamSemiSessionWinodwPhysiNode;
|
|||
typedef SSessionWinodwPhysiNode SStreamFinalSessionWinodwPhysiNode;
|
||||
|
||||
typedef struct SStateWinodwPhysiNode {
|
||||
SWinodwPhysiNode window;
|
||||
SWindowPhysiNode window;
|
||||
SNode* pStateKey;
|
||||
} SStateWinodwPhysiNode;
|
||||
|
||||
typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
|
||||
|
||||
typedef struct SEventWinodwPhysiNode {
|
||||
SWinodwPhysiNode window;
|
||||
SWindowPhysiNode window;
|
||||
SNode* pStartCond;
|
||||
SNode* pEndCond;
|
||||
} SEventWinodwPhysiNode;
|
||||
|
|
|
@ -241,6 +241,12 @@ typedef enum EFillMode {
|
|||
FILL_MODE_NEXT
|
||||
} EFillMode;
|
||||
|
||||
typedef enum ETimeLineMode {
|
||||
TIME_LINE_NONE = 1,
|
||||
TIME_LINE_MULTI,
|
||||
TIME_LINE_GLOBAL,
|
||||
} ETimeLineMode;
|
||||
|
||||
typedef struct SFillNode {
|
||||
ENodeType type; // QUERY_NODE_FILL
|
||||
EFillMode mode;
|
||||
|
@ -263,50 +269,50 @@ typedef struct SCaseWhenNode {
|
|||
} SCaseWhenNode;
|
||||
|
||||
typedef struct SSelectStmt {
|
||||
ENodeType type; // QUERY_NODE_SELECT_STMT
|
||||
bool isDistinct;
|
||||
SNodeList* pProjectionList;
|
||||
SNode* pFromTable;
|
||||
SNode* pWhere;
|
||||
SNodeList* pPartitionByList;
|
||||
SNodeList* pTags; // for create stream
|
||||
SNode* pSubtable; // for create stream
|
||||
SNode* pWindow;
|
||||
SNodeList* pGroupByList; // SGroupingSetNode
|
||||
SNode* pHaving;
|
||||
SNode* pRange;
|
||||
SNode* pEvery;
|
||||
SNode* pFill;
|
||||
SNodeList* pOrderByList; // SOrderByExprNode
|
||||
SLimitNode* pLimit;
|
||||
SLimitNode* pSlimit;
|
||||
STimeWindow timeRange;
|
||||
char stmtName[TSDB_TABLE_NAME_LEN];
|
||||
uint8_t precision;
|
||||
int32_t selectFuncNum;
|
||||
int32_t returnRows; // EFuncReturnRows
|
||||
bool isEmptyResult;
|
||||
bool isTimeLineResult;
|
||||
bool isSubquery;
|
||||
bool hasAggFuncs;
|
||||
bool hasRepeatScanFuncs;
|
||||
bool hasIndefiniteRowsFunc;
|
||||
bool hasMultiRowsFunc;
|
||||
bool hasSelectFunc;
|
||||
bool hasSelectValFunc;
|
||||
bool hasOtherVectorFunc;
|
||||
bool hasUniqueFunc;
|
||||
bool hasTailFunc;
|
||||
bool hasInterpFunc;
|
||||
bool hasInterpPseudoColFunc;
|
||||
bool hasLastRowFunc;
|
||||
bool hasLastFunc;
|
||||
bool hasTimeLineFunc;
|
||||
bool hasUdaf;
|
||||
bool hasStateKey;
|
||||
bool onlyHasKeepOrderFunc;
|
||||
bool groupSort;
|
||||
bool tagScan;
|
||||
ENodeType type; // QUERY_NODE_SELECT_STMT
|
||||
bool isDistinct;
|
||||
SNodeList* pProjectionList;
|
||||
SNode* pFromTable;
|
||||
SNode* pWhere;
|
||||
SNodeList* pPartitionByList;
|
||||
SNodeList* pTags; // for create stream
|
||||
SNode* pSubtable; // for create stream
|
||||
SNode* pWindow;
|
||||
SNodeList* pGroupByList; // SGroupingSetNode
|
||||
SNode* pHaving;
|
||||
SNode* pRange;
|
||||
SNode* pEvery;
|
||||
SNode* pFill;
|
||||
SNodeList* pOrderByList; // SOrderByExprNode
|
||||
SLimitNode* pLimit;
|
||||
SLimitNode* pSlimit;
|
||||
STimeWindow timeRange;
|
||||
char stmtName[TSDB_TABLE_NAME_LEN];
|
||||
uint8_t precision;
|
||||
int32_t selectFuncNum;
|
||||
int32_t returnRows; // EFuncReturnRows
|
||||
ETimeLineMode timeLineResMode;
|
||||
bool isEmptyResult;
|
||||
bool isSubquery;
|
||||
bool hasAggFuncs;
|
||||
bool hasRepeatScanFuncs;
|
||||
bool hasIndefiniteRowsFunc;
|
||||
bool hasMultiRowsFunc;
|
||||
bool hasSelectFunc;
|
||||
bool hasSelectValFunc;
|
||||
bool hasOtherVectorFunc;
|
||||
bool hasUniqueFunc;
|
||||
bool hasTailFunc;
|
||||
bool hasInterpFunc;
|
||||
bool hasInterpPseudoColFunc;
|
||||
bool hasLastRowFunc;
|
||||
bool hasLastFunc;
|
||||
bool hasTimeLineFunc;
|
||||
bool hasUdaf;
|
||||
bool hasStateKey;
|
||||
bool onlyHasKeepOrderFunc;
|
||||
bool groupSort;
|
||||
bool tagScan;
|
||||
} SSelectStmt;
|
||||
|
||||
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
|
||||
|
@ -321,6 +327,7 @@ typedef struct SSetOperator {
|
|||
SNode* pLimit;
|
||||
char stmtName[TSDB_TABLE_NAME_LEN];
|
||||
uint8_t precision;
|
||||
ETimeLineMode timeLineResMode;
|
||||
} SSetOperator;
|
||||
|
||||
typedef enum ESqlClause {
|
||||
|
|
|
@ -51,6 +51,12 @@ typedef enum {
|
|||
TARGET_TYPE_OTHER,
|
||||
} ETargetType;
|
||||
|
||||
typedef enum {
|
||||
TCOL_TYPE_COLUMN = 1,
|
||||
TCOL_TYPE_TAG,
|
||||
TCOL_TYPE_NONE,
|
||||
} ETableColumnType;
|
||||
|
||||
#define QUERY_POLICY_VNODE 1
|
||||
#define QUERY_POLICY_HYBRID 2
|
||||
#define QUERY_POLICY_QNODE 3
|
||||
|
@ -253,6 +259,7 @@ void destroyQueryExecRes(SExecResult* pRes);
|
|||
int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len);
|
||||
char* parseTagDatatoJson(void* p);
|
||||
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
|
||||
void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType);
|
||||
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
|
||||
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
|
||||
void freeVgInfo(SDBVgInfo* vgInfo);
|
||||
|
|
|
@ -27,65 +27,63 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "storageapi.h"
|
||||
|
||||
// void* streamBackendInit(const char* path);
|
||||
// void streamBackendCleanup(void* arg);
|
||||
// SListNode* streamBackendAddCompare(void* backend, void* arg);
|
||||
// void streamBackendDelCompare(void* backend, void* arg);
|
||||
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
|
||||
|
||||
typedef struct STdbState {
|
||||
rocksdb_t* rocksdb;
|
||||
rocksdb_column_family_handle_t** pHandle;
|
||||
rocksdb_writeoptions_t* writeOpts;
|
||||
rocksdb_readoptions_t* readOpts;
|
||||
rocksdb_options_t** cfOpts;
|
||||
rocksdb_options_t* dbOpt;
|
||||
struct SStreamTask* pOwner;
|
||||
void* param;
|
||||
void* env;
|
||||
SListNode* pComparNode;
|
||||
void* pBackendHandle;
|
||||
char idstr[64];
|
||||
void* compactFactory;
|
||||
// <<<<<<< HEAD
|
||||
// typedef struct STdbState {
|
||||
// rocksdb_t* rocksdb;
|
||||
// rocksdb_column_family_handle_t** pHandle;
|
||||
// rocksdb_writeoptions_t* writeOpts;
|
||||
// rocksdb_readoptions_t* readOpts;
|
||||
// rocksdb_options_t** cfOpts;
|
||||
// rocksdb_options_t* dbOpt;
|
||||
// struct SStreamTask* pOwner;
|
||||
// void* param;
|
||||
// void* env;
|
||||
// SListNode* pComparNode;
|
||||
// void* pBackend;
|
||||
// char idstr[64];
|
||||
// void* compactFactory;
|
||||
// TdThreadRwlock rwLock;
|
||||
// =======
|
||||
// typedef struct STdbState {
|
||||
// rocksdb_t* rocksdb;
|
||||
// rocksdb_column_family_handle_t** pHandle;
|
||||
// rocksdb_writeoptions_t* writeOpts;
|
||||
// rocksdb_readoptions_t* readOpts;
|
||||
// rocksdb_options_t** cfOpts;
|
||||
// rocksdb_options_t* dbOpt;
|
||||
// struct SStreamTask* pOwner;
|
||||
// void* param;
|
||||
// void* env;
|
||||
// SListNode* pComparNode;
|
||||
// void* pBackendHandle;
|
||||
// char idstr[64];
|
||||
// void* compactFactory;
|
||||
//
|
||||
// TDB* db;
|
||||
// TTB* pStateDb;
|
||||
// TTB* pFuncStateDb;
|
||||
// TTB* pFillStateDb; // todo refactor
|
||||
// TTB* pSessionStateDb;
|
||||
// TTB* pParNameDb;
|
||||
// TTB* pParTagDb;
|
||||
// TXN* txn;
|
||||
//} STdbState;
|
||||
//>>>>>>> enh/dev3.0
|
||||
|
||||
TDB* db;
|
||||
TTB* pStateDb;
|
||||
TTB* pFuncStateDb;
|
||||
TTB* pFillStateDb; // todo refactor
|
||||
TTB* pSessionStateDb;
|
||||
TTB* pParNameDb;
|
||||
TTB* pParTagDb;
|
||||
TXN* txn;
|
||||
} STdbState;
|
||||
|
||||
// incremental state storage
|
||||
typedef struct {
|
||||
STdbState* pTdbState;
|
||||
SStreamFileState* pFileState;
|
||||
int32_t number;
|
||||
SSHashObj* parNameMap;
|
||||
int64_t checkPointId;
|
||||
int32_t taskId;
|
||||
int64_t streamId;
|
||||
} SStreamState;
|
||||
|
||||
SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);
|
||||
SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
|
||||
void streamStateClose(SStreamState* pState, bool remove);
|
||||
int32_t streamStateBegin(SStreamState* pState);
|
||||
int32_t streamStateCommit(SStreamState* pState);
|
||||
void streamStateDestroy(SStreamState* pState, bool remove);
|
||||
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
|
||||
|
||||
typedef struct {
|
||||
rocksdb_iterator_t* iter;
|
||||
rocksdb_snapshot_t* snapshot;
|
||||
rocksdb_readoptions_t* readOpt;
|
||||
rocksdb_t* db;
|
||||
|
||||
TBC* pCur;
|
||||
int64_t number;
|
||||
} SStreamStateCur;
|
||||
|
||||
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
|
||||
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
|
||||
|
||||
|
@ -119,7 +117,7 @@ int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
|
|||
|
||||
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
|
||||
void streamFreeVal(void* val);
|
||||
void streamStateFreeVal(void* val);
|
||||
|
||||
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
|
||||
|
|
|
@@ -78,11 +78,11 @@ enum {
  TASK_TRIGGER_STATUS__ACTIVE,
};

enum {
typedef enum {
  TASK_LEVEL__SOURCE = 1,
  TASK_LEVEL__AGG,
  TASK_LEVEL__SINK,
};
} ETASK_LEVEL;

enum {
  TASK_OUTPUT__FIXED_DISPATCH = 1,
@ -206,7 +206,7 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
|
|||
void* streamQueueNextItem(SStreamQueue* queue);
|
||||
|
||||
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
|
||||
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
|
||||
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
|
||||
|
||||
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit);
|
||||
|
||||
|
@ -284,13 +284,13 @@ struct SStreamTask {
|
|||
int16_t dispatchMsgType;
|
||||
SStreamStatus status;
|
||||
int32_t selfChildId;
|
||||
int32_t nodeId;
|
||||
int32_t nodeId; // vgroup id
|
||||
SEpSet epSet;
|
||||
SCheckpointInfo chkInfo;
|
||||
STaskExec exec;
|
||||
|
||||
// fill history
|
||||
int8_t fillHistory;
|
||||
int8_t fillHistory; // fill history
|
||||
int64_t ekey; // end ts key
|
||||
int64_t endVer; // end version
|
||||
|
||||
// children info
|
||||
SArray* childEpInfo; // SArray<SStreamChildEpInfo*>
|
||||
|
@ -327,6 +327,7 @@ struct SStreamTask {
|
|||
int64_t checkpointingId;
|
||||
int32_t checkpointAlignCnt;
|
||||
struct SStreamMeta* pMeta;
|
||||
SSHashObj* pNameMap;
|
||||
};
|
||||
|
||||
// meta
|
||||
|
@ -344,14 +345,15 @@ typedef struct SStreamMeta {
|
|||
SRWLatch lock;
|
||||
int32_t walScanCounter;
|
||||
void* streamBackend;
|
||||
int32_t streamBackendId;
|
||||
int64_t streamBackendRid;
|
||||
SHashObj* pTaskBackendUnique;
|
||||
} SStreamMeta;
|
||||
|
||||
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
|
||||
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
|
||||
|
||||
SStreamTask* tNewStreamTask(int64_t streamId);
|
||||
SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
|
||||
SArray* pTaskList);
|
||||
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
|
||||
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
|
||||
void tFreeStreamTask(SStreamTask* pTask);
|
||||
|
|
|
@ -21,23 +21,16 @@
|
|||
#include "tarray.h"
|
||||
#include "tdef.h"
|
||||
#include "tlist.h"
|
||||
#include "storageapi.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct SStreamFileState SStreamFileState;
|
||||
typedef struct SRowBuffPos {
|
||||
void* pRowBuff;
|
||||
void* pKey;
|
||||
bool beFlushed;
|
||||
bool beUsed;
|
||||
} SRowBuffPos;
|
||||
|
||||
typedef SList SStreamSnapshot;
|
||||
|
||||
typedef TSKEY (*GetTsFun)(void*);
|
||||
|
||||
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
||||
GetTsFun fp, void* pFile, TSKEY delMark);
|
||||
void streamFileStateDestroy(SStreamFileState* pFileState);
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include "tarray.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tscalablebf.h"
|
||||
#include "storageapi.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -30,18 +30,18 @@ typedef struct SUpdateKey {
|
|||
TSKEY ts;
|
||||
} SUpdateKey;
|
||||
|
||||
typedef struct SUpdateInfo {
|
||||
SArray *pTsBuckets;
|
||||
uint64_t numBuckets;
|
||||
SArray *pTsSBFs;
|
||||
uint64_t numSBFs;
|
||||
int64_t interval;
|
||||
int64_t watermark;
|
||||
TSKEY minTS;
|
||||
SScalableBf *pCloseWinSBF;
|
||||
SHashObj *pMap;
|
||||
uint64_t maxDataVersion;
|
||||
} SUpdateInfo;
|
||||
//typedef struct SUpdateInfo {
|
||||
// SArray *pTsBuckets;
|
||||
// uint64_t numBuckets;
|
||||
// SArray *pTsSBFs;
|
||||
// uint64_t numSBFs;
|
||||
// int64_t interval;
|
||||
// int64_t watermark;
|
||||
// TSKEY minTS;
|
||||
// SScalableBf *pCloseWinSBF;
|
||||
// SHashObj *pMap;
|
||||
// uint64_t maxDataVersion;
|
||||
//} SUpdateInfo;
|
||||
|
||||
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
|
||||
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
|
||||
|
|
|
@ -149,6 +149,7 @@ struct SWalReader {
|
|||
TdFilePtr pIdxFile;
|
||||
int64_t curFileFirstVer;
|
||||
int64_t curVersion;
|
||||
int64_t skipToVersion; // skip data and jump to destination version, usually used by stream resume ignoring untreated data
|
||||
int64_t capacity;
|
||||
TdThreadMutex mutex;
|
||||
SWalFilterCond cond;
|
||||
|
@ -200,6 +201,8 @@ int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
|
|||
int32_t walNextValidMsg(SWalReader *pRead);
|
||||
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
|
||||
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
|
||||
int64_t walReaderGetSkipToVersion(SWalReader *pReader);
|
||||
void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver);
|
||||
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
|
||||
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
|
||||
|
||||
|
@ -209,7 +212,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
|
|||
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
|
||||
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
|
||||
|
||||
SWalRef *walRefFirstVer(SWal *, SWalRef *);
|
||||
void walRefFirstVer(SWal *, SWalRef *);
|
||||
void walRefLastVer(SWal *, SWalRef *);
|
||||
SWalRef *walRefCommittedVer(SWal *);
|
||||
|
||||
SWalRef *walOpenRef(SWal *);
|
||||
|
|
|
@ -22,21 +22,20 @@ extern "C" {
|
|||
|
||||
// If the error is in a third-party library, place this header file under the third-party library header file.
|
||||
// When you want to use this feature, you should find or add the same function in the following sectio
|
||||
// #if !defined(WINDOWS)
|
||||
#if !defined(WINDOWS)
|
||||
|
||||
// #ifndef ALLOW_FORBID_FUNC
|
||||
// #define malloc MALLOC_FUNC_TAOS_FORBID
|
||||
// #define calloc CALLOC_FUNC_TAOS_FORBID
|
||||
// #define realloc REALLOC_FUNC_TAOS_FORBID
|
||||
// #define free FREE_FUNC_TAOS_FORBID
|
||||
// #ifdef strdup
|
||||
// #undef strdup
|
||||
// #define strdup STRDUP_FUNC_TAOS_FORBID
|
||||
// #endif
|
||||
// #endif // ifndef ALLOW_FORBID_FUNC
|
||||
// #endif // if !defined(WINDOWS)
|
||||
#ifndef ALLOW_FORBID_FUNC
|
||||
#define malloc MALLOC_FUNC_TAOS_FORBID
|
||||
#define calloc CALLOC_FUNC_TAOS_FORBID
|
||||
#define realloc REALLOC_FUNC_TAOS_FORBID
|
||||
#define free FREE_FUNC_TAOS_FORBID
|
||||
#ifdef strdup
|
||||
#undef strdup
|
||||
#define strdup STRDUP_FUNC_TAOS_FORBID
|
||||
#endif
|
||||
#endif // ifndef ALLOW_FORBID_FUNC
|
||||
#endif // if !defined(WINDOWS)
|
||||
|
||||
// // #define taosMemoryFree malloc
|
||||
// #define taosMemoryMalloc malloc
|
||||
// #define taosMemoryCalloc calloc
|
||||
// #define taosMemoryRealloc realloc
|
||||
|
|
|
@ -409,6 +409,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MNODE_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
|
||||
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
|
||||
#define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
|
||||
#define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x0416)
|
||||
|
||||
// vnode
|
||||
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
|
||||
|
|
|
@@ -32,7 +32,7 @@ extern "C" {
#define TD_VER_MAX UINT64_MAX  // TODO: use the real max version from query handle

// Bytes for each type.
extern const int32_t TYPE_BYTES[16];
extern const int32_t TYPE_BYTES[21];

// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char)

@@ -53,10 +53,11 @@ extern const int32_t TYPE_BYTES[16];
#define TSDB_DATA_BIGINT_NULL    0x8000000000000000LL
#define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL

#define TSDB_DATA_FLOAT_NULL  0x7FF00000            // it is an NAN
#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL  // an NAN
#define TSDB_DATA_NCHAR_NULL  0xFFFFFFFF
#define TSDB_DATA_BINARY_NULL 0xFF
#define TSDB_DATA_FLOAT_NULL    0x7FF00000            // it is an NAN
#define TSDB_DATA_DOUBLE_NULL   0x7FFFFF0000000000LL  // an NAN
#define TSDB_DATA_NCHAR_NULL    0xFFFFFFFF
#define TSDB_DATA_BINARY_NULL   0xFF
#define TSDB_DATA_GEOMETRY_NULL 0xFF

#define TSDB_DATA_UTINYINT_NULL  0xFF
#define TSDB_DATA_USMALLINT_NULL 0xFFFF

@@ -253,6 +254,7 @@ typedef enum ELogicConditionType {
#define TSDB_IPv4ADDR_LEN         16
#define TSDB_FILENAME_LEN         128
#define TSDB_SHOW_SQL_LEN         2048
#define TSDB_SHOW_SCHEMA_JSON_LEN TSDB_MAX_COLUMNS * 256
#define TSDB_SLOW_QUERY_SQL_LEN   512
#define TSDB_SHOW_SUBQUERY_LEN    1000

@@ -410,6 +412,8 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_FIELD_LEN    65519               // 16384:65519
#define TSDB_MAX_BINARY_LEN   TSDB_MAX_FIELD_LEN  // 16384-8:65519
#define TSDB_MAX_NCHAR_LEN    TSDB_MAX_FIELD_LEN  // 16384-8:65519
#define TSDB_MAX_GEOMETRY_LEN TSDB_MAX_FIELD_LEN  // 16384-8:65519

#define PRIMARYKEY_TIMESTAMP_COL_ID 1
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
@@ -0,0 +1,44 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_UTIL_GEOS_CTX_H_
#define _TD_UTIL_GEOS_CTX_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <geos_c.h>

typedef struct SGeosContext {
  GEOSContextHandle_t handle;

  GEOSWKTReader *WKTReader;
  GEOSWKTWriter *WKTWriter;

  GEOSWKBReader *WKBReader;
  GEOSWKBWriter *WKBWriter;

  char errMsg[512];
} SGeosContext;

SGeosContext* getThreadLocalGeosCtx();
void destroyThreadLocalGeosCtx();

#ifdef __cplusplus
}
#endif

#endif /*_TD_UTIL_GEOS_CTX_H_*/
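getThreadLocalGeosCtx() hands each thread its own GEOS handle plus cached WKT/WKB readers and writers, so the geometry functions never share GEOS state across threads. A sketch of the lazy-initialization idea; the __thread storage class and the GEOS_init_r() call are assumptions about the implementation, not taken from this patch:

```c
#include <geos_c.h>
#include <string.h>

// Illustrative only: one context per thread, created on first use.
static __thread SGeosContext tlGeosCtx;

SGeosContext *getThreadLocalGeosCtxSketch(void) {
  if (tlGeosCtx.handle == NULL) {
    memset(&tlGeosCtx, 0, sizeof(tlGeosCtx));
    tlGeosCtx.handle = GEOS_init_r();  // per-thread GEOS context handle
  }
  return &tlGeosCtx;
}
```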
@@ -123,6 +123,16 @@ else
  echo "Unknown cpuType: ${cpuType}"
  exit 1
fi
# check the tdengine cloud base image existed or not
if [ "$cloudBuild" == "y" ]; then
  CloudBase=$(docker images | grep tdengine/tdengine-cloud-base ||:)
  if [[ "$CloudBase" == "" ]]; then
    echo "Rebuild tdengine cloud base image..."
    docker build --rm -f "${communityDir}/packaging/docker/DockerfileCloud.base" -t tdengine/tdengine-cloud-base "." --build-arg cpuType=${cpuTypeAlias}
  else
    echo "Already found tdengine cloud base image"
  fi
fi

docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
if [ "$cloudBuild" != "y" ]; then