Merge branch 'main' of https://github.com/taosdata/TDengine into mark/tmq

wangmm0220 2023-06-27 18:18:27 +08:00
commit 7d15958509
133 changed files with 13877 additions and 2289 deletions

.gitignore

@@ -16,7 +16,6 @@ debug/
release/
target/
debs/
deps/
rpms/
mac/
*.pyc

.pre-commit-config.yaml

@@ -0,0 +1,28 @@
repos:
-   repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
    -   id: check-yaml
    -   id: check-json
    -   id: end-of-file-fixer
    -   id: trailing-whitespace
-   repo: https://github.com/psf/black
    rev: stable
    hooks:
    -   id: black
-   repo: https://github.com/pocc/pre-commit-hooks
    rev: master
    hooks:
    -   id: cppcheck
        args: ["--error-exitcode=0"]
-   repo: https://github.com/crate-ci/typos
    rev: v1.15.7
    hooks:
    -   id: typos


@@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.version)
# contrib
add_subdirectory(contrib)


@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone gflags pkgconfig
```
### Set up the golang development environment


@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone gflags pkgconfig
```
### Setup golang environment


@@ -189,3 +189,9 @@ option(
"If build release version"
OFF
)
option(
BUILD_CONTRIB
"If build thirdpart from source"
OFF
)


@@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LOONGARCH_64 TRUE)
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")
ADD_DEFINITIONS("-D_TD_MIPS_64")
ENDIF ()
ELSE ()
# if generate ARM version:
@@ -172,5 +178,17 @@ ENDIF()
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
set(TD_DEPS_DIR "x86")
if (TD_LINUX)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "arm")
ELSEIF (TD_MIPS_64)
set(TD_DEPS_DIR "mips")
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
endif()
MESSAGE(STATUS "DEPS_DIR" ${TD_DEPS_DIR})
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")


@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.5.0")
SET(TD_VER_NUMBER "3.0.5.2.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@@ -1,16 +1,29 @@
# rocksdb
#set(librocksdb_src ${TD_CONTRIB_DIR}/rocksdb)
#set(librocksdb_binary ${TD_CONTRIB_DIR}/rocksdb/build)
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
if (${BUILD_CONTRIB})
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
else()
if (NOT ${TD_LINUX})
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()
endif()


@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3.0
GIT_TAG main
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE


@@ -77,11 +77,25 @@ if(${BUILD_WITH_LEVELDB})
cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LEVELDB})
# rocksdb
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
if (${BUILD_CONTRIB})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif()
else()
if (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
else()
if(${BUILD_WITH_ROCKSDB})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
endif()
endif()
# canonical-raft
if(${BUILD_WITH_CRAFT})
@@ -175,8 +189,8 @@ if(${BUILD_TEST})
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
)
endif(${TD_DARWIN})
endif(${BUILD_TEST})
# cJson
@@ -237,66 +251,103 @@ if (${BUILD_WITH_UV})
endif(${TD_LINUX})
endif (${BUILD_WITH_UV})
if(${BUILD_WITH_ROCKSDB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if (${BUILD_WITH_ROCKSDB})
if (${BUILD_CONTRIB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
endif(${TD_WINDOWS})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
if(${TD_WINDOWS})
option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
if(${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif(${BUILD_WITH_ROCKSDB})
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
else()
if (NOT ${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif()
endif()
endif()
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
@@ -304,10 +355,10 @@ if(${BUILD_WITH_LUCENE})
option(ENABLE_TEST "Enable the tests" OFF)
add_subdirectory(lucene EXCLUDE_FROM_ALL)
target_include_directories(
lucene++
lucene++
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lucene/include>
)
)
endif(${BUILD_WITH_LUCENE})
# NuRaft
@@ -367,7 +418,7 @@ if(${BUILD_MSVCREGEX})
target_include_directories(msvcregex
PRIVATE "msvcregex"
)
target_link_libraries(msvcregex
target_link_libraries(msvcregex
INTERFACE Shell32
)
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
@@ -427,8 +478,8 @@ if(${BUILD_WITH_BDB})
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/bdb/libdb.a"
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/bdb"
)
target_link_libraries(bdb
INTERFACE pthread
target_link_libraries(bdb
INTERFACE pthread
)
endif(${BUILD_WITH_BDB})
@@ -440,12 +491,12 @@ if(${BUILD_WITH_SQLITE})
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/sqlite/.libs/libsqlite3.a"
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/sqlite"
)
target_link_libraries(sqlite
INTERFACE m
INTERFACE pthread
target_link_libraries(sqlite
INTERFACE m
INTERFACE pthread
)
if(NOT TD_WINDOWS)
target_link_libraries(sqlite
target_link_libraries(sqlite
INTERFACE dl
)
endif(NOT TD_WINDOWS)
@@ -453,22 +504,22 @@ endif(${BUILD_WITH_SQLITE})
# addr2line
if(${BUILD_ADDR2LINE})
if(NOT ${TD_WINDOWS})
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
check_include_file( "stddef.h" HAVE_STDDEF_H )
check_include_file( "stdlib.h" HAVE_STDLIB_H )
check_include_file( "string.h" HAVE_STRING_H )
check_include_file( "memory.h" HAVE_MEMORY_H )
check_include_file( "strings.h" HAVE_STRINGS_H )
if(NOT ${TD_WINDOWS})
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
check_include_file( "stddef.h" HAVE_STDDEF_H )
check_include_file( "stdlib.h" HAVE_STDLIB_H )
check_include_file( "string.h" HAVE_STRING_H )
check_include_file( "memory.h" HAVE_MEMORY_H )
check_include_file( "strings.h" HAVE_STRINGS_H )
check_include_file( "stdint.h" HAVE_STDINT_H )
check_include_file( "unistd.h" HAVE_UNISTD_H )
check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
check_include_file( "stdafx.h" HAVE_STDAFX_H )
check_include_file( "elf.h" HAVE_ELF_H )
check_include_file( "libelf.h" HAVE_LIBELF_H )
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
check_include_file( "elf.h" HAVE_ELF_H )
check_include_file( "libelf.h" HAVE_LIBELF_H )
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
check_include_file( "alloca.h" HAVE_ALLOCA_H )
check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
@@ -476,7 +527,7 @@ if(${BUILD_ADDR2LINE})
check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
set(VERSION 0.3.1)
set(PACKAGE_VERSION "\"${VERSION}\"")
set(PACKAGE_VERSION "\"${VERSION}\"")
configure_file(libdwarf/cmake/config.h.cmake config.h)
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
@@ -507,6 +558,7 @@ if(${BUILD_GEOS})
endif(${TD_LINUX})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>

deps/arm/rocksdb_static/librocksdb.a (binary file; not shown)

deps/arm/rocksdb_static/rocksdb/c.h (2844 lines; diff suppressed because it is too large)

deps/mips/rocksdb_static/librocksdb.a (binary file; not shown)

deps/mips/rocksdb_static/rocksdb/c.h (2844 lines; diff suppressed because it is too large)

deps/x86/rocksdb_static/librocksdb.a (binary file; not shown)

deps/x86/rocksdb_static/rocksdb/c.h (2844 lines; diff suppressed because it is too large)

(New image added; 13 KiB; binary file not shown.)

(New image added; 14 KiB; binary file not shown.)


@@ -20,6 +20,19 @@ The standard server installation package includes `taos`, `taosd`, `taosAdapter`
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
## Operating environment requirements
In the Linux system, the minimum requirements for the operating environment are as follows:

- Linux kernel version: 3.10.0-1160.83.1.el7.x86_64 or later
- glibc version: 2.17 or later

If you compile and install from cloned source code, the following are also required:

- cmake version: 3.26.4 or later
- gcc version: 9.3.1 or later
## Installation
<Tabs>


@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
```
`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1` is a Unix timestamp. Only timestamps later than the current time minus the database's KEEP duration are accepted. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
### Insert Multiple Rows
@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
INSERT INTO d1001 VALUES (ts1, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
```
`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1` and `ts2` are Unix timestamps. Only timestamps later than the current time minus the database's KEEP duration are accepted. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
### Insert into Multiple Tables
@@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
```
`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1`, `ts2` and `ts3` are Unix timestamps. Only timestamps later than the current time minus the database's KEEP duration are accepted. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
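As a quick sanity check of the KEEP rule above, here is a small Python sketch (not part of the docs; the KEEP value and table name are assumptions) that computes millisecond Unix timestamps inside the acceptable window:

```python
import time

# Assumption: the database was created with the default KEEP of 3650 days.
KEEP_DAYS = 3650
now_ms = int(time.time() * 1000)
oldest_allowed_ms = now_ms - KEEP_DAYS * 24 * 60 * 60 * 1000

ts1 = now_ms          # current time: always acceptable
ts2 = now_ms - 1000   # one second earlier: also inside the KEEP window

assert ts1 > oldest_allowed_ms and ts2 > oldest_allowed_ms
print(f"INSERT INTO d1001 VALUES ({ts1}, 10.3, 219, 0.31) ({ts2}, 10.2, 220, 0.23)")
```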


@@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
def assignment(self):
pass
def poll(self, timeout: float = 1.0):
pass
def close(self):
pass
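A minimal consume loop built on the methods outlined above might look as follows — a sketch only, assuming the `Consumer` class from `taos.tmq`, with a hypothetical group id and topic name, and error handling elided:

```python
from taos.tmq import Consumer

# Hypothetical configuration and topic; adjust for your deployment.
consumer = Consumer({"group.id": "demo", "td.connect.ip": "127.0.0.1"})
consumer.subscribe(["topic1"])
try:
    print(consumer.assignment())          # inspect the assigned partitions
    for _ in range(10):
        msg = consumer.poll(timeout=1.0)  # returns None if nothing arrives
        if msg is None:
            continue
        for block in msg.value():         # each message carries data blocks
            print(block.fetchall())
finally:
    consumer.close()
```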


@@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query
TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode.
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of buffer is MB.
```sql
create database db0 vgroups 100 buffer 16MB
create database db0 vgroups 100 buffer 16
```
In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings.
@@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every
## Metadata Cache
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters.
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of pagesize is KB.
```sql
create database db0 pages 128 pagesize 16kb
create database db0 pages 128 pagesize 16
```
The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache.
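A back-of-the-envelope check of the two statements above (plain arithmetic, no TDengine required):

```python
# Write cache: vgroups 100, buffer 16 (MB per vnode)
vnodes, buffer_mb = 100, 16
print(f"total write cache: {vnodes * buffer_mb} MB")          # 1600 MB

# Metadata cache: pages 128, pagesize 16 (KB per page)
pages, pagesize_kb = 128, 16
print(f"metadata cache per vnode: {pages * pagesize_kb} KB")  # 2048 KB = 2 MB
```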


@@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `<udf-name>_start`, `<udf-name>_finish`, `<udf-name>_init`, and `<udf-name>_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
@@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows:
def process(input: datablock) -> tuple[output_type]:
```
Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype
Description: this function processes the datablock, which is the input; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of the output type.
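For instance, a minimal scalar UDF sketch following this interface — it assumes a single numeric input column and returns each value doubled (the file path is hypothetical):

```python
# Hypothetical /root/udf/double.py: a scalar UDF that doubles its input.
def init():
    pass

def destroy():
    pass

def process(block):
    rows, _ = block.shape()
    # One output value per input row; None propagates SQL NULL.
    return [None if block.data(i, 0) is None else block.data(i, 0) * 2
            for i in range(rows)]
```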
#### Aggregate UDF Interface
@@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]:
# return tuple object consisted of object of type outputtype
```
Noteprocess() must be implemeted, init() and destroy() must be defined too but they can do nothing.
Note: process() must be implemented, and init() and destroy() must be defined too, but they can do nothing.
#### Aggregate Template
@@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type:
#return obj of type outputtype
```
Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
Note: an aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result in a buffer; the input data is then divided into multiple row data blocks, and reduce() is invoked for each data block `inputs` with the intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate result `buf`.
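For instance, a minimal aggregate sketch following this template — a spread-like max-minus-min over one numeric column, with the intermediate result pickled into `buf` (this mirrors the `myspread` sample referenced later and assumes a single input column):

```python
import pickle

def init():
    pass

def destroy():
    pass

def start():
    # Initial intermediate result: (max_so_far, min_so_far).
    return pickle.dumps((None, None))

def reduce(block, buf):
    hi, lo = pickle.loads(buf)
    rows, _ = block.shape()
    for i in range(rows):
        v = block.data(i, 0)
        if v is None:
            continue
        hi = v if hi is None else max(hi, v)
        lo = v if lo is None else min(lo, v)
    return pickle.dumps((hi, lo))

def finish(buf):
    hi, lo = pickle.loads(buf)
    return None if hi is None else hi - lo
```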
### Data Mapping between TDengine SQL and Python UDF
@@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart
#### Sample 3: UDF with n arguments
A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
```python
def init():
@@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s)
#### Sample 4: Utilize 3rd party package
A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty.
A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`; you need to install it first.
```shell
pip3 install moment
@@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s)
#### Sample 5: Aggregate Function
An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`.
An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one piece of data. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed, and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
```python
import io
@@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging.
2. log() is the function for logging; it converts the input object to string and outputs it with an end of line
3. destroy() closes the log file
4. start() returns the initial buffer for storing the intermediate result
5. reduce() processes each daa block and aggregates the result
5. reduce() processes each data block and aggregates the result
6. finish() converts the final buffer to the final result
Create the UDF.

View File

@@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```
**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` units in the calculated time length.
**Return value type**: Double if the input value is not NULL;
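A hedged usage sketch in Python (assuming the `taospy` native connector and the standard `meters` demo table; all names are illustrative):

```python
import taos

conn = taos.connect()  # assumes a local TDengine with the demo data loaded
result = conn.query(
    "SELECT _wstart, ELAPSED(ts, 1s) FROM test.meters "
    "WHERE ts > '2023-01-01 00:00:00' INTERVAL(1m)"
)
print(result.fetch_all())  # one elapsed value (in seconds) per 1-minute window
conn.close()
```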


@@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
A PARTITION BY clause is processed as follows:
- The PARTITION BY clause must occur after the WHERE clause
- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement: a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
```sql
select _wstart, location, max(current) from meters partition by location interval(10m)
```

@@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Obsoleted |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
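The escaping note above in action — a sketch (assuming the `taospy` connector) that wraps reserved column names in backticks:

```python
import taos

conn = taos.connect()
# `replica`, `keep`, and `buffer` are TDengine keywords, so they must be
# escaped with backticks when used as column names.
result = conn.query(
    "SELECT name, ntables, `replica`, `keep`, `buffer` "
    "FROM information_schema.ins_databases"
)
print(result.fetch_all())
conn.close()
```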


@@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
@@ -284,10 +285,11 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance.
- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true.
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false.
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms.
- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms.
- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true.
- useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection.
- httpPoolSize: size of REST concurrent requests. The default value is 20.
**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
@@ -351,10 +353,11 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
### Priority of configuration parameters
@@ -419,6 +422,19 @@ while(resultSet.next()){
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### execute SQL with reqId
The reqId can be used for tracing a request across the call chain (link tracing).
```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
Timestamp ts = rs.getTimestamp(1);
}
```
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
@@ -936,6 +952,14 @@ public class SchemalessWsTest {
</TabItem>
</Tabs>
### Schemaless with reqId
The reqId can be used for tracing a request across the call chain (link tracing).
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
```
### Data Subscription
The TDengine Java Connector supports subscription functionality with the following application API.
@@ -993,7 +1017,7 @@ while(true) {
#### Assignment subscription Offset
```
```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
@@ -1002,6 +1026,29 @@ Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
Example usage is as follows.
```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(topic));
for (int i = 0; i < 10; i++) {
if (i == 3) {
// Saving consumption position
offset = consumer.position(topic);
}
if (i == 5) {
// reset consumption to the previously saved position
for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
consumer.seek(entry.getKey(), entry.getValue());
}
}
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
}
}
```
#### Close subscriptions
```java
@@ -1308,3 +1355,7 @@ For additional troubleshooting, see [FAQ](../../../train-faq/faq).
## API Reference
[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)

(File diff suppressed because it is too large.)


@@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Rust.
| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
## Installation
## Handling exceptions
After the error is reported, the specific information of the error can be obtained:
```rust
match conn.exec(sql) {
Ok(_) => {
Ok(())
}
Err(e) => {
eprintln!("ERROR: {:?}", e);
Err(e)
}
}
```
## TDengine DataType vs. Rust DataType
TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Rust is as follows:
| TDengine DataType | Rust DataType |
| ----------------- | ----------------- |
| TIMESTAMP | Timestamp |
| INT | i32 |
| BIGINT | i64 |
| FLOAT | f32 |
| DOUBLE | f64 |
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| NCHAR | String |
| JSON | serde_json::Value |
Note: Only TAG supports JSON types
## Installation Steps
### Pre-installation preparation
* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
### Add taos dependency
### Install the connectors
Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:
@@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build();
// use websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let conn2 = builder2.build();
```
After the connection is established, you can perform operations on your database.
@@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se
## Usage examples
### Write data
### Create database and tables
#### SQL Write
```rust
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "query";
// create database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
// create table
taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
]).await?;
Ok(())
}
```
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Insert data
<RustInsert />
#### STMT Write
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />
## API Reference
### execute SQL with req_id
### Connector Constructor
You create a connector constructor by using a DSN.
The req_id can be used for tracing a request across the call chain (link tracing).
```rust
let cfg = TaosBuilder::default().build()?;
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
```
You use the builder object to create multiple connections.
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
For parameter binding details, see the [API Reference](#stmt-api).
<RustBind />
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
<RustSml />
### Schemaless with req_id
The req_id can be used for tracing a request across the call chain (link tracing).
```rust
let conn: Taos = cfg.build();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(100u64)
.build()?;
client.put(&sml_data)?
```
### Connection pooling
### Data Subscription
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
#### Create a Topic
```rust
taos.exec_many([
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
```
#### Create a Consumer
You create a TMQ connector by using a DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
#### Subscribe to consume data
A single consumer can subscribe to one or more topics.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get assignments
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
#### Assignment subscription Offset
Seek offset
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
#### Close subscriptions
```rust
consumer.unsubscribe().await;
```
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory.
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
#### Full Sample Code
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
### Use with connection pool
In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2].
@@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos].
let taos = pool.get()?;
```
### Connectors
### More sample programs
The source code of the sample application is under `TDengine/examples/rust`:
[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)
## Frequently Asked Questions
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
## API Reference
The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
@@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: Executes the `USE` statement.
In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage.
In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage.
### Bind Interface
<p>
<a id="stmt-api" style={{color:'#141414'}}>
Bind Interface
</a>
</p>
Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement.
@@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
The bind object provides a set of interfaces for implementing parameter binding.
#### `.set_tbname(name)`
`.set_tbname(name)`
To bind table names.
@@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
#### `.set_tags(&[tag])`
`.set_tags(&[tag])`
Bind sub-table table names and tag values when the SQL statement uses a super table.
@@ -410,7 +611,7 @@ stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("taos".to_string())])?;
```
#### `.bind(&[column])`
`.bind(&[column])`
Bind value types. Use the [ColumnView] structure to create and bind the required types.
@@ -434,7 +635,7 @@ let params = vec![
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
#### `.execute()`
`.execute()`
Execute SQL. [Stmt] objects can be reused, re-bound, and executed again after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`.
@@ -449,92 +650,6 @@ stmt.execute()?;
For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
### Subscriptions
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
You create a TMQ connector by using a DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
A single consumer can subscribe to one or more topics.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get assignments
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
Seek offset
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
Unsubscribe:
```rust
consumer.unsubscribe().await;
```
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory.
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).


@@ -24,6 +24,46 @@ The source code for the Python connector is hosted on [GitHub](https://github.co
We recommend using the latest version of `taospy`, regardless of the version of TDengine.
|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|
|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
## Handling Exceptions
There are five types of exceptions in the Python connector:
- The exception of Python Connector itself.
- The exception of native library.
- The exception of websocket
- The exception of subscription.
- The exception of other TDengine function modules.
|Error Type|Description|Suggested Actions|
|:--------:|:---------:|:---------------:|
|InterfaceError|the native library is too old that it cannot support the function|please check the TDengine client version|
|ConnectionError|connection error|please check TDengine's status and the connection params|
|DatabaseError|database error|please upgrade Python connector to latest|
|OperationalError|operation error||
|ProgrammingError|||
|StatementError|the exception of stmt||
|ResultError|||
|SchemalessError|the exception of stmt schemaless||
|TmqError|the exception of stmt tmq||
Python applications usually use try-except to handle exceptions. For exception handling, please refer to [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html).
All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
## Supported features
- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
@@ -343,6 +383,8 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
@@ -353,6 +395,46 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Querying Data
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```
:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
</TabItem>
</Tabs>
### Usage with req_id
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
@@ -453,6 +535,170 @@ As the way to connect introduced above but add `req_id` argument.
</TabItem>
</Tabs>
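For example, on a native connection the optional argument looks like this — a sketch assuming a taospy version (2.7.x or later) whose `query` accepts a `req_id` keyword:

```python
import taos

conn = taos.connect()
# The same req_id can be logged on client and server to trace one request.
result = conn.query("SELECT server_version()", req_id=1)
print(result.fetch_all())
conn.close()
```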
### Subscription
The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The `consumer` in the connector contains the subscription api.
#### Create Consumer
The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
```python
from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
#### Subscribe topics
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
```
#### Consume
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
```python
while True:
res = consumer.poll(1)
if not res:
continue
err = res.error()
if err is not None:
raise err
val = res.value()
for block in val:
print(block.fetchall())
```
#### assignment
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
```
#### Seek
The `seek` function is used to reset the assignment of the topic.
```python
tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```
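Putting `assignment` and `seek` together — a sketch that saves the current offsets and rewinds to them later, similar to the Java example earlier in this commit (the import path for `TopicPartition` and the topic name are assumptions):

```python
from taos.tmq import Consumer, TopicPartition  # import path assumed

consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer.subscribe(["topic1"])

saved = consumer.assignment()   # remember the current positions
for _ in range(5):
    consumer.poll(1)            # consume for a while
for tp in saved:                # rewind to the saved positions
    consumer.seek(TopicPartition(topic=tp.topic,
                                 partition=tp.partition,
                                 offset=tp.offset))
consumer.close()
```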
#### After consuming data
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
```
#### Tmq subscription example
```python
{{#include docs/examples/python/tmq_example.py}}
```
#### assignment and seek example
```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
In addition to native connections, the connector also supports subscriptions via websockets.
#### Create Consumer
The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
```python
import taosws
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
#### Subscribe topics
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
```
#### Consume
The `poll` function is used to consume data from TMQ. Its parameter is a float value representing the timeout in seconds. It returns a `Message` if data arrives before the timeout, or `None` on timeout. Error information in the returned data must be checked via the Message's `error()` method.
```python
while True:
    message = consumer.poll(timeout=1.0)
    if not message:
        continue
    err = message.error()
    if err is not None:
        raise err
    for block in message:
        for row in block:
            print(row)
```
#### assignment
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
```
#### Seek
The `seek` function is used to reset the consumption position to a specified offset.
```python
consumer.seek(topic='topic1', partition=0, offset=0)
```
#### After consuming data
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
```
#### Subscription example
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
#### Assignment and seek example
```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
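As a quick illustration before the detailed examples, a minimal sketch of an InfluxDB-line-protocol write over a native connection might look as follows (the database name and the sample line are made up for illustration; `SmlPrecision.NANO_SECONDS` is assumed to be available in recent taospy releases):

```python
import taos

conn = taos.connect()
conn.execute("create database if not exists test")
conn.select_db("test")
lines = [
    'meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221 1626006833639000000',
]
# One line of InfluxDB line protocol; the trailing timestamp is in nanoseconds.
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NANO_SECONDS)
conn.close()
```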
@ -503,11 +749,143 @@ Insert with req_id argument
</TabItem>
</Tabs>
### Parameter Binding
The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
<Tabs>
<TabItem value="native" label="native connection">
#### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
#### parameter binding
Call the `new_multi_binds` function to create the parameter list for parameter bindings.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
```
stmt.bind_param_batch(params)
```
#### execute sql
Call the `execute` method to execute the SQL.
```
stmt.execute()
```
#### Close Stmt
```
stmt.close()
```
#### Example
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
#### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
#### Prepare sql
Call the `prepare` method of the stmt to parse the SQL.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
#### parameter binding
Call the `bind_param` method to bind parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the `add_batch` method to add parameters to the batch.
```
stmt.add_batch()
```
#### execute sql
Call the `execute` method to execute the SQL.
```
stmt.execute()
```
#### Close Stmt
```
stmt.close()
```
#### Example
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |
| ------------------------------------------------------------------------------------------------------------- | --------------------------------------------- |
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at a time |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
@ -515,14 +893,6 @@ Insert with req_id argument
## Other notes
### Exception handling
All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
### About nanoseconds
Due to Python's currently incomplete nanosecond support (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced at `ms` and `us` precision; application developers need to handle this on their own, and pandas' to_datetime() is recommended. The Python connector may modify the interface in the future if Python adds full official support for nanoseconds.
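For example, the following minimal sketch (the integer values are invented for illustration, and pandas is assumed to be installed) converts raw nanosecond integers into pandas Timestamps:

```python
import pandas as pd

# Raw nanosecond-precision timestamps returned as plain integers by the connector
ns_values = [1626861392589111111, 1626861392590111112]
# Convert them to pandas Timestamps, which retain full nanosecond resolution
ts = pd.to_datetime(ns_values, unit="ns")
print(ts)
```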

View File

@ -48,7 +48,6 @@ Comparing the connector support for TDengine functional features as follows.
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::info
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
@ -60,11 +59,10 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Parameter Binding** | Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Supported | Not Supported | Supported | Not Supported | Not Supported | Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::warning

View File

@ -19,16 +19,20 @@ taosd -C
## Configuration File on Client Side
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example:
```
taos -c /home/cfg
```
means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details, run `taos --help`.
Parameter `-C` can be used on the CLI of `taos` to show its configuration, like below:
```bash
taos -C
```
```bash
taos --dump-config
```
## Configuration Parameters
:::note
@ -77,8 +81,9 @@ The parameters described in this document by the effect that they have on the sy
| Default Value | 6030 |
:::note
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
:::
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
@ -120,6 +125,8 @@ The parameters described in this document by the effect that they have on the sy
:::note
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive the metrics sent by `taosd` as part of the full monitoring solution.
:::
### monitor
| Attribute | Description |

61
docs/en/20-third-party/14-dbeaver.md vendored Normal file
View File

@ -0,0 +1,61 @@
---
sidebar_label: DBeaver
title: DBeaver
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
---
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
## Prerequisites
To use DBeaver to manage TDengine, you need to prepare the following:
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account.
## Usage
### Use DBeaver to access on-premises TDengine cluster
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp)
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp)
5. You can also manipulate TDengine data by executing SQL commands.
![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp)
### Use DBeaver to access TDengine Cloud
1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section.
![Copy JDBC URL from TDengine Cloud](./dbeaver/tdengine-cloud-jdbc-dsn-en.webp)
2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category.
![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-en.webp)
3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct.
![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud.
![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-data-cloud-en.webp)
5. You can also manipulate TDengine Cloud data by executing SQL commands.
![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-en.webp)


View File

@ -56,7 +56,7 @@ This error indicates that the client could not connect to the server. Perform th
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable.
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />

View File

@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.5.2
<Release type="tools" version="2.5.2" />
## 2.5.1
<Release type="tools" version="2.5.1" />

View File

@ -0,0 +1,82 @@
#!
import taosws
import taos
db_name = 'test_ws_stmt'
def before():
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def stmt_insert():
before()
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break
def stmt_insert_into_stable():
before()
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break

View File

@ -0,0 +1,78 @@
#!
import time
import taosws
import taos
def before_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def after_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.close()
def stmt_insert():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
def stmt_insert_into_stable():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
if __name__ == '__main__':
stmt_insert()
stmt_insert_into_stable()

View File

@ -0,0 +1,58 @@
import taos
from taos.tmq import Consumer
import taosws
def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
conn.execute(
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
def taos_get_assignment_and_seek_demo():
prepare()
consumer = Consumer(
{
"group.id": "0",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
}
)
consumer.subscribe(["tmq_assignment_demo_topic"])
# get topic assignment
assignments = consumer.assignment()
for assignment in assignments:
print(assignment)
# poll
consumer.poll(1)
consumer.poll(1)
# get topic assignment again
after_poll_assignments = consumer.assignment()
for assignment in after_poll_assignments:
print(assignment)
# seek to the beginning
for assignment in assignments:
consumer.seek(assignment)
# now the assignment should be the same as before poll
assignments = consumer.assignment()
for assignment in assignments:
print(assignment)
if __name__ == '__main__':
taos_get_assignment_and_seek_demo()

View File

@ -0,0 +1,57 @@
import taos
import taosws
def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
conn.execute(
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
def taosws_get_assignment_and_seek_demo():
prepare()
consumer = taosws.Consumer(conf={
"td.connect.websocket.scheme": "ws",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
"group.id": "0",
})
consumer.subscribe(["tmq_assignment_demo_topic"])
# get topic assignment
assignments = consumer.assignment()
for assignment in assignments:
print(assignment.to_string())
# poll
consumer.poll(1)
consumer.poll(1)
# get topic assignment again
after_poll_assignments = consumer.assignment()
for assignment in after_poll_assignments:
print(assignment.to_string())
# seek to the beginning
for assignment in assignments:
for a in assignment.assignments():
consumer.seek(assignment.topic(), a.vg_id(), a.offset())
# now the assignment should be the same as before poll
assignments = consumer.assignment()
for assignment in assignments:
print(assignment.to_string())
if __name__ == '__main__':
taosws_get_assignment_and_seek_demo()

View File

@ -16,6 +16,20 @@ The complete TDengine package includes the server (taosd), the application driver (taosc
On Linux, TDengine community edition provides Deb and RPM packages; choose the one that fits your environment. Deb supports Debian/Ubuntu and derivatives, and RPM supports CentOS/RHEL/SUSE and derivatives. A tar.gz package is also provided for enterprise users, and installation via the `apt-get` tool is supported. Note that the RPM and Deb packages do not include `taosdump` and the TDinsight installation script; these tools are obtained by installing the taosTools package. TDengine also provides installers for the Windows x64 platform and the macOS x64/m1 platform.
## Operating Environment Requirements
On Linux, the minimum operating environment requirements are as follows:
Linux kernel version: 3.10.0-1160.83.1.el7.x86_64;
glibc version: 2.17.
If you compile and install from a cloned source tree, you also need:
CMake 3.26.4 or later;
GCC 9.3.1 or later.
## Installation
<Tabs>

View File

@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
def assignment(self):
pass
def seek(self, partition):
pass
def close(self):
pass

View File

@ -10,10 +10,10 @@ description: "TDengine's internal cache design"
TDengine adopts a time-driven, First-In-First-Out (FIFO) cache management strategy, also called a write-driven cache management mechanism. Unlike read-driven Least-Recently-Used (LRU) caching, this strategy keeps the most recently written data in the system cache; when the cache reaches a threshold, the earliest data is flushed to disk in batches. Generally speaking, for IoT data, users care most about the most recently generated data, i.e. the current state. TDengine takes full advantage of this by keeping the most recently arrived (current-state) data in the cache.
The write cache size of each vnode is determined when the database is created. Two key parameters at database creation, vgroups and buffer, determine how many vgroups handle the database's data and how much write cache is allocated to each vnode.
The write cache size of each vnode is determined when the database is created. Two key parameters at database creation, vgroups and buffer, determine how many vgroups handle the database's data and how much write cache is allocated to each vnode. The unit of buffer is MB.
```sql
create database db0 vgroups 100 buffer 16MB
create database db0 vgroups 100 buffer 16
```
In theory, the larger the cache the better; however, beyond a certain threshold, a larger cache no longer improves write performance. In most cases, the default value is sufficient.
@ -28,10 +28,10 @@ create database db0 vgroups 100 buffer 16MB
## Metadata Cache
To handle queries and writes more efficiently, each vnode caches the metadata it has previously fetched. The metadata cache is determined by two database-creation parameters: pages and pagesize.
To handle queries and writes more efficiently, each vnode caches the metadata it has previously fetched. The metadata cache is determined by two database-creation parameters: pages and pagesize. The unit of pagesize is KB.
```sql
create database db0 pages 128 pagesize 16kb
create database db0 pages 128 pagesize 16
```
The statement above creates a metadata cache of 128 pages, 16 KB each, for every vnode of database db0.

View File

@ -17,7 +17,7 @@ TDengine supports defining UDFs in C/Python. The following, with examples,
- Aggregate functions must implement the aggregate interface functions aggfn_start, aggfn, and aggfn_finish.
- If initialization is needed, implement udf_init; if cleanup is needed, implement udf_destroy.
The interface function names are the UDF name, or the UDF name concatenated with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
The interface function names are the UDF name, or the UDF name concatenated with a specific suffix (`_start`, `_finish`, `_init`, `_destroy`). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
### Implementing a Scalar Function in C
The template for implementing a scalar function is as follows:

View File

@ -36,14 +36,15 @@ REST connections are supported on all platforms that can run Java.
| taos-jdbcdriver version | Major changes | TDengine version |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
| 3.2.2 | New feature: data subscription supports the seek function. | 3.0.5.0 and later |
| 3.2.1 | New features: WebSocket connections support schemaless and prepareStatement writes. Change: consumer poll now returns a ConsumerRecord result set; use value() to access the data. | 3.0.3.0 and later |
| 3.2.0 | Has connection issues; not recommended | - |
| 3.1.0 | WebSocket connections support the subscription feature | - |
| 3.0.1 - 3.0.4 | Fixed result-set data parsing errors in some cases. 3.0.1 is built in a JDK 11 environment; JDK 8 users are advised to use other versions | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and later |
| 2.0.42 | Fixed the wasNull return value in WebSocket connections | - |
| 2.0.41 | Fixed the username/password encoding in REST connections | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings | - |
| 2.0.38 | Added bulk pulling to JDBC REST connections | - |
| 2.0.37 | Added JSON tag support | - |
@ -287,10 +288,11 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pull result sets in batches when executing queries; false: pull result sets row by row. The default is false. Row-by-row pulling transfers data over HTTP. JDBC REST connections support bulk pulling, in which taos-jdbcdriver and TDengine transfer data over a WebSocket connection; compared with HTTP, WebSocket enables JDBC REST connections to support large-volume queries and improves query performance.
- charset: the character set used to parse string data when bulk pulling is enabled.
- batchErrorIgnore: true: when a SQL statement in the middle of Statement.executeBatch fails, the following SQL statements are still executed; false: no statements after the failed SQL are executed. The default is false.
- httpConnectTimeout: connection timeout in ms; default 5000.
- httpSocketTimeout: socket timeout in ms; default 5000. Effective only when batchfetch is false.
- messageWaitTimeout: message timeout in ms; default 3000. Effective only when batchfetch is true.
- httpConnectTimeout: connection timeout in ms; default 60000.
- httpSocketTimeout: socket timeout in ms; default 60000. Effective only when batchfetch is false.
- messageWaitTimeout: message timeout in ms; default 60000. Effective only when batchfetch is true.
- useSSL: whether SSL is used in the connection.
- httpPoolSize: REST concurrent request pool size; default 20.
**Note**: Some configuration items (such as locale and timezone) do not take effect in REST connections.
@ -354,10 +356,11 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; defaults to the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: effective only with JDBC native connections. The client locale; defaults to the current system locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: effective only with JDBC native connections. The time zone used by the client; defaults to the current system time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in ms; default 5000. Effective only with REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms; default 5000. Effective only with REST connections when batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms; default 3000. Effective only with REST connections when batchfetch is true.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in ms; default 60000. Effective only with REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms; default 60000. Effective only with REST connections when batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms; default 60000. Effective only with REST connections when batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether SSL is used in the connection. Effective only with REST connections.
- TSDBDriver.HTTP_POOL_SIZE: REST concurrent request pool size; default 20.
In addition, for JDBC native connections, other parameters such as the log level and SQL length can be specified via the URL and Properties. For more detailed configuration, see [Client Configuration](/reference/config/#仅客户端适用).
### Priority of Configuration Parameters
@ -422,6 +425,19 @@ while(resultSet.next()){
> Queries work the same way as with a relational database. When fetching returned fields by index, start from 1; fetching by field name is recommended.
### Executing SQL with reqId
This reqId can be used for request tracing.
```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
Timestamp ts = rs.getTimestamp(1);
}
```
### Writing Data via Parameter Binding
TDengine's JDBC native connection implementation significantly improves support for writing data via parameter binding (INSERT scenarios). Writing data this way avoids the resource cost of SQL parsing and can significantly improve write performance in many cases.
@ -939,6 +955,14 @@ public class SchemalessWsTest {
</TabItem>
</Tabs>
### Schemaless Writing with reqId
This reqId can be used for request tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
```
### Data Subscription
The TDengine Java connector supports subscription, with the following APIs:
@ -996,7 +1020,7 @@ while(true) {
#### Specifying the Subscription Offset
```
```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
@ -1005,6 +1029,29 @@ Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
Sample code:
```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(topic));
for (int i = 0; i < 10; i++) {
if (i == 3) {
// Saving consumption position
offset = consumer.position(topic);
}
if (i == 5) {
// reset consumption to the previously saved position
for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
consumer.seek(entry.getKey(), entry.getValue());
}
}
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
}
}
```
#### Closing the Subscription
```java

File diff suppressed because it is too large

View File

@ -30,21 +30,57 @@ WebSocket connections are supported on all platforms that can run Rust.
| Rust connector version | TDengine version | Major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.10 | 3.0.5.0 or later | Message subscription: get consumption progress and consume from a specified offset. |
| v0.8.12 | 3.0.5.0 or later | Message subscription: get consumption progress and consume from a specified offset. |
| v0.8.0 | 3.0.4.0 | Supports schemaless writing. |
| v0.7.6 | 3.0.3.0 | Supports using req_id in requests. |
| v0.6.0 | 3.0.0.0 | Basic features. |
The Rust connector is still under rapid development, and backward compatibility is not guaranteed before 1.0. TDengine 3.0 or later is recommended to avoid known issues.
## Installation
## Error Handling
After an error occurs, the detailed error information can be obtained:
```rust
match conn.exec(sql) {
Ok(_) => {
Ok(())
}
Err(e) => {
eprintln!("ERROR: {:?}", e);
Err(e)
}
}
```
## TDengine DataType vs. Rust DataType
TDengine currently supports the timestamp, numeric, character, and boolean types, which map to the Rust types as follows:
| TDengine DataType | Rust DataType |
| ----------------- | ----------------- |
| TIMESTAMP | Timestamp |
| INT | i32 |
| BIGINT | i64 |
| FLOAT | f32 |
| DOUBLE | f64 |
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| NCHAR | String |
| JSON | serde_json::Value |
**Note**: The JSON type is supported only in tags.
## Installation Steps
### Pre-installation Preparation
* Install the Rust development toolchain
* If using the native connection, install the TDengine client driver; see [Install Client Driver](../#安装客户端驱动) for the detailed steps
### Add the taos Dependency
### Install the Connector
Depending on the chosen connection method, add the [taos][taos] dependency to your [Rust](https://rust-lang.org) project as follows:
@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build();
// use websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let conn2 = builder2.build();
```
After the connection is established, you can perform database operations:
@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
## Usage Examples
### Writing Data
### Creating Databases and Tables
#### SQL Writing
```rust
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "query";
// create database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
// create table
taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
]).await?;
    Ok(())
}
```
> **Note**: If you do not specify the database with `use db`, all subsequent table operations must prefix the table name with the database name, e.g. db.tb.
### Inserting Data
<RustInsert />
#### STMT Writing
<RustBind />
#### Schemaless Writing
<RustSml />
### Querying Data
<RustQuery />
## API Reference
### Executing SQL with req_id
### Connection Builder
Build a connection builder from a DSN.
This req_id can be used for request tracing.
```rust
let cfg = TaosBuilder::default().build()?;
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
```
Use the `builder` object to create multiple connections:
### Writing Data via Parameter Binding
TDengine's Rust connector supports parameter binding for data writes (INSERT scenarios). Writing data this way avoids the resource cost of SQL parsing and can significantly improve write performance in many cases.
For details of the parameter binding interface, see the [API Reference](#stmt-api).
<RustBind />
### Schemaless Writing
TDengine supports schemaless writing, which is compatible with the InfluxDB line protocol, the OpenTSDB telnet protocol, and the OpenTSDB JSON protocol. For details, see [Schemaless Writing](../../reference/schemaless/).
<RustSml />
### Schemaless Writing with req_id
This req_id can be used for request tracing.
```rust
let conn: Taos = cfg.build();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(100u64)
.build()?;
client.put(&sml_data)?
```
### Connection Pool
### Data Subscription
TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
#### Creating a Topic
```rust
taos.exec_many([
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
```
#### Creating a Consumer
Build a TMQ connector from the DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
#### Subscribing and Consuming Data
A consumer can subscribe to one or more `TOPIC`s.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type; use the corresponding APIs to consume each message, and mark it as consumed with `.commit`.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get the consumption progress:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
#### Specifying the Subscription Offset
Consume from a specified offset:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
#### Closing the Subscription
```rust
consumer.unsubscribe().await;
```
For the TMQ DSN, the following options can be configured; note that `group.id` is mandatory.
- `group.id`: consumers within the same consumer group load-balance messages with at-least-once semantics.
- `client.id`: an optional client identifier for the subscription.
- `auto.offset.reset`: optionally initializes the subscription start position; *earliest* subscribes from the beginning, *latest* subscribes only from the newest data, and the default is from the beginning. Note that this option takes effect only once for the same `group.id`.
- `enable.auto.commit`: when set to `true`, enables auto-commit mode; this can be used when data consistency is not critical.
- `auto.commit.interval.ms`: the auto-commit interval.
#### Complete Example
For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
### Using with a Connection Pool
For complex applications, enabling a connection pool is recommended. The connection pool of [taos] uses [deadpool] by default (async mode).
@ -295,7 +482,17 @@ let pool: Pool<TaosBuilder> = Pool::builder(Manager::from_dsn(self.dsn.clone()).
let taos = pool.get()?;
```
### Connection
### More Sample Programs
The sample program sources are located under `TDengine/examples/rust`:
See: [rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)
## FAQ
See the [FAQ](../../../train-faq/faq).
## API Reference
The [Taos][struct.Taos] object provides several APIs for database operations:
@ -381,9 +578,13 @@ let taos = pool.get()?;
- `.create_database(database: &str)`: executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: executes the `USE` statement.
In addition, this structure is also the entry point for the [parameter binding](#参数绑定接口) and [line protocol](#行协议接口) interfaces; see the specific API descriptions for usage.
In addition, this structure is also the entry point for the parameter binding and line protocol interfaces; see the specific API descriptions for usage.
### Parameter Binding Interface
<p>
<a id="stmt-api" style={{color:'#141414'}}>
Parameter Binding Interface
</a>
</p>
Similar to the C interface, Rust provides a parameter binding interface. First, create a parameter binding object [Stmt] for an SQL statement via the [Taos][struct.Taos] object:
@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
The parameter binding object provides a set of interfaces for parameter binding:
#### `.set_tbname(name)`
`.set_tbname(name)`
Binds the table name.
@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
#### `.set_tags(&[tag])`
`.set_tags(&[tag])`
When the SQL statement targets a super table, binds the subtable name and the tag values:
@ -413,7 +614,7 @@ stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
#### `.bind(&[column])`
`.bind(&[column])`
Binds value columns. Build the required types with the [ColumnView] structure and bind them:
@ -437,7 +638,7 @@ let params = vec![
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
#### `.execute()`
`.execute()`
Executes the SQL. A [Stmt] object can be reused: after execution, it can be bound again and executed again. Before executing, make sure all data has been added to the queue via `.add_batch`.
@ -452,92 +653,6 @@ stmt.execute()?;
For a runnable example, see the [example on GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
### Subscription
TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
Build a TMQ connector from the DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
A consumer can subscribe to one or more `TOPIC`s.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type; use the corresponding APIs to consume each message, and mark it as consumed with `.commit`.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get the consumption progress:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
Consume from a specified offset:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
Stop the subscription:
```rust
consumer.unsubscribe().await;
```
For the TMQ DSN, the following options can be configured; note that `group.id` is mandatory.
- `group.id`: consumers within the same consumer group load-balance messages with at-least-once semantics.
- `client.id`: an optional client identifier for the subscription.
- `auto.offset.reset`: optionally initializes the subscription start position; *earliest* subscribes from the beginning, *latest* subscribes only from the newest data, and the default is from the beginning. Note that this option takes effect only once for the same `group.id`.
- `enable.auto.commit`: when set to `true`, enables auto-commit mode; this can be used when data consistency is not critical.
- `auto.commit.interval.ms`: the auto-commit interval.
For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For usage of other related structure APIs, see the Rust documentation: <https://docs.rs/taos>.

View File

@ -25,6 +25,46 @@ The source code of the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-con
Whichever TDengine version you use, the latest version of `taospy` is recommended.
|Python Connector version|Major changes|
|:-------------------:|:----:|
|2.7.9|Data subscription supports getting and resetting the consumption progress|
|2.7.8|Added `execute_many`|
|Python WebSocket Connector version|Major changes|
|:----------------------------:|:-----:|
|0.2.5|1. Data subscription supports getting and resetting the consumption progress <br/> 2. Supports schemaless <br/> 3. Supports STMT|
|0.2.4|Added an unsubscribe method to data subscription|
## Exception Handling
The Python connector may raise the following types of exceptions:
- exceptions of the Python connector itself
- exceptions of the native connection method
- exceptions of the websocket connection method
- data subscription exceptions
- exceptions of other TDengine feature modules
|Error Type|Description|Suggested Actions|
|:--------:|:---------:|:---------------:|
|InterfaceError|the taosc version is too low and does not support the interface used|check the TDengine client version|
|ConnectionError|database connection error|check the TDengine server status and the connection parameters|
|DatabaseError|database error|check the TDengine server version and upgrade the Python connector to the latest version|
|OperationalError|operational error|API usage error; check your code|
|ProgrammingError|||
|StatementError|stmt-related exception||
|ResultError|||
|SchemalessError|schemaless-related exception||
|TmqError|tmq-related exception||
In Python, exceptions are usually handled with try-except; for exception handling, see the [Python Errors and Exceptions documentation](https://docs.python.org/3/tutorial/errors.html).
All database operations of the Python Connector raise exceptions directly if an error occurs; the application is responsible for handling them. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
## Supported Features
- The native connection supports all core TDengine features, including connection management, SQL execution, parameter binding, subscription, and schemaless writing.
@ -32,7 +72,7 @@ The source code of the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-con
## Installation
### Preparation
### Pre-installation Preparation
1. Install Python. Recent versions of the taospy package require Python 3.6.2+; earlier versions require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not installed yet, see the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download).
2. Install [pip](https://pypi.org/project/pip/). In most cases the Python installer ships with pip; otherwise see the [pip documentation](https://pip.pypa.io/en/stable/installation/).
@ -274,7 +314,7 @@ Transfer-Encoding: chunked
</TabItem>
</Tabs>
## Sample Programs
## Usage Examples
### Basic Usage
@ -343,6 +383,10 @@ The TaosCursor class uses a native connection for write and query operations. In a multi-threaded client
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
#### Using the Connection Class
The `Connection` class implements the PEP249 Connection interface (e.g. the cursor and close methods) and also provides many extensions (e.g. the execute, query, schemaless_insert, and subscribe methods).
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
@ -353,6 +397,46 @@ The TaosCursor class uses a native connection for write and query operations. In a multi-threaded client
</TabItem>
</Tabs>
### Querying Data
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
The `query` method of the `TaosConnection` class can be used to query data and returns result data of type `TaosResult`.
```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```
:::tip
The query results can only be fetched once. For example, in the example above only one of `fetch_all()` and `fetch_all_into_dict()` can be used; fetching again returns an empty list.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
The RestClient class is a direct wrapper of the REST API. It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
```
For a more detailed description of the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `query` method of the `TaosConnection` class can be used to query data and returns result data of type `TaosResult`.
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
</TabItem>
</Tabs>
### Using with req_id
Use the optional req_id parameter to specify a request ID, which can be used for tracing.
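For example, a minimal sketch of passing a request ID over a native connection (the `req_id` keyword argument is assumed to be available in recent taospy releases; the SQL statement and ID value are arbitrary):

```python
import taos

conn = taos.connect()
# A caller-chosen integer req_id lets this request be correlated in tracing systems.
result = conn.query("show databases", req_id=2)
print(result.fetch_all())
conn.close()
```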
@ -456,27 +540,169 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
### Data Subscription
The connector supports data subscription; for details, see [Data Subscription](../../develop/tmq/).
The connector supports data subscription; for details, see the [Data Subscription documentation](../../develop/tmq/).
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
`Consumer` provides the Python connector API for subscribing to TMQ data; for the related API definitions, see the [Data Subscription documentation](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api).
`Consumer` provides the Python connector API for subscribing to TMQ data.
#### Creating a Consumer
The syntax for creating a Consumer is `consumer = Consumer(configs)`; for the parameter definitions, see the [Data Subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
```python
from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
#### Subscribing to Topics
The `subscribe` method of the Consumer API subscribes to topics; a consumer can subscribe to multiple topics at the same time.
```python
consumer.subscribe(['topic1', 'topic2'])
```
#### Consuming Data
The `poll` method of the Consumer API consumes data. It takes a float timeout in seconds, and returns a Message if data arrives before the timeout or `None` on timeout. The consumer must check the error information of the returned data via the Message's `error()` method.
```python
while True:
res = consumer.poll(1)
if not res:
continue
err = res.error()
if err is not None:
raise err
val = res.value()
for block in val:
print(block.fetchall())
```
#### Getting the Consumption Progress
The `assignment` method of the Consumer API gets the consumption progress of all topics subscribed by the Consumer, returned as a list of TopicPartition.
```python
assignments = consumer.assignment()
```
#### Resetting the Consumption Progress
The `seek` method of the Consumer API resets the Consumer's consumption progress to a specified position; the method parameter type is TopicPartition.
```python
from taos.tmq import TopicPartition  # TopicPartition is provided by the taos.tmq module

tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```
#### Ending Consumption
After consumption is finished, unsubscribe and close the Consumer.
```python
consumer.unsubscribe()
consumer.close()
```
#### TMQ subscription sample code
```python
{{#include docs/examples/python/tmq_example.py}}
```
#### Sample code for getting and resetting the consumption progress
```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
In addition to the native connection, the Python connector also supports subscribing to TMQ data via websocket.
In addition to the native connection, the Python connector also supports subscribing to TMQ data via websocket; subscribing to TMQ data over websocket requires installing `taos-ws-py`.
The taosws `Consumer` API provides websocket-based subscription to TMQ data.
#### Creating a Consumer
The syntax for creating a Consumer is `consumer = Consumer(conf=configs)`; the `td.connect.websocket.scheme` parameter must be set to "ws". For the parameter definitions, see the [Data Subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
```python
import taosws
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
#### Subscribing to Topics
The `subscribe` method of the Consumer API subscribes to topics; a consumer can subscribe to multiple topics at the same time.
```python
consumer.subscribe(['topic1', 'topic2'])
```
#### Consuming Data
The `poll` method of the Consumer API consumes data. It takes a float timeout in seconds, and returns a Message if data arrives before the timeout or `None` on timeout. The consumer must check the error information of the returned data via the Message's `error()` method.
```python
while True:
    message = consumer.poll(timeout=1.0)
    if not message:
        continue
    err = message.error()
    if err is not None:
        raise err
    for block in message:
        for row in block:
            print(row)
```
#### Getting the Consumption Progress
The `assignment` method of the Consumer API gets the consumption progress of all topics subscribed by the Consumer, returned as a list of TopicPartition.
```python
assignments = consumer.assignment()
```
#### Resetting the Consumption Progress
The `seek` method of the Consumer API resets the Consumer's consumption progress to a specified position.
```python
consumer.seek(topic='topic1', partition=0, offset=0)
```
#### Ending Consumption
After consumption is finished, unsubscribe and close the Consumer.
```python
consumer.unsubscribe()
consumer.close()
```
#### TMQ subscription sample code
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
The connector provides an `assignment` interface to get the topic assignment and query the consumption progress of subscribed topics, and a `seek` interface to reset the consumption progress.
#### Sample code for getting and resetting the consumption progress
```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
</TabItem>
</Tabs>
@ -530,7 +756,142 @@ The TaosCursor class uses a native connection for write and query operations. In a multi-threaded client
</TabItem>
</Tabs>
### Other Sample Programs
### Writing Data via Parameter Binding
The TDengine Python connector supports writing data through the parameter-binding Prepare API. As with most databases, TDengine currently only supports the question mark `?` to represent a parameter to be bound.
<Tabs>
<TabItem value="native" label="native connection">
#### Creating a stmt
The `Connection` of the Python connector provides a `statement` method for creating the parameter-binding object stmt. The method accepts an SQL string as its argument; the SQL string currently only supports `?` as the placeholder for parameters to be bound.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
#### Parameter Binding
Call the `new_multi_binds` function to create a params list for parameter binding.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the stmt's `bind_param` method (to set values for a single row) or `bind_param_batch` method (to set values for multiple rows) to bind the parameters.
```
stmt.bind_param_batch(params)
```
#### Executing SQL
Call the stmt's `execute` method to execute the SQL:
```
stmt.execute()
```
#### Closing the stmt
Finally, close the stmt.
```
stmt.close()
```
#### Sample Code
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
#### Creating a stmt
The `Connection` of the Python WebSocket connector provides a `statement` method for creating the parameter-binding object stmt. The method accepts an SQL string as its argument; the SQL string currently only supports `?` as the placeholder for parameters to be bound.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
#### Preparing SQL
Call the stmt's `prepare` method to parse the insert statement.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
#### Parameter Binding
Call the stmt's `bind_param` method to bind the parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the stmt's `add_batch` method to add the parameters to the batch.
```
stmt.add_batch()
```
#### Executing SQL
Call the stmt's `execute` method to execute the SQL:
```
stmt.execute()
```
#### Closing the stmt
Finally, close the stmt.
```
stmt.close()
```
#### Sample Code
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### More Sample Programs
| Sample program link | Sample program content |
| ------------------------------------------------------------------------------------------------------------- | ----------------------- |
@ -542,14 +903,6 @@ The TaosCursor class uses a native connection for write and query operations. In a multi-threaded client
## Other Notes
### Exception Handling
All database operations raise an exception directly if an error occurs; the application is responsible for handling it. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
### About Nanoseconds
Because Python's nanosecond support is currently incomplete (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type returned at ms and us precision. Application developers need to handle this themselves; using pandas' to_datetime() is recommended. If Python officially adds full nanosecond support in the future, the Python connector may change the related interfaces.
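For example (a minimal sketch; the integer values are made up and pandas is assumed to be installed):

```python
import pandas as pd

# Nanosecond-precision timestamps arrive as plain integers
ns_values = [1686844800000000001, 1686844800000000002]
# pandas Timestamps retain full nanosecond resolution
print(pd.to_datetime(ns_values, unit="ns"))
```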

View File

@ -45,9 +45,8 @@ TDengine version updates often add new features; the connectors in the list
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
| **Data Subscription (TMQ)** | Not supported yet | Support | Support | Support | Support | Support |
| **Data Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
:::info
Because the database framework specifications differ across programming languages, it does not mean that all C/C++ interfaces need corresponding wrappers.
@ -59,11 +58,10 @@ TDengine version updates often add new features; the connectors in the list
| ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not supported yet | Not supported yet | Support | Support | Not supported yet | Support |
| **Parameter Binding** | Support | Not supported yet | Support | Support | Not supported yet | Support |
| **Data Subscription (TMQ)** | Support | Support | Support | Not supported yet | Not supported yet | Support |
| **Schemaless** | Support | Not supported yet | Not supported yet | Not supported yet | Not supported yet | Not supported yet |
| **Schemaless** | Support | Not supported yet | Support | Not supported yet | Not supported yet | Support |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
:::warning

View File

@ -81,7 +81,7 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to data
| 3 | ntables | INT | Number of tables in the database, including subtables and normal tables but excluding super tables |
| 4 | vgroups | INT | Number of vgroups in the database. Note that `vgroups` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. Note that `replica` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | BINARY(3) | Strong consistency. Note that `strict` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Deprecated parameter |
| 8 | duration | INT | Time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | buffer | INT | Size of each vnode's write cache memory block, in MB. Note that `buffer` is a TDengine keyword and must be escaped with ` when used as a column name. |

View File

@ -19,15 +19,19 @@ taosd -C
## Specifying a Configuration File for the Client
TDengine's interactive client application is taos (together with the application driver). It can share the configuration file taos.cfg with taosd or use a separately specified one. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in the taos.cfg under /home/cfg/ are used; the default directory is /etc/taos. For more usage of taos, see the help output of `taos --help`
TDengine's interactive client application is taos (together with the application driver). It can share the configuration file taos.cfg with taosd or use a separately specified one. When running taos, use the -c parameter to specify the configuration file directory, e.g.:
```
taos -c /home/cfg
```
which means the parameters in the taos.cfg under /home/cfg/ are used; the default directory is /etc/taos. You can also use `-C` to display the current server configuration:
```bash
taos -C
```
```bash
taos --dump-config
```
For more usage of taos, see the help output of `taos --help`
## Detailed List of Configuration Parameters
@ -139,6 +143,8 @@ taos --dump-config
:::note
Please note that the full monitoring solution requires installing and running the `taoskeeper` service, which receives monitoring metric data and creates the `log` database.
:::
### monitor
| Attribute | Description |

67
docs/zh/20-third-party/13-dbeaver.md vendored Normal file
View File

@ -0,0 +1,67 @@
---
sidebar_label: DBeaver
title: DBeaver
description: A detailed guide to accessing TDengine data with DBeaver
---
DBeaver is a popular cross-platform database management tool that makes it easy for developers, database administrators, and data analysts to manage data. Starting from version 23.1.1, DBeaver has built-in support for TDengine, covering both standalone TDengine clusters and TDengine Cloud.
## Prerequisites
### Install DBeaver
Using DBeaver to manage TDengine requires the following preparations.
- Install DBeaver. DBeaver supports mainstream operating systems, including Windows, macOS, and Linux. Make sure to [download](https://dbeaver.io/download/) the installer for the correct platform and version (23.1.1+). For detailed installation steps, see the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation).
- If you use a standalone TDengine cluster, make sure TDengine is running normally and that taosAdapter is installed and running; for details, see the [taosAdapter user manual](/reference/taosadapter).
- If you use TDengine Cloud, please [register](https://cloud.taosdata.com/) for an account.
## Usage
### Using DBeaver to access an on-premises TDengine
1. Start the DBeaver application, click the button or menu item "Connect to a database", and then select TDengine in the Timeseries category.
![DBeaver connects to TDengine](./dbeaver/dbeaver-connect-tdengine-zh.webp)
2. Configure the TDengine connection by filling in the host address, port, username, and password. If TDengine is deployed on the local machine, you can fill in only the username and password; the default username is root and the default password is taosdata. Click "Test Connection" to check whether the connection works. If the TDengine Java connector is not installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-zh.webp)
3. A successful connection is displayed as shown in the figure below. If the connection fails, check whether the TDengine service and taosAdapter are running correctly and whether the host address, port, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-zh.webp)
4. Use DBeaver to select databases and tables and browse the data in the TDengine service.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-zh.webp)
5. You can also operate on TDengine data by executing SQL commands.
![DBeaver SQL commands](./dbeaver/dbeaver-sql-execution-zh.webp)
### Using DBeaver to access TDengine Cloud
1. Log in to the TDengine Cloud service, select "Programming" and "Java" in the management console, and then copy the string value of TDENGINE_JDBC_URL.
![Copy the TDengine Cloud DSN](./dbeaver/tdengine-cloud-jdbc-dsn-zh.webp)
2. Start the DBeaver application, click the button or menu item "Connect to a database", and then select TDengine Cloud in the Timeseries category.
![DBeaver connects to TDengine Cloud](./dbeaver/dbeaver-connect-tdengine-cloud-zh.webp)
3. Configure the TDengine Cloud connection by filling in the JDBC_URL value, then click "Test Connection". If the TDengine Java connector is not installed on the local machine, DBeaver will prompt you to download and install it. A successful connection is displayed as shown in the figure below. If the connection fails, check whether the TDengine Cloud service is running and whether the JDBC_URL is correct.
![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp)
4. Use DBeaver to select databases and tables and browse the data in the TDengine Cloud service.
![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-cloud-data-zh.webp)
5. You can also operate on TDengine Cloud data by executing SQL commands.
![DBeaver SQL commands on TDengine Cloud](./dbeaver/dbeaver-sql-execution-cloud-zh.webp)


View File

@ -165,9 +165,7 @@ A vnode maintains a data version number (version) and persists the in-memory data
### Synchronous Replication
For scenarios with higher data consistency requirements, the eventual consistency provided by asynchronous replication cannot meet the requirements, so TDengine provides a synchronous replication mechanism for users to choose. When creating a database, in addition to specifying the replica count `replica`, the user must also specify a new parameter `strict`. If `strict` equals 1, each time the leader forwards data to the replicas it must wait for more than half of the replicas to reach agreement before notifying the application that the data has been written successfully on the followers. If confirmation from more than half of the replicas is not obtained within a certain time, the leader vnode returns an error to the application.
With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
For scenarios with higher data consistency requirements, the eventual consistency provided by asynchronous replication cannot meet the requirements, so TDengine 3.0 uses a synchronous replication mechanism (a standard implementation following the RAFT protocol). Each time the leader vnode forwards data to the other replicas, it must wait for more than half of the replicas (including itself) to reach agreement before notifying the application that the write succeeded. If confirmation from more than half of the replicas is not obtained within a certain time, the leader vnode returns an error to the application.
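As a toy illustration of the majority-acknowledgment rule described above (a sketch of the rule only, not TDengine's actual implementation):

```python
def write_succeeds(total_replicas: int, acks: int) -> bool:
    # A write succeeds only when more than half of all replicas
    # (the leader counts itself) have confirmed it.
    return acks > total_replicas // 2

assert write_succeeds(3, 2) is True   # 2 of 3 replicas confirmed
assert write_succeeds(3, 1) is False  # only the leader holds the data
```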
## Caching and Persistence

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />

View File

@ -10,6 +10,10 @@ Download links for taosTools installers by version are as follows:
import Release from "/components/ReleaseV3";
## 2.5.2
<Release type="tools" version="2.5.2" />
## 2.5.1
<Release type="tools" version="2.5.1" />

View File

@ -216,7 +216,7 @@ bool fmIsUserDefinedFunc(int32_t funcId);
bool fmIsDistExecFunc(int32_t funcId);
bool fmIsForbidFillFunc(int32_t funcId);
bool fmIsForbidStreamFunc(int32_t funcId);
bool fmIsForbidSuperTableFunc(int32_t funcId);
bool fmIsForbidSysTableFunc(int32_t funcId);
bool fmIsIntervalInterpoFunc(int32_t funcId);
bool fmIsInterpFunc(int32_t funcId);
bool fmIsLastRowFunc(int32_t funcId);

View File

@ -704,6 +704,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TAGS_PC TAOS_DEF_ERROR_CODE(0, 0x2665)
#define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666)
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner

View File

@ -43,7 +43,7 @@ typedef struct SMultiwayMergeTreeInfo {
int32_t tMergeTreeCreate(SMultiwayMergeTreeInfo **pTree, uint32_t numOfEntries, void *param,
__merge_compare_fn_t compareFn);
void tMergeTreeDestroy(SMultiwayMergeTreeInfo *pTree);
void tMergeTreeDestroy(SMultiwayMergeTreeInfo **pTree);
void tMergeTreeAdjust(SMultiwayMergeTreeInfo *pTree, int32_t idx);
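The signature change from `SMultiwayMergeTreeInfo *` to `SMultiwayMergeTreeInfo **` lets the destroy routine clear the caller's handle. A minimal sketch of the destroy-and-null idiom it enables (hypothetical names, not the TDengine implementation):

```c
#include <stdlib.h>

typedef struct Tree { int nodes; } Tree;

// Passing a pointer-to-pointer lets destroy null out the caller's handle,
// so an accidental second destroy becomes a harmless no-op.
static void treeDestroy(Tree **ppTree) {
  if (ppTree == NULL || *ppTree == NULL) return;
  free(*ppTree);
  *ppTree = NULL;
}
```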

View File

@ -24,7 +24,8 @@ extern "C" {
typedef struct SLRUCache SLRUCache;
typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value);
typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value, void *ud);
typedef int (*_taos_lru_functor_t)(const void *key, size_t keyLen, void *value, void *ud);
typedef struct LRUHandle LRUHandle;
@ -41,10 +42,11 @@ SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoo
void taosLRUCacheCleanup(SLRUCache *cache);
LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge,
_taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority);
_taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud);
LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen);
void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen);
void taosLRUCacheApply(SLRUCache *cache, _taos_lru_functor_t functor, void *ud);
void taosLRUCacheEraseUnrefEntries(SLRUCache *cache);
bool taosLRUCacheRef(SLRUCache *cache, LRUHandle *handle);
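The extra `void *ud` parameter threaded through `_taos_lru_deleter_t` and `taosLRUCacheInsert` gives eviction callbacks access to caller-supplied state; later in this diff it carries the tsdb flush state. A minimal sketch of a deleter using it (hypothetical names):

```c
#include <stddef.h>
#include <stdlib.h>

typedef struct FlushState { int flushCount; } FlushState;

// The new deleter signature carries user data, so eviction can reach
// per-cache state (e.g. a flush counter) instead of relying on globals.
static void myDeleter(const void *key, size_t keyLen, void *value, void *ud) {
  (void)key; (void)keyLen;
  FlushState *state = (FlushState *)ud;  // per-cache context threaded through
  state->flushCount++;                   // e.g. account for a write-back
  free(value);
}
```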

View File

@ -1699,6 +1699,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
len += lenTmp;
pStart += lenTmp;
int32_t estimateColLen = 0;
for (int32_t j = 0; j < numOfRows; ++j) {
if (offset[j] == -1) {
continue;
@ -1708,20 +1709,21 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
int32_t jsonInnerType = *data;
char* jsonInnerData = data + CHAR_BYTES;
if (jsonInnerType == TSDB_DATA_TYPE_NULL) {
len += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L));
estimateColLen += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L));
} else if (tTagIsJson(data)) {
len += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len);
estimateColLen += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len);
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
len += varDataTLen(jsonInnerData) + CHAR_BYTES * 2;
estimateColLen += varDataTLen(jsonInnerData) + CHAR_BYTES * 2;
} else if (jsonInnerType == TSDB_DATA_TYPE_DOUBLE) {
len += (VARSTR_HEADER_SIZE + 32);
estimateColLen += (VARSTR_HEADER_SIZE + 32);
} else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
len += (VARSTR_HEADER_SIZE + 5);
estimateColLen += (VARSTR_HEADER_SIZE + 5);
} else {
tscError("estimateJsonLen error: invalid type:%d", jsonInnerType);
return -1;
}
}
len += TMAX(colLen, estimateColLen);
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
int32_t lenTmp = numOfRows * sizeof(int32_t);
len += (lenTmp + colLen);
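The fix above keeps a separate per-column estimate and then charges the larger of the actual column length and the JSON estimate, so an undersized JSON expansion can no longer shrink the total below what the raw column already occupies. The rule in sketch form (hypothetical names):

```c
#define TMAX(a, b) ((a) > (b) ? (a) : (b))

// The total must cover at least the raw column bytes; the JSON rendering may
// need more, so each column contributes the larger of the two estimates.
static int addJsonColumn(int len, int colLen, int estimateColLen) {
  return len + TMAX(colLen, estimateColLen);
}
```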

View File

@ -202,7 +202,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
@ -410,7 +410,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
@ -436,19 +436,20 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
size_t valueLen = 0;
bool valueEscaped = false;
size_t valueLenEscaped = 0;
bool isInQuote = false;
int quoteNum = 0;
const char *escapeChar = NULL;
while (*sql < sqlEnd) {
// parse value
if (unlikely(*(*sql) == QUOTE && (*(*sql - 1) != SLASH || (*sql - 1) == escapeChar))) {
isInQuote = !isInQuote;
quoteNum++;
(*sql)++;
continue;
}
if (!isInQuote) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
if(quoteNum > 2){
break;
}
continue;
}
if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql)))) {
break;
}
if (IS_SLASH_LETTER_IN_FIELD_VALUE(*sql) && (*sql - 1) != escapeChar) {
escapeChar = *sql;
@ -460,8 +461,8 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
}
valueLen = *sql - value;
if (unlikely(isInQuote)) {
smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value);
if (unlikely(quoteNum != 0 && quoteNum != 2)) {
smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(valueLen == 0)) {
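The `quoteNum` counter replaces the old `isInQuote` toggle: a well-formed field value carries either no quotes or exactly one balanced pair, and parsing stops at an unquoted separator only after at most one pair has closed. The acceptance rule, sketched (hypothetical name):

```c
#include <stdbool.h>

// A field value is well formed only when it is unquoted (0 quotes) or wrapped
// in exactly one balanced pair (2 quotes); any other count is rejected,
// mirroring the quoteNum != 0 && quoteNum != 2 check above.
static bool quotesBalanced(int quoteNum) {
  return quoteNum == 0 || quoteNum == 2;
}
```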

View File

@ -224,6 +224,8 @@ TEST(testCase, smlParseCols_Error_Test) {
"st,tt=aa c 1=2 1626006833639000000,",
//field value double quote,slash
"st,tt=aa c=\"a\"a\" 1626006833639000000,",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" co l0=\"col0_value\",col1=\"col1_value\" 1680918783010000000",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" col0=\"co\"l\"0_value\",col1=\"col1_value\" 1680918783010000000"
};
SSmlHandle *info = smlBuildSmlInfo(NULL);

View File

@ -1713,7 +1713,8 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
// clear the offset value of the unused entries.
memset(&pColInfoData->varmeta.offset[total - n], 0, n);
@ -1745,7 +1746,7 @@ int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n) {
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
} else { // reset the bitmap value
/*int32_t stopIndex = BitmapLen(n) * 8;

View File

@ -467,6 +467,7 @@ typedef struct {
int8_t replica;
int16_t numOfColumns;
int32_t numOfRows;
int32_t curIterPackedRows;
void* pIter;
SMnode* pMnode;
STableMetaRsp* pMeta;

View File

@ -620,6 +620,8 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
}
}
buf = taosDecodeString(buf, &pSub->qmsg);
}else{
pSub->qmsg = taosStrdup("");
}
return (void *)buf;
}

View File

@ -764,11 +764,101 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
return numOfRows;
}
/**
* @param pConn the connection to pack queries from
* @param[out] pBlock the block the query rows are packed into
* @param offset skip the first [offset] queries in pConn
* @param rowsToPack the maximum number of rows to pack
* @return the number of rows packed
*/
static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBlock* pBlock, uint32_t offset, uint32_t rowsToPack) {
int32_t cols = 0;
taosRLockLatch(&pConn->queryLock);
int32_t numOfQueries = taosArrayGetSize(pConn->pQueries);
if (NULL == pConn->pQueries || numOfQueries <= offset) {
taosRUnLockLatch(&pConn->queryLock);
return 0;
}
int32_t i = offset;
for (; i < numOfQueries && (i - offset) < rowsToPack; ++i) {
int32_t curRowIndex = pBlock->info.rows;
SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i);
cols = 0;
char queryId[26 + VARSTR_HEADER_SIZE] = {0};
sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid);
varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->id, false);
char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE];
STR_TO_VARSTR(app, pConn->app);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)app, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->pid, false);
char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(user, pConn->user);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)user, false);
char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port);
varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)endpoint, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stime, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->useconds, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false);
char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t strSize = sizeof(subStatus);
int32_t offset = VARSTR_HEADER_SIZE;
for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) {
if (i) {
offset += snprintf(subStatus + offset, strSize - offset - 1, ",");
}
SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i);
offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status);
}
varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, subStatus, false);
char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(sql, pQuery->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)sql, false);
pBlock->info.rows++;
}
taosRUnLockLatch(&pConn->queryLock);
return i - offset;
}
static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SMnode * pMnode = pReq->info.node;
SSdb * pSdb = pMnode->pSdb;
int32_t numOfRows = 0;
int32_t cols = 0;
SConnObj *pConn = NULL;
if (pShow->pIter == NULL) {
@ -776,6 +866,16 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
}
// a non-zero value means some rows were already packed from this conn last time
if (pShow->curIterPackedRows > 0) {
size_t len = 0;
pConn = taosCacheIterGetData(pShow->pIter, &len);
if (pConn && (taosArrayGetSize(pConn->pQueries) > pShow->curIterPackedRows)) {
numOfRows = packQueriesIntoBlock(pShow, pConn, pBlock, pShow->curIterPackedRows, rows);
pShow->curIterPackedRows += numOfRows;
}
}
while (numOfRows < rows) {
pConn = mndGetNextConn(pMnode, pShow->pIter);
if (pConn == NULL) {
@ -783,85 +883,10 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
break;
}
taosRLockLatch(&pConn->queryLock);
if (NULL == pConn->pQueries || taosArrayGetSize(pConn->pQueries) <= 0) {
taosRUnLockLatch(&pConn->queryLock);
continue;
}
int32_t numOfQueries = taosArrayGetSize(pConn->pQueries);
for (int32_t i = 0; i < numOfQueries && numOfRows < rows; ++i) {
SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i);
cols = 0;
char queryId[26 + VARSTR_HEADER_SIZE] = {0};
sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid);
varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->id, false);
char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE];
STR_TO_VARSTR(app, pConn->app);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)app, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->pid, false);
char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(user, pConn->user);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)user, false);
char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port);
varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)endpoint, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stime, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->useconds, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stableQuery, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->subPlanNum, false);
char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t strSize = sizeof(subStatus);
int32_t offset = VARSTR_HEADER_SIZE;
for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) {
if (i) {
offset += snprintf(subStatus + offset, strSize - offset - 1, ",");
}
SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i);
offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status);
}
varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, subStatus, false);
char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(sql, pQuery->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
numOfRows++;
}
taosRUnLockLatch(&pConn->queryLock);
int32_t packedRows = packQueriesIntoBlock(pShow, pConn, pBlock, 0, rows - numOfRows);
pShow->curIterPackedRows = packedRows;
numOfRows += packedRows;
}
pShow->numOfRows += numOfRows;
return numOfRows;
}
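The `curIterPackedRows` field added above makes this retrieval resumable: when the result block fills up partway through a connection's query list, the next call skips the rows already emitted. A minimal, runnable sketch of the same resume-from-offset pattern (names are hypothetical, not TDengine APIs):

```c
#include <stdio.h>

// Pack up to 'capacity' items starting at 'offset'; return how many were packed.
// The caller keeps the running offset across calls, like curIterPackedRows.
static int packFrom(const int *items, int total, int offset, int capacity) {
  int i = offset;
  for (; i < total && (i - offset) < capacity; ++i) {
    printf("%d ", items[i]);  // stand-in for copying a row into the block
  }
  return i - offset;
}

int main(void) {
  int items[5] = {1, 2, 3, 4, 5};
  int resumeOffset = 0;
  resumeOffset += packFrom(items, 5, resumeOffset, 2);  // packs 1 2
  resumeOffset += packFrom(items, 5, resumeOffset, 2);  // resumes at 3
  return 0;
}
```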

View File

@ -80,14 +80,50 @@ IF (TD_VNODE_PLUGINS)
)
ENDIF ()
target_include_directories(
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
IF (TD_LINUX)
# IF (NOT ${TD_LINUX})
# target_include_directories(
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
# )
# ELSE()
# target_include_directories(
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# )
#ENDIF(NOT ${TD_LINUX})
if (${BUILD_CONTRIB})
target_include_directories(
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
else()
target_include_directories(
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
)
if (${TD_LINUX})
target_include_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
target_link_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
endif()
endif()
target_link_libraries(
vnode
PUBLIC os
@ -108,28 +144,6 @@ target_link_libraries(
PUBLIC stream
PUBLIC index
)
ELSE()
target_link_libraries(
vnode
PUBLIC os
PUBLIC util
PUBLIC common
PUBLIC tfs
PUBLIC wal
PUBLIC qworker
PUBLIC sync
PUBLIC executor
PUBLIC scheduler
PUBLIC tdb
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index
)
ENDIF()
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
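With the new BUILD_CONTRIB option (OFF by default, per the cmake.options change earlier in this diff), a configure line such as `cmake .. -DBUILD_CONTRIB=true` (an illustrative invocation) builds RocksDB from the bundled contrib sources; otherwise the vnode target includes and links the prebuilt static RocksDB under `deps/${TD_DEPS_DIR}` on Linux.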

View File

@ -357,19 +357,25 @@ typedef struct {
STSchema *pTSchema;
} SRocksCache;
typedef struct {
STsdb *pTsdb;
int flush_count;
} SCacheFlushState;
struct STsdb {
char *path;
SVnode *pVnode;
STsdbKeepCfg keepCfg;
TdThreadRwlock rwLock;
SMemTable *mem;
SMemTable *imem;
STsdbFS fs;
SLRUCache *lruCache;
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
char *path;
SVnode *pVnode;
STsdbKeepCfg keepCfg;
TdThreadRwlock rwLock;
SMemTable *mem;
SMemTable *imem;
STsdbFS fs;
SLRUCache *lruCache;
SCacheFlushState flushState;
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
};
struct TSDBKEY {

View File

@ -151,7 +151,6 @@ int32_t metaCacheOpen(SMeta* pMeta) {
taosHashSetFreeFp(pCache->sTagFilterResCache.pTableEntry, freeCacheEntryFp);
taosThreadMutexInit(&pCache->sTagFilterResCache.lock, NULL);
pCache->STbGroupResCache.pResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5);
if (pCache->STbGroupResCache.pResCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -169,7 +168,6 @@ int32_t metaCacheOpen(SMeta* pMeta) {
taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp);
taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL);
pMeta->pCache = pCache;
return code;
@ -486,14 +484,14 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv
}
static FORCE_INLINE void setMD5DigestInKey(uint64_t* pBuf, const char* key, int32_t keyLen) {
// ASSERT(keyLen == sizeof(int64_t) * 2);
// ASSERT(keyLen == sizeof(int64_t) * 2);
memcpy(&pBuf[2], key, keyLen);
}
// the format of key:
// hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes)
static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid, const char* key, int32_t keyLen) {
buf[0] = (uint64_t) pHashMap;
buf[0] = (uint64_t)pHashMap;
buf[1] = suid;
setMD5DigestInKey(buf, key, keyLen);
ASSERT(keyLen == sizeof(uint64_t) * 2);
@ -501,7 +499,7 @@ static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid,
int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes) {
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
@ -541,7 +539,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK
uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc,
((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
@ -551,7 +550,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK
return TSDB_CODE_SUCCESS;
}
static void freeUidCachePayload(const void* key, size_t keyLen, void* value) {
static void freeUidCachePayload(const void* key, size_t keyLen, void* value, void* ud) {
(void)ud;
if (value == NULL) {
return;
}
@ -607,7 +607,7 @@ static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL
int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio) {
int32_t code = 0;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (selectivityRatio > tsSelectivityRatio) {
@ -640,7 +640,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
} else { // check if it exists or not
} else { // check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
@ -659,7 +659,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int
// add to cache.
taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeUidCachePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
TAOS_LRU_PRIORITY_LOW, NULL);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", vgId, suid,
@ -675,7 +675,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
SHashObj* pEntryHashMap = pMeta->pCache->sTagFilterResCache.pTableEntry;
uint64_t dummy[2] = {0};
initCacheKey(p, pEntryHashMap, suid, (char*) &dummy[0], 16);
initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16);
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
taosThreadMutexLock(pLock);
@ -700,12 +700,12 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d suid:%"PRId64" cached related tag filter uid list cleared", vgId, suid);
metaDebug("vgId:%d suid:%" PRId64 " cached related tag filter uid list cleared", vgId, suid);
return TSDB_CODE_SUCCESS;
}
int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList) {
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
@ -738,7 +738,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i
uint32_t acc = pMeta->pCache->STbGroupResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc,
((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
@ -748,8 +749,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i
return TSDB_CODE_SUCCESS;
}
static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value) {
static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value, void* ud) {
(void)ud;
if (value == NULL) {
return;
}
@ -778,8 +779,8 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value)
taosMemoryFree(tmp);
double el = (taosGetTimestampUs() - st) / 1000.0;
metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
el);
metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms",
listNEles(&((*pEntry)->list)), el);
break;
}
}
@ -788,11 +789,10 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value)
taosArrayDestroy((SArray*)value);
}
int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen) {
int32_t code = 0;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (payloadLen > tsTagFilterResCacheSize) {
@ -817,7 +817,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
} else { // check if it exists or not
} else { // check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
@ -836,7 +836,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int
// add to cache.
taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeTbGroupCachePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
TAOS_LRU_PRIORITY_LOW, NULL);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " tb group added into cache, total:%d, tables:%d", vgId, suid,
@ -852,7 +852,7 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
SHashObj* pEntryHashMap = pMeta->pCache->STbGroupResCache.pTableEntry;
uint64_t dummy[2] = {0};
initCacheKey(p, pEntryHashMap, suid, (char*) &dummy[0], 16);
initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16);
TdThreadMutex* pLock = &pMeta->pCache->STbGroupResCache.lock;
taosThreadMutexLock(pLock);
@ -877,8 +877,6 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d suid:%"PRId64" cached related tb group cleared", vgId, suid);
metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid);
return TSDB_CODE_SUCCESS;
}

View File

@ -337,6 +337,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
tagArray = taosArrayInit(1, sizeof(STagVal));
if (!tagArray) {
tdDestroySVCreateTbReq(pCreateTbReq);
taosMemoryFreeClear(pCreateTbReq);
goto _end;
}
STagVal tagVal = {
@ -352,6 +353,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
tagArray = taosArrayDestroy(tagArray);
if (pTag == NULL) {
tdDestroySVCreateTbReq(pCreateTbReq);
taosMemoryFreeClear(pCreateTbReq);
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
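The added `taosMemoryFreeClear` calls pair the member-wise destroy with freeing the request struct itself on each error path, closing a leak. The general idiom, sketched with hypothetical names:

```c
#include <stdlib.h>

typedef struct Req { char *name; } Req;

static void destroyReq(Req *r) { free(r->name); }  // releases members only

// The fix pairs the member-wise destroy with freeing the struct itself on
// every error path (the real code also nulls the pointer afterwards).
static void onError(Req *r) {
  destroyReq(r);
  free(r);
}
```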

View File

@ -14,6 +14,8 @@
*/
#include "tsdb.h"
#define ROCKS_BATCH_SIZE (4096)
static int32_t tsdbOpenBICache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
@ -213,7 +215,7 @@ static void tsdbCloseRocksCache(STsdb *pTsdb) {
}
static void rocksMayWrite(STsdb *pTsdb, bool force, bool read, bool lock) {
rocksdb_writebatch_t *wb = NULL;
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
if (read) {
if (lock) {
taosThreadMutexLock(&pTsdb->lruMutex);
@ -223,44 +225,33 @@ static void rocksMayWrite(STsdb *pTsdb, bool force, bool read, bool lock) {
if (lock) {
taosThreadMutexLock(&pTsdb->rCache.rMutex);
}
wb = pTsdb->rCache.writebatch;
}
int count = rocksdb_writebatch_count(wb);
if ((force && count > 0) || count >= 1024) {
if ((force && count > 0) || count >= ROCKS_BATCH_SIZE) {
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d, count: %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, count,
err);
rocksdb_free(err);
// pTsdb->flushState.flush_count = 0;
}
rocksdb_writebatch_clear(wb);
}
if (read) {
if (lock) taosThreadMutexUnlock(&pTsdb->lruMutex);
} else {
if (lock) taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
if (lock) {
if (read) {
taosThreadMutexUnlock(&pTsdb->lruMutex);
} else {
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
}
}
}
int32_t tsdbCacheCommit(STsdb *pTsdb) {
int32_t code = 0;
char *err = NULL;
rocksMayWrite(pTsdb, true, false, true);
rocksMayWrite(pTsdb, true, true, true);
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
code = -1;
}
return code;
}
SLastCol *tsdbCacheDeserialize(char const *value) {
static SLastCol *tsdbCacheDeserialize(char const *value) {
if (!value) {
return NULL;
}
@ -278,7 +269,7 @@ SLastCol *tsdbCacheDeserialize(char const *value) {
return pLastCol;
}
void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
static void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
SColVal *pColVal = &pLastCol->colVal;
size_t length = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pColVal->type)) {
@ -300,6 +291,77 @@ void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
*size = length;
}
static void tsdbCachePutBatch(SLastCol *pLastCol, const void *key, size_t klen, SCacheFlushState *state) {
STsdb *pTsdb = state->pTsdb;
SRocksCache *rCache = &pTsdb->rCache;
rocksdb_writebatch_t *wb = rCache->writebatch;
char *rocks_value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(pLastCol, &rocks_value, &vlen);
taosThreadMutexLock(&rCache->rMutex);
rocksdb_writebatch_put(wb, (char *)key, klen, rocks_value, vlen);
taosMemoryFree(rocks_value);
if (++state->flush_count >= ROCKS_BATCH_SIZE) {
char *err = NULL;
rocksdb_write(rCache->db, rCache->writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d, count: %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
state->flush_count, err);
rocksdb_free(err);
}
rocksdb_writebatch_clear(wb);
state->flush_count = 0;
}
taosThreadMutexUnlock(&rCache->rMutex);
}
int tsdbCacheFlushDirty(const void *key, size_t klen, void *value, void *ud) {
SLastCol *pLastCol = (SLastCol *)value;
if (pLastCol->dirty) {
tsdbCachePutBatch(pLastCol, key, klen, (SCacheFlushState *)ud);
pLastCol->dirty = 0;
}
return 0;
}
int32_t tsdbCacheCommit(STsdb *pTsdb) {
int32_t code = 0;
char *err = NULL;
SLRUCache *pCache = pTsdb->lruCache;
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
taosThreadMutexLock(&pTsdb->lruMutex);
taosLRUCacheApply(pCache, tsdbCacheFlushDirty, &pTsdb->flushState);
rocksMayWrite(pTsdb, true, false, false);
rocksMayWrite(pTsdb, true, true, false);
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
taosThreadMutexUnlock(&pTsdb->lruMutex);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
code = -1;
}
return code;
}
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
SLastCol *pLastCol = NULL;
@ -329,21 +391,25 @@ static void reallocVarData(SColVal *pColVal) {
}
}
static void tsdbCacheDeleter(const void *key, size_t keyLen, void *value) {
static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud) {
SLastCol *pLastCol = (SLastCol *)value;
// TODO: add dirty flag to SLastCol
if (pLastCol->dirty) {
// TODO: queue into dirty list, free it after save to backstore
} else {
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type) /* && pLastCol->colVal.value.nData > 0*/) {
taosMemoryFree(pLastCol->colVal.value.pData);
}
taosMemoryFree(value);
tsdbCachePutBatch(pLastCol, key, klen, (SCacheFlushState *)ud);
}
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type) /* && pLastCol->colVal.value.nData > 0*/) {
taosMemoryFree(pLastCol->colVal.value.pData);
}
taosMemoryFree(value);
}
typedef struct {
int idx;
SLastKey key;
} SIdxKey;
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow) {
int32_t code = 0;
@ -370,113 +436,206 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
tsdbRowClose(&iter);
// 3, build keys & multi get from rocks
int num_keys = TARRAY_SIZE(aColVal);
char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char *key_list = taosMemoryMalloc(num_keys * ROCKS_KEY_LEN * 2);
int num_keys = TARRAY_SIZE(aColVal);
TSKEY keyTs = TSDBROW_TS(pRow);
SArray *remainCols = NULL;
SLRUCache *pCache = pTsdb->lruCache;
taosThreadMutexLock(&pTsdb->lruMutex);
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
int16_t cid = pColVal->cid;
memcpy(key_list + i * ROCKS_KEY_LEN, &(SLastKey){.ltype = 1, .uid = uid, .cid = cid}, ROCKS_KEY_LEN);
memcpy(key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN, &(SLastKey){.ltype = 0, .uid = uid, .cid = cid},
ROCKS_KEY_LEN);
keys_list[i] = key_list + i * ROCKS_KEY_LEN;
keys_list[num_keys + i] = key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN;
keys_list_sizes[i] = ROCKS_KEY_LEN;
keys_list_sizes[num_keys + i] = ROCKS_KEY_LEN;
}
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys * 2; ++i) {
rocksdb_free(errs[i]);
}
taosMemoryFree(key_list);
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
SLastKey *key = &(SLastKey){.ltype = 0, .uid = uid, .cid = cid};
size_t klen = ROCKS_KEY_LEN;
LRUHandle *h = taosLRUCacheLookup(pCache, key, klen);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
TSKEY keyTs = TSDBROW_TS(pRow);
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
if (pLastCol->ts <= keyTs) {
uint8_t *pVal = NULL;
int nData = pLastCol->colVal.value.nData;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
pVal = pLastCol->colVal.value.pData;
}
pLastCol->ts = keyTs;
pLastCol->colVal = *pColVal;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
if (nData < pColVal->value.nData) {
taosMemoryFree(pVal);
pLastCol->colVal.value.pData = taosMemoryCalloc(1, pColVal->value.nData);
} else {
pLastCol->colVal.value.pData = pVal;
}
if (pColVal->value.nData) {
memcpy(pLastCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}
// if (!COL_VAL_IS_NONE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
SLastKey key = (SLastKey){.ltype = 0, .uid = uid, .cid = pColVal->cid};
size_t klen = ROCKS_KEY_LEN;
rocksdb_writebatch_put(wb, (char *)&key, klen, value, vlen);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
if (!pLastCol->dirty) {
pLastCol->dirty = 1;
}
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter,
NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
taosLRUCacheRelease(pCache, h, false);
} else {
if (!remainCols) {
remainCols = taosArrayInit(num_keys * 2, sizeof(SIdxKey));
}
taosMemoryFree(value);
taosArrayPush(remainCols, &(SIdxKey){i, *key});
}
if (COL_VAL_IS_VALUE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
key->ltype = 1;
LRUHandle *h = taosLRUCacheLookup(pCache, key, klen);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
SLastKey key = (SLastKey){.ltype = 1, .uid = uid, .cid = pColVal->cid};
if (pLastCol->ts <= keyTs) {
uint8_t *pVal = NULL;
int nData = pLastCol->colVal.value.nData;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
pVal = pLastCol->colVal.value.pData;
}
pLastCol->ts = keyTs;
pLastCol->colVal = *pColVal;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
if (nData < pColVal->value.nData) {
taosMemoryFree(pVal);
pLastCol->colVal.value.pData = taosMemoryCalloc(1, pColVal->value.nData);
} else {
pLastCol->colVal.value.pData = pVal;
}
if (pColVal->value.nData) {
memcpy(pLastCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}
rocksdb_writebatch_put(wb, (char *)&key, ROCKS_KEY_LEN, value, vlen);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
if (!pLastCol->dirty) {
pLastCol->dirty = 1;
}
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter,
NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
taosLRUCacheRelease(pCache, h, false);
} else {
if (!remainCols) {
remainCols = taosArrayInit(num_keys * 2, sizeof(SIdxKey));
}
taosMemoryFree(value);
taosArrayPush(remainCols, &(SIdxKey){i, *key});
}
}
//}
rocksdb_free(values_list[i]);
rocksdb_free(values_list[i + num_keys]);
}
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
rocksMayWrite(pTsdb, true, false, false);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
if (remainCols) {
num_keys = TARRAY_SIZE(remainCols);
}
if (remainCols && num_keys > 0) {
char **keys_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
for (int i = 0; i < num_keys; ++i) {
SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[i];
keys_list[i] = (char *)&idxKey->key;
keys_list_sizes[i] = ROCKS_KEY_LEN;
}
char **values_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys, sizeof(char *));
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
rocksdb_free(errs[i]);
}
taosMemoryFree(errs);
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(values_list_sizes);
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[i];
SColVal *pColVal = (SColVal *)TARRAY_DATA(aColVal) + idxKey->idx;
// SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, idxKey->idx);
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (idxKey->key.ltype == 0) {
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
// SLastKey key = (SLastKey){.ltype = 0, .uid = uid, .cid = pColVal->cid};
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_put(wb, (char *)&idxKey->key, ROCKS_KEY_LEN, value, vlen);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge,
tsdbCacheDeleter, NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosMemoryFree(value);
}
} else {
if (COL_VAL_IS_VALUE(pColVal)) {
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
// SLastKey key = (SLastKey){.ltype = 1, .uid = uid, .cid = pColVal->cid};
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_put(wb, (char *)&idxKey->key, ROCKS_KEY_LEN, value, vlen);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
pLastCol = (SLastCol *)value;
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
}
LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge,
tsdbCacheDeleter, NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosMemoryFree(value);
}
}
}
rocksdb_free(values_list[i]);
}
rocksMayWrite(pTsdb, true, false, true);
taosMemoryFree(values_list);
taosArrayDestroy(remainCols);
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
_exit:
taosArrayDestroy(aColVal);
@ -651,11 +810,6 @@ static SLastCol *tsdbCacheLoadCol(STsdb *pTsdb, SCacheRowsReader *pr, int16_t sl
return pLastCol;
}
typedef struct {
int idx;
SLastKey key;
} SIdxKey;
static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SArray *remainCols,
SCacheRowsReader *pr, int8_t ltype) {
int32_t code = 0;
@ -712,7 +866,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
}
LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL,
TAOS_LRU_PRIORITY_LOW);
TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
@ -787,7 +941,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA
}
LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter,
NULL, TAOS_LRU_PRIORITY_LOW);
NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
@ -833,9 +987,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
reallocVarData(&lastCol.colVal);
taosArrayPush(pLastArray, &lastCol);
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
taosLRUCacheRelease(pCache, h, false);
} else {
SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[pr->pSlotIds[i]].type)};
@ -860,9 +1012,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
reallocVarData(&lastCol.colVal);
taosArraySet(pLastArray, idxKey->idx, &lastCol);
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
taosLRUCacheRelease(pCache, h, false);
taosArrayRemove(remainCols, i);
} else {
@ -906,7 +1056,7 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR
}
LRUStatus status = taosLRUCacheInsert(pCache, key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, &h,
TAOS_LRU_PRIORITY_LOW);
TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
@ -965,6 +1115,8 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
rocksMayWrite(pTsdb, true, false, false);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
for (int i = 0; i < num_keys * 2; ++i) {
if (errs[i]) {
rocksdb_free(errs[i]);
@ -975,19 +1127,42 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
taosThreadMutexLock(&pTsdb->rCache.rMutex);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
rocksdb_writebatch_delete(wb, keys_list[i], klen);
}
taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen);
pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
rocksdb_writebatch_delete(wb, keys_list[num_keys + i], klen);
}
taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksdb_free(values_list[i]);
rocksdb_free(values_list[i + num_keys]);
taosThreadMutexLock(&pTsdb->lruMutex);
LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h);
if (pLastCol->dirty) {
pLastCol->dirty = 0;
}
taosLRUCacheRelease(pTsdb->lruCache, h, true);
}
taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen);
h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[num_keys + i], klen);
if (h) {
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h);
if (pLastCol->dirty) {
pLastCol->dirty = 0;
}
taosLRUCacheRelease(pTsdb->lruCache, h, true);
}
taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen);
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
@ -997,8 +1172,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
rocksMayWrite(pTsdb, true, false, false);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksMayWrite(pTsdb, true, false, true);
_exit:
taosMemoryFree(pTSchema);
@ -1011,7 +1185,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
SLRUCache *pCache = NULL;
size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024;
pCache = taosLRUCacheInit(cfgCapacity, 1, .5);
pCache = taosLRUCacheInit(cfgCapacity, 0, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@ -1033,6 +1207,9 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
taosThreadMutexInit(&pTsdb->lruMutex, NULL);
pTsdb->flushState.pTsdb = pTsdb;
pTsdb->flushState.flush_count = 0;
_err:
pTsdb->lruCache = pCache;
return code;
@ -1062,7 +1239,8 @@ static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) {
*len = sizeof(uint64_t);
}
static void deleteTableCacheLast(const void *key, size_t keyLen, void *value) {
static void deleteTableCacheLast(const void *key, size_t keyLen, void *value, void *ud) {
(void)ud;
SArray *pLastArray = (SArray *)value;
int16_t nCol = taosArrayGetSize(pLastArray);
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
@ -3146,7 +3324,8 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *
size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray);
_taos_lru_deleter_t deleter = deleteTableCacheLast;
LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
@ -3186,7 +3365,7 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr,
size_t charge = pLastArray->capacity * pLastArray->elemSize + sizeof(*pLastArray);
_taos_lru_deleter_t deleter = deleteTableCacheLast;
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
@ -3259,7 +3438,8 @@ static int32_t tsdbCacheLoadBlockIdx(SDataFReader *pFileReader, SArray **aBlockI
return code;
}
static void deleteBICache(const void *key, size_t keyLen, void *value) {
static void deleteBICache(const void *key, size_t keyLen, void *value, void *ud) {
(void)ud;
SArray *pArray = (SArray *)value;
taosArrayDestroy(pArray);
@ -3290,7 +3470,8 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa
size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray);
_taos_lru_deleter_t deleter = deleteBICache;
LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
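Taken together, the changes above turn the last-value cache into a write-back cache: updates mark LRU entries dirty, eviction and commit (via taosLRUCacheApply with tsdbCacheFlushDirty) queue dirty entries into a RocksDB write batch, and the batch is flushed once ROCKS_BATCH_SIZE entries accumulate. A minimal sketch of that batching discipline (hypothetical names, RocksDB calls elided):

```c
#include <stdio.h>

#define BATCH_SIZE 4096  // stands in for ROCKS_BATCH_SIZE above

typedef struct { int pending; } Batch;  // entries queued but not yet written

// Queue one dirty cache entry; once the batch is full, issue a single
// backing-store write (rocksdb_write in the real code) and reset the count.
static void putBatched(Batch *b) {
  if (++b->pending >= BATCH_SIZE) {
    printf("flushing %d entries in one write\n", b->pending);
    b->pending = 0;
  }
}
```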

View File

@ -2846,18 +2846,18 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
return code;
}
pBlockScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, pReader->idStr);
if (pBlockScanInfo == NULL) {
goto _end;
}
TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
// it is a clean block, load it directly
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
pBlock->nRow <= pReader->resBlockInfo.capacity) {
if (asc || (!hasDataInLastBlock(pLastBlockReader))) {
if (asc || (!hasDataInLastBlock(pLastBlockReader) && (pBlock->maxKey.ts > keyInBuf.ts))) {
code = copyBlockDataToSDataBlock(pReader);
if (code) {
goto _end;

View File

@ -107,6 +107,10 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
tDecoderClear(&dc);
if (code) {
vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino,
tstrerror(code), TMSG_INFO(pMsg->msgType));
}
return code;
}
extern int64_t tsMaxKeyByPrecision[];
@ -238,11 +242,11 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
tEndDecode(pCoder);
_exit:
if (code) {
vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
tDecoderClear(pCoder);
if (code) {
vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino,
tstrerror(code), TMSG_INFO(pMsg->msgType));
}
return code;
}
@ -301,8 +305,8 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
if (code) {
vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code),
TMSG_INFO(pMsg->msgType));
}
return code;
}

View File

@ -170,6 +170,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
*/
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
bool tsortIsClosed(SSortHandle* pHandle);
void tsortSetClosed(SSortHandle* pHandle);
#ifdef __cplusplus
}
#endif

View File

@ -1133,8 +1133,7 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}
// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload,
// size, 1);
pStorageAPI->metaFn.putCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));
}

View File

@ -187,6 +187,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
// the scan order may be different from the output result order for agg interval operator.
if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) {
order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder;
} else {
order = pInfo->pFillInfo->order;
}
doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order);

View File

@ -55,7 +55,6 @@ typedef struct STableMergeScanSortSourceParam {
int32_t readerIdx;
uint64_t uid;
SSDataBlock* inputBlock;
bool multiReader;
STsdbReader* dataReader;
} STableMergeScanSortSourceParam;
@ -466,7 +465,12 @@ static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
}
// const void *key, size_t keyLen, void *value
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); }
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value, void* ud) {
(void)key;
(void)keyLen;
(void)ud;
freeTableCachedVal(value);
}
static void doSetNullValue(SSDataBlock* pBlock, const SExprInfo* pExpr, int32_t numOfExpr) {
for (int32_t j = 0; j < numOfExpr; ++j) {
@ -501,7 +505,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
// 1. check if it is existed in meta cache
if (pCache == NULL) {
pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn);
pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_NOLOCK, &pHandle->api.metaFn);
code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid);
if (code != TSDB_CODE_SUCCESS) {
// when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed.
@ -554,7 +558,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
freeReader = true;
int32_t ret = taosLRUCacheInsert(pCache->pTableMetaEntryCache, &pBlock->info.id.uid, sizeof(uint64_t), pVal,
sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW);
sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW, NULL);
if (ret != TAOS_LRU_STATUS_OK) {
qError("failed to put meta into lru cache, code:%d, %s", ret, idStr);
freeTableCachedVal(pVal);
@ -2653,8 +2657,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
int64_t st = taosGetTimestampUs();
void* p = tableListGetInfo(pInfo->base.pTableListInfo, readIdx + pInfo->tableStartIndex);
SReadHandle* pHandle = &pInfo->base.readHandle;
if (NULL == source->dataReader || !source->multiReader) {
if (NULL == source->dataReader) {
code = pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, pQueryCond, p, 1, pBlock, (void**)&source->dataReader, GET_TASKID(pTaskInfo), false, NULL);
if (code != 0) {
T_LONG_JMP(pTaskInfo->env, code);
@ -2718,19 +2721,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
pInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
qTrace("tsdb/read-table-data: %p, close reader", reader);
if (!source->multiReader) {
pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader);
source->dataReader = NULL;
}
pInfo->base.dataReader = NULL;
return pBlock;
}
if (!source->multiReader) {
pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader);
source->dataReader = NULL;
}
pAPI->tsdReader.tsdReaderClose(source->dataReader);
source->dataReader = NULL;
pInfo->base.dataReader = NULL;
blockDataDestroy(source->inputBlock);
source->inputBlock = NULL;
return NULL;
}
@ -2786,7 +2785,19 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
// todo the total available buffer should be determined by total capacity of buffer of this task.
// the additional one is reserved for merge result
pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1);
// pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1);
int32_t kWay = (TSDB_MAX_BYTES_PER_ROW * 2) / (pInfo->pResBlock->info.rowSize);
if (kWay >= 128) {
kWay = 128;
} else if (kWay <= 2) {
kWay = 2;
} else {
int i = 2;
while (i * 2 <= kWay) i = i * 2;
kWay = i;
}
pInfo->sortBufSize = pInfo->bufPageSize * (kWay + 1);
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str);
@ -2801,9 +2812,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
STableMergeScanSortSourceParam param = {0};
param.readerIdx = i;
param.pOperator = pOperator;
param.multiReader = (numOfTable <= MULTI_READER_MAX_TABLE_NUM) ? true : false;
param.inputBlock = createOneDataBlock(pInfo->pResBlock, false);
blockDataEnsureCapacity(param.inputBlock, pOperator->resultInfo.capacity);
taosArrayPush(pInfo->sortSourceParams, &param);
@ -2886,6 +2895,11 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
}
}
if (tsortIsClosed(pHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
bool limitReached = applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
qDebug("%s get sorted row block, rows:%" PRId64 ", limit:%" PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
pInfo->limitInfo.numOfOutputRows);
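The new sort-buffer sizing caps the merge fan-in instead of scaling with the table count: kWay is the largest power of two not exceeding (TSDB_MAX_BYTES_PER_ROW * 2) / rowSize, clamped to the range [2, 128], and the buffer holds kWay + 1 pages. A standalone sketch of the computation (65531 is an assumed stand-in for TSDB_MAX_BYTES_PER_ROW):

```c
#include <stdio.h>

#define MAX_BYTES_PER_ROW 65531  // assumed value of TSDB_MAX_BYTES_PER_ROW

static int mergeFanIn(int rowSize) {
  int kWay = (MAX_BYTES_PER_ROW * 2) / rowSize;
  if (kWay >= 128) return 128;
  if (kWay <= 2) return 2;
  int i = 2;
  while (i * 2 <= kWay) i *= 2;  // round down to a power of two
  return i;
}

int main(void) {
  printf("%d\n", mergeFanIn(500));   // 262 candidates, capped to 128
  printf("%d\n", mergeFanIn(5000));  // 26 rounds down to 16
  return 0;
}
```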

View File

@ -228,6 +228,11 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
// multi-group case not handle here
SSDataBlock* pBlock = NULL;
while (1) {
if (tsortIsClosed(pInfo->pSortHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
pInfo->matchInfo.pList, pInfo);
if (pBlock == NULL) {
@ -439,6 +444,11 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = NULL;
while (pInfo->pCurrSortHandle != NULL) {
if (tsortIsClosed(pInfo->pCurrSortHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
// beginSortGroup would fetch all child blocks of pInfo->currGroupId;
ASSERT(pInfo->childOpStatus != CHILD_OP_SAME_GROUP);
pBlock = getGroupSortedBlockData(pInfo->pCurrSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
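The added tsortIsClosed checks implement cooperative cancellation: each loop iteration polls whether the sort handle has been closed and, if so, sets terrno to TSDB_CODE_TSC_QUERY_CANCELLED and long-jumps out of the operator. A minimal sketch of the polling pattern (hypothetical names; assumes setjmp was called earlier at the recovery point):

```c
#include <setjmp.h>
#include <stdbool.h>

static jmp_buf recoveryPoint;
static volatile bool queryClosed;  // set asynchronously when the query is killed

// Poll the flag once per iteration and unwind to the task's recovery point,
// mirroring the tsortIsClosed() + T_LONG_JMP checks above.
static void produceSortedBlocks(void) {
  while (1) {
    if (queryClosed) {
      longjmp(recoveryPoint, 1);
    }
    break;  // placeholder: produce the next block, stop when exhausted
  }
}
```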

View File

@ -1593,6 +1593,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
SSysTableScanInfo* pInfo = pOperator->info;
char dbName[TSDB_DB_NAME_LEN] = {0};
blockDataCleanup(pInfo->pRes);
const char* name = tNameGetTableName(&pInfo->name);
if (pInfo->showRewrite) {
getDBNameFromCondition(pInfo->pCondition, dbName);

Some files were not shown because too many files have changed in this diff.