Merge pull request #21570 from taosdata/fix/liaohj_main

other: merge 3.0 into main.
Haojun Liao 2023-06-02 13:12:27 +08:00 committed by GitHub
commit 2e5a56a037
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
562 changed files with 38002 additions and 12632 deletions

.gitignore vendored
View File

@ -131,3 +131,4 @@ tools/BUGS
tools/taos-tools
tools/taosws-rs
tags
.clangd

View File

@ -18,7 +18,7 @@
Note: documentation branches must be prefixed with `docs/` to avoid triggering unnecessary tests.
4. Create a pull request to merge your branch into the development branch `3.0`; our development team will review it as soon as possible.
If you run into any problems, add the official WeChat account TDengineECO and our team will help you resolve them.
If you run into any problems, add the official WeChat account tdengine1 and our team will help you resolve them.
## Gifts for Contributors
@ -48,4 +48,4 @@ The TDengine community is committed to helping more developers understand and use TDengine.
## Contact Us
If you have a problem to solve or a question to be answered, you can add our WeChat account: TDengineECO
If you have a problem to solve or a question to be answered, you can add our WeChat account: tdengine1.

View File

@ -52,7 +52,7 @@ TDengine also provides a set of auxiliary tools, taosTools, which currently includes taosBench
### Ubuntu 18.04 and above & Debian
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
```
#### Install the build dependencies for taos-tools
@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8 & Fedora
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
```
#### Install the dependencies for building taosTools on CentOS
@ -88,7 +88,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```
#### CentOS 8/Rocky Linux
#### CentOS 8/Fedora/Rocky Linux
```
sudo yum install -y epel-release
@ -101,7 +101,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
If the powertools installation fails, try the following instead:
```
sudo yum config-manager --set-enabled Powertools
sudo yum config-manager --set-enabled powertools
```
#### CentOS + devtoolset
@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone pkgconfig geos
```
### Set up the golang development environment

View File

@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
### Ubuntu 18.04 and above or Debian
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
```
#### Install build dependencies for taosTools
@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8 & Fedora
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
```
#### Install build dependencies for taosTools on CentOS
@ -94,7 +94,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```
#### CentOS 8/Rocky Linux
#### CentOS 8/Fedora/Rocky Linux
```
sudo yum install -y epel-release
@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone pkgconfig geos
```
### Setup golang environment

View File

@ -117,14 +117,14 @@ ELSE ()
IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
# disable all assert

View File

@ -64,12 +64,25 @@ IF(${TD_WINDOWS})
ON
)
MESSAGE("build geos Win32")
option(
BUILD_GEOS
"If build geos on Windows"
ON
)
ELSEIF (TD_DARWIN_64)
IF(${BUILD_TEST})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
ENDIF ()
ENDIF ()
option(
BUILD_GEOS
"If build geos on Windows"
OFF
)
option(
BUILD_SHARED_LIBS
""
@ -109,7 +122,7 @@ option(
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
OFF
ON
)
option(

View File

@ -56,7 +56,17 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
SET(TD_DARWIN TRUE)
SET(OSTYPE "macOS")
execute_process(COMMAND geos-config --cflags OUTPUT_VARIABLE GEOS_CFLAGS)
execute_process(COMMAND geos-config --ldflags OUTPUT_VARIABLE GEOS_LDFLAGS)
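# geos-config emits "-I<dir>" / "-L<dir>" plus a trailing newline; the
# SUBSTRING and REGEX REPLACE calls below strip the two-character flag prefix
# and the newline, leaving bare paths for INCLUDE_DIRECTORIES/LINK_DIRECTORIES.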
string(SUBSTRING ${GEOS_CFLAGS} 2 -1 GEOS_CFLAGS)
string(REGEX REPLACE "\n" "" GEOS_CFLAGS ${GEOS_CFLAGS})
string(SUBSTRING ${GEOS_LDFLAGS} 2 -1 GEOS_LDFLAGS)
string(REGEX REPLACE "\n" "" GEOS_LDFLAGS ${GEOS_LDFLAGS})
MESSAGE("GEOS_CFLAGS "${GEOS_CFLAGS})
MESSAGE("GEOS_LDFLAGS "${GEOS_LDFLAGS})
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
INCLUDE_DIRECTORIES(${GEOS_CFLAGS})
LINK_DIRECTORIES(${GEOS_LDFLAGS})
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
MESSAGE("Current system arch is arm64")
@ -162,6 +172,14 @@ ELSE ()
ENDIF ()
ENDIF ()
IF(APPLE)
set(CMAKE_THREAD_LIBS_INIT "-lpthread")
set(CMAKE_HAVE_THREADS_LIBRARY 1)
set(CMAKE_USE_WIN32_THREADS_INIT 0)
set(CMAKE_USE_PTHREADS 1)
set(THREADS_PREFER_PTHREAD_FLAG ON)
ENDIF()
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.4.2")
SET(TD_VER_NUMBER "3.0.4.3")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -0,0 +1,12 @@
# geos
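# Download-only stanza: CONFIGURE/BUILD/INSTALL are left empty here because
# the contrib CMakeLists builds geos itself via add_subdirectory(geos EXCLUDE_FROM_ALL).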
ExternalProject_Add(geos
GIT_REPOSITORY https://github.com/libgeos/geos.git
GIT_TAG 3.11.2
SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
BINARY_DIR ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -1,11 +1,11 @@
# rocksdb
ExternalProject_Add(rocksdb
GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
GIT_TAG v6.23.3
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
GIT_TAG v8.1.1
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
)

View File

@ -2,6 +2,7 @@
# stub
ExternalProject_Add(stub
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
GIT_TAG 5e903b8e
GIT_SUBMODULES "src"
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG main
GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -134,6 +134,11 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
@ -223,12 +228,56 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-error=unused-private-field -Wno-error=unused-result")
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
if(${TD_WINDOWS})
option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" OFF)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
@ -434,6 +483,15 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
)
endif(${BUILD_GEOS})
# ================================================================================================
# Build test

View File

@ -1,6 +1,8 @@
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})
add_executable(rocksdbTest "")
target_sources(rocksdbTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
target_link_libraries(rocksdbTest rocksdb)
target_link_libraries(rocksdbTest rocksdb)

View File

@ -1,4 +1,5 @@
#include <assert.h>
#include <bits/stdint-uintn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -9,38 +10,307 @@
const char DBPath[] = "rocksdb_c_simple_example";
const char DBBackupPath[] = "/tmp/rocksdb_c_simple_example_backup";
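// Runtime endianness probe: the low byte of an int32 holding 1 is nonzero
// only on little-endian machines; the encode/decode helpers below use it to
// choose between a plain memcpy and an explicit byte swap.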
static const int32_t endian_test_var = 1;
#define IS_LITTLE_ENDIAN() (*(uint8_t *)(&endian_test_var) != 0)
#define TD_RT_ENDIAN() (IS_LITTLE_ENDIAN() ? TD_LITTLE_ENDIAN : TD_BIG_ENDIAN)
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
static void *taosDecodeFixedU64(const void *buf, uint64_t *value) {
if (IS_LITTLE_ENDIAN()) {
memcpy(value, buf, sizeof(*value));
} else {
((uint8_t *)value)[7] = ((uint8_t *)buf)[0];
((uint8_t *)value)[6] = ((uint8_t *)buf)[1];
((uint8_t *)value)[5] = ((uint8_t *)buf)[2];
((uint8_t *)value)[4] = ((uint8_t *)buf)[3];
((uint8_t *)value)[3] = ((uint8_t *)buf)[4];
((uint8_t *)value)[2] = ((uint8_t *)buf)[5];
((uint8_t *)value)[1] = ((uint8_t *)buf)[6];
((uint8_t *)value)[0] = ((uint8_t *)buf)[7];
}
return POINTER_SHIFT(buf, sizeof(*value));
}
// ---- Fixed U64
static int32_t taosEncodeFixedU64(void **buf, uint64_t value) {
if (buf != NULL) {
if (IS_LITTLE_ENDIAN()) {
memcpy(*buf, &value, sizeof(value));
} else {
((uint8_t *)(*buf))[0] = value & 0xff;
((uint8_t *)(*buf))[1] = (value >> 8) & 0xff;
((uint8_t *)(*buf))[2] = (value >> 16) & 0xff;
((uint8_t *)(*buf))[3] = (value >> 24) & 0xff;
((uint8_t *)(*buf))[4] = (value >> 32) & 0xff;
((uint8_t *)(*buf))[5] = (value >> 40) & 0xff;
((uint8_t *)(*buf))[6] = (value >> 48) & 0xff;
((uint8_t *)(*buf))[7] = (value >> 56) & 0xff;
}
*buf = POINTER_SHIFT(*buf, sizeof(value));
}
return (int32_t)sizeof(value);
}
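// Composite key for the custom column family: two fixed-width 64-bit fields
// laid out back-to-back by kvSerial and read back by kvDeserial below.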
typedef struct KV {
uint64_t k1;
uint64_t k2;
} KV;
int kvSerial(KV *kv, char *buf) {
int len = 0;
len += taosEncodeFixedU64((void **)&buf, kv->k1);
len += taosEncodeFixedU64((void **)&buf, kv->k2);
return len;
}
const char *kvDBName(void *name) { return "kvDBname"; }
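// Comparator callback for rocksdb_comparator_create(): orders keys by the
// decoded (k1, k2) pair rather than by raw bytes.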
int kvDBComp(void *state, const char *aBuf, size_t aLen, const char *bBuf, size_t bLen) {
KV w1, w2;
memset(&w1, 0, sizeof(w1));
memset(&w2, 0, sizeof(w2));
char *p1 = (char *)aBuf;
char *p2 = (char *)bBuf;
// p1 += 1;
// p2 += 1;
p1 = taosDecodeFixedU64(p1, &w1.k1);
p2 = taosDecodeFixedU64(p2, &w2.k1);
p1 = taosDecodeFixedU64(p1, &w1.k2);
p2 = taosDecodeFixedU64(p2, &w2.k2);
if (w1.k1 < w2.k1) {
return -1;
} else if (w1.k1 > w2.k1) {
return 1;
}
if (w1.k2 < w2.k2) {
return -1;
} else if (w1.k2 > w2.k2) {
return 1;
}
return 0;
}
int kvDeserial(KV *kv, char *buf) {
char *p1 = (char *)buf;
// p1 += 1;
p1 = taosDecodeFixedU64(p1, &kv->k1);
p1 = taosDecodeFixedU64(p1, &kv->k2);
return 0;
}
int main(int argc, char const *argv[]) {
rocksdb_t * db;
rocksdb_t *db;
rocksdb_backup_engine_t *be;
rocksdb_options_t * options = rocksdb_options_create();
rocksdb_options_set_create_if_missing(options, 1);
// open DB
char *err = NULL;
db = rocksdb_open(options, DBPath, &err);
char *err = NULL;
const char *path = "/tmp/db";
// Write
rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
rocksdb_options_t *opt = rocksdb_options_create();
rocksdb_options_set_create_if_missing(opt, 1);
rocksdb_options_set_create_missing_column_families(opt, 1);
// Read
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
int len = 1;
char buf[256] = {0};
size_t vallen = 0;
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
snprintf(buf, vallen + 5, "val:%s", val);
printf("%ld %ld %s\n", strlen(val), vallen, buf);
// Update
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
char **cfName = calloc(len, sizeof(char *));
for (int i = 0; i < len; i++) {
cfName[i] = "test";
}
const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
for (int i = 0; i < len; i++) {
cfOpt[i] = rocksdb_options_create_copy(opt);
if (i != 0) {
rocksdb_comparator_t *comp = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
rocksdb_options_set_comparator((rocksdb_options_t *)cfOpt[i], comp);
}
}
// Delete
rocksdb_delete(db, writeoptions, "key", 3, &err);
rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);
// Read again
val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
{
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
size_t vlen = 0;
char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
printf("Get value %s, and len = %d\n", v, (int)vlen);
}
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
rocksdb_writebatch_put_cf(wBatch, cfHandle[0], "key", strlen("key"), "value", strlen("value"));
rocksdb_write(db, wOpt, wBatch, &err);
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
size_t vlen = 0;
{
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
for (int i = 0; i < 100; i++) {
char buf[128] = {0};
KV kv = {.k1 = (100 - i) % 26, .k2 = i % 26};
kvSerial(&kv, buf);
rocksdb_writebatch_put_cf(wBatch, cfHandle[1], buf, sizeof(kv), "value", strlen("value"));
}
rocksdb_write(db, wOpt, wBatch, &err);
}
{
{
char buf[128] = {0};
KV kv = {.k1 = 0, .k2 = 0};
kvSerial(&kv, buf);
char *v = rocksdb_get_cf(db, rOpt, cfHandle[1], buf, sizeof(kv), &vlen, &err);
printf("Get value %s, and len = %d, xxxx\n", v, (int)vlen);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek_to_first(iter);
int i = 0;
while (rocksdb_iter_valid(iter)) {
size_t klen, vlen;
const char *key = rocksdb_iter_key(iter, &klen);
const char *value = rocksdb_iter_value(iter, &vlen);
KV kv;
kvDeserial(&kv, (char *)key);
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
i++;
rocksdb_iter_next(iter);
}
rocksdb_iter_destroy(iter);
}
{
char buf[128] = {0};
KV kv = {.k1 = 0, .k2 = 0};
int len = kvSerial(&kv, buf);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("invalid iter");
}
{
char buf[128] = {0};
KV kv = {.k1 = 100, .k2 = 0};
int len = kvSerial(&kv, buf);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("invalid iter\n");
rocksdb_iter_seek_for_prev(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("stay invalid iter\n");
} else {
size_t klen = 0, vlen = 0;
const char *key = rocksdb_iter_key(iter, &klen);
const char *value = rocksdb_iter_value(iter, &vlen);
KV kv;
kvDeserial(&kv, (char *)key);
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
}
}
}
}
}
// char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
// printf("Get value %s, and len = %d\n", v, (int)vlen);
rocksdb_column_family_handle_destroy(cfHandle[0]);
rocksdb_column_family_handle_destroy(cfHandle[1]);
rocksdb_close(db);
// {
// // rocksdb_options_t *Options = rocksdb_options_create();
// db = rocksdb_open(comm, path, &err);
// if (db != NULL) {
// rocksdb_options_t *cfo = rocksdb_options_create_copy(comm);
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
// rocksdb_options_set_comparator(cfo, cmp1);
// rocksdb_column_family_handle_t *handle = rocksdb_create_column_family(db, cfo, "cf1", &err);
// rocksdb_column_family_handle_destroy(handle);
// rocksdb_close(db);
// db = NULL;
// }
// }
// int ncf = 2;
// rocksdb_column_family_handle_t **pHandle = malloc(ncf * sizeof(rocksdb_column_family_handle_t *));
// {
// rocksdb_options_t *options = rocksdb_options_create_copy(comm);
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
// rocksdb_options_t *dbOpts1 = rocksdb_options_create_copy(comm);
// rocksdb_options_t *dbOpts2 = rocksdb_options_create_copy(comm);
// rocksdb_options_set_comparator(dbOpts2, cmp1);
// // rocksdb_column_family_handle_t *cf = rocksdb_create_column_family(db, dbOpts1, "cmp1", &err);
// const char *pName[] = {"default", "cf1"};
// const rocksdb_options_t **pOpts = malloc(ncf * sizeof(rocksdb_options_t *));
// pOpts[0] = dbOpts1;
// pOpts[1] = dbOpts2;
// rocksdb_options_t *allOptions = rocksdb_options_create_copy(comm);
// db = rocksdb_open_column_families(allOptions, "test", ncf, pName, pOpts, pHandle, &err);
// }
// // rocksdb_options_t *options = rocksdb_options_create();
// // rocksdb_options_set_create_if_missing(options, 1);
// // //rocksdb_open_column_families(const rocksdb_options_t *options, const char *name, int num_column_families,
// // const char *const *column_family_names,
// // const rocksdb_options_t *const *column_family_options,
// // rocksdb_column_family_handle_t **column_family_handles, char **errptr);
// for (int i = 0; i < 100; i++) {
// char buf[128] = {0};
// rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
// KV kv = {.k1 = i, .k2 = i};
// kvSerial(&kv, buf);
// rocksdb_put_cf(db, wopt, pHandle[0], buf, strlen(buf), (const char *)&i, sizeof(i), &err);
// }
// rocksdb_close(db);
// Write
// rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
// rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
//// Read
// rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
// size_t vallen = 0;
// char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
// printf("val:%s\n", val);
//// Update
//// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
//// Delete
// rocksdb_delete(db, writeoptions, "key", 3, &err);
//// Read again
// val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
// printf("val:%s\n", val);
// rocksdb_close(db);
return 0;
}
}

View File

@ -4,7 +4,7 @@ if(${BUILD_DOCS})
find_package(Doxygen)
if (DOXYGEN_FOUND)
# Build the doc
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)

View File

@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
slug: /
---
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. Its written mainly for architects, developers, and system administrators.
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

View File

@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengines core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| Very large total processing capacity | | | √ | TDengines cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengines storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
## Comparison with other databases
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
## More readings
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

View File

@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and wont build the index on any metrics stored. Column wise storage is used.
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.

View File

@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
This document describes how to install TDengine in a Docker container and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).

View File

@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
@ -102,7 +102,7 @@ sudo apt-get install tdengine
:::tip
This installation method is supported only for Debian and Ubuntu.
::::
:::
</TabItem>
<TabItem label="Windows" value="windows">
@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service:
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.

View File

@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
More configuration about connectionplease refer to [Java Connector](/reference/connector/java)
More configuration about connection, please refer to [Java Connector](/reference/connector/java)

View File

@ -1,3 +1,3 @@
```php title="native connection"
```php title="native"
{{#include docs/examples/php/connect.php}}
```

View File

@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
Key differences
Key differences:
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```
@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
<TabItem label="R" value="r">
1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
2. Install the dependency package `RJDBC`
2. Install the dependency package `RJDBC`:
```R
install.packages("RJDBC")
@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
</TabItem>
<TabItem label="PHP" value="php">
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine Location**
**Specify TDengine Location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by the TDengine installation location.
> This way is useful in case TDengine location can't be found automatically or macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install
@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>
:::tip
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
:::

View File

@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
## Query Examples
If you want query the data of `location=California.LosAngeles,groupid=2`here is the query SQL:
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
```sql
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;

View File

@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
## Query Examples
If you want query the data of `location=California.LosAngeles groupid=3`here is the query SQL:
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;

View File

@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
## Query Examples
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}here is the query SQL:
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;

View File

@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system's capability can't be fully utilized; if it's set too high, unnecessary resource contention may result. The normal recommendation for the `vgroups` parameter is 2 times the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
## Sample Programs
@ -98,7 +98,7 @@ The main Program is responsible for:
3. Start reading threads
4. Output writing speed every 10 seconds
The main program provides 4 parameters for tuning
The main program provides 4 parameters for tuning:
1. The number of reading threads, default value is 1
2. The number of writing threads, default value is 2
@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
If you want to launch the sample program on a remote server, please follow below steps:
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
```
mvn package
```
@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
pip3 install faster-fifo
```
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py``sql_writer.py` and `mockdatasource.py`.
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
4. Execute the program

View File

@ -1,4 +1,4 @@
### python Kafka client
For the Python Kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
<details>
<summary>kafka_example_consumer</summary>
`kafka_example_consumer` is `consumer`which is responsible for consuming data from kafka and writing it to TDengine.
`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
```py
{{#include docs/examples/python/kafka_example_consumer.py}}

View File

@ -0,0 +1,3 @@
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```

View File

@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
## Introduction
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns>, <, =, <\>, like
- Filter on tags or data columns: >, <, =, <\>, like
- Grouping of results: `Group By`
- Sorting of results: `Order By`
- Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results
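As an illustration, a minimal sketch of running such a query through the native C connector might look like the following; the host, credentials, and the `power`/`meters` schema are placeholder assumptions, not values mandated by this section:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Placeholder connection parameters; adjust host, user, and password.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) return 1;

  // An aggregate query with grouping, per the feature list above.
  TAOS_RES *res = taos_query(conn, "SELECT AVG(voltage) FROM meters GROUP BY location");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW    row;
    int         nfields = taos_num_fields(res);
    TAOS_FIELD *fields  = taos_fetch_fields(res);
    char        line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, nfields);  // render one row as text
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```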
@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
:::note
1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
:::

View File

@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips: The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
## Data Schema and API
@ -222,7 +222,7 @@ A database including one supertable and two subtables is created as follows:
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
@ -285,16 +285,15 @@ You configure the following parameters when creating a consumer:
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.ip` | string | IP address of the server side | |
| `td.connect.user` | string | User Name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application needs to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false |
The method of specifying these parameters depends on the language used:
@ -312,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
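/* A hedged sketch of the steps that typically follow the configuration
   above: create the consumer, subscribe, and poll. The tmq_* names are from
   TDengine's C data-subscription API; `running` is an application-defined
   stop flag, and error handling is omitted for brevity. */
tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
tmq_list_t *topics = tmq_list_new();
tmq_list_append(topics, "topicName");
tmq_subscribe(tmq, topics);
while (running) {
  TAOS_RES *msg = tmq_consumer_poll(tmq, 1000 /* timeout in ms */);
  if (msg == NULL) continue;
  /* process the rows in msg, then release it */
  taos_free_result(msg);
}
tmq_unsubscribe(tmq);
tmq_consumer_close(tmq);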
@ -327,6 +325,7 @@ Java programs use the following parameters:
| Parameter | Type | Description | Remarks |
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
| `td.connect.type` | string | connection type: "jni" means native connection, "ws" means websocket connection, the default is "jni" |
| `bootstrap.servers` | string | Connection address, such as `localhost:6030` |
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
@ -368,7 +367,6 @@ conf := &tmq.ConfigMap{
"td.connect.port": "6030",
"client.id": "test_tmq_c",
"enable.auto.commit": "false",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
}
consumer, err := NewConsumer(conf)
@ -402,23 +400,6 @@ from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
Python programs use the following parameters:
| Parameter | Type | Description | Remarks |
|:---------:|:----:|:-----------:|:-------:|
| `td.connect.ip` | string | Used in establishing a connection||
| `td.connect.user` | string | Used in establishing a connection||
| `td.connect.pass` | string | Used in establishing a connection||
| `td.connect.port` | string | Used in establishing a connection||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
| `client.id` | string | Client ID | Maximum length: 192 |
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` |
| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
</TabItem>
<TabItem label="Node.JS" value="Node.JS">

View File

@ -6,10 +6,12 @@ description: This document describes how to create user-defined functions (UDF),
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions.
User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max`, output one value for multiple rows of data.
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
## Implement a UDF in C
When you create a user-defined function, you must implement standard interface functions:
- For scalar functions, implement the `scalarfn` interface function.
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
@ -17,7 +19,7 @@ When you create a user-defined function, you must implement standard interface f
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
## Implementing a Scalar Function
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
```c
#include "taos.h"
@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
Replace `scalarfn` with the name of your function.
## Implementing an Aggregate Function
### Implementing an Aggregate Function in C
The implementation of an aggregate function is described as follows:
```c
@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
Replace `aggfn` with the name of your function.
## Interface Functions
### UDF Interface Definition in C
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
@ -108,17 +110,16 @@ Interface functions return a value that indicates whether the operation was succ
For information about the parameters for interface functions, see Data Model.
### Interfaces for Scalar Functions
#### Scalar Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
The parameters in the function are defined as follows:
- inputDataBlock: The data block to input.
- resultColumn: The column to output.
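As a hedged illustration of this interface (not the project's reference sample), the sketch below negates a single INT input column; it assumes the `udfColDataIsNull`, `udfColDataGetData`, and `udfColDataSet` helpers declared in `taosudf.h`:

```c
#include "taos.h"
#include "taosudf.h"

DLL_EXPORT int32_t scalarfn_init() { return 0; }
DLL_EXPORT int32_t scalarfn_destroy() { return 0; }

// Negate every value of the first input column; NULL inputs stay NULL.
DLL_EXPORT int32_t scalarfn(SUdfDataBlock *inputDataBlock, SUdfColumn *resultColumn) {
  SUdfColumn *col = inputDataBlock->udfCols[0];
  for (int32_t i = 0; i < inputDataBlock->numOfRows; ++i) {
    if (udfColDataIsNull(col, i)) {
      udfColDataSet(resultColumn, i, NULL, true);  // propagate NULL
      continue;
    }
    int32_t v = *(int32_t *)udfColDataGetData(col, i);
    int32_t neg = -v;
    udfColDataSet(resultColumn, i, (char *)&neg, false);
  }
  resultColumn->colData.numOfRows = inputDataBlock->numOfRows;
  return 0;
}
```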
### Interfaces for Aggregate Functions
#### Aggregate Interface
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
@ -126,7 +127,7 @@ The parameters in the function are defined as follows:
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points.
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and the `aggfn` function is called on each block to update the result. Finally, aggfn_finish is called to generate the final results from the intermediate results. The final result contains only one or zero data points.
The parameters in the function are defined as follows:
- interBuf: The intermediate result buffer.
@ -135,15 +136,15 @@ The parameters in the function are defined as follows:
- result: The final result.
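A hedged sketch of the same flow for an aggregate that counts input rows; the `SUdfInterBuf` fields (`buf`, `bufLen`, `numOfResult`) follow the data-structure section below, and this is illustrative rather than the library's reference code:

```c
#include "taosudf.h"

DLL_EXPORT int32_t aggfn_init() { return 0; }
DLL_EXPORT int32_t aggfn_destroy() { return 0; }

// start: zero a 64-bit running count in the intermediate buffer.
DLL_EXPORT int32_t aggfn_start(SUdfInterBuf *interBuf) {
  *(int64_t *)interBuf->buf = 0;
  interBuf->bufLen = sizeof(int64_t);
  interBuf->numOfResult = 0;  // nothing produced yet
  return 0;
}

// aggfn: fold one data block into the running count.
DLL_EXPORT int32_t aggfn(SUdfDataBlock *inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
  *(int64_t *)newInterBuf->buf = *(int64_t *)interBuf->buf + inputBlock->numOfRows;
  newInterBuf->bufLen = sizeof(int64_t);
  newInterBuf->numOfResult = 1;
  return 0;
}

// finish: emit the accumulated count as the single final result.
DLL_EXPORT int32_t aggfn_finish(SUdfInterBuf *interBuf, SUdfInterBuf *result) {
  *(int64_t *)result->buf = *(int64_t *)interBuf->buf;
  result->bufLen = sizeof(int64_t);
  result->numOfResult = 1;
  return 0;
}
```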
### Initializing and Terminating User-Defined Functions
#### Initialization and Cleanup Interface
`int32_t udf_init()`
`int32_t udf_destroy()`
Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
Replace `udf` with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
## Data Structure of User-Defined Functions
### Data Structures for UDF in C
```c
typedef struct SUdfColumnMeta {
int16_t type;
@ -193,17 +194,17 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
- SUdfInterBuf defines the intermediate structure `buffer` and the number of results in the buffer `numOfResult`.
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
## Compile UDF
### Compiling C UDF
To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
To use your user-defined function in TDengine, first, compile it to a shared library.
For example, the sample UDF `bit_and.c` can be compiled into a shared library as follows:
@ -213,12 +214,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
The generated shared library file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
## Manage and Use User-Defined Functions
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
### UDF Sample Code in C
## Sample Code
### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
#### Scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
The bit_and function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
@ -231,7 +229,7 @@ The bit_and function implements bitwise addition for multiple columns. If there
</details>
#### Aggregate function 1: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
@ -243,3 +241,650 @@ The l2norm function finds the second-order norm for all data in the input column
```
</details>
#### Aggregate function 2: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
Given several voltage columns as input, the max_vol function returns a string that concatenates the deviceId, the row and column number of the maximum voltage, and the maximum voltage itself.
Create Table:
```sql
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create the UDF:
```sql
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in the query:
```sql
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
<details>
<summary>max_vol.c</summary>
```c
{{#include tests/script/sh/max_vol.c}}
```
</details>
## Implement a UDF in Python
### Prepare Environment
1. Prepare Python Environment
Follow the standard procedure to prepare your Python environment.
2. Install Python package `taospyudf`
```shell
pip3 install taospyudf
```
During this process, some C++ code needs to be compiled, so `cmake` and `gcc` must be available on your system. The compiled `libtaospyudf.so` is automatically copied to `/usr/local/lib`. If you are not the root user, use `sudo`. After installation is done, check it using the command below.
```shell
root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
```
Then execute the command below.
```shell
ldconfig
```
3. If you want to use third-party Python packages in your Python UDF, set the configuration parameter `UdfdLdLibPath` to the value of `PYTHONPATH` before starting `taosd`.
4. Launch `taosd` service
Please refer to [Get Started](../../get-started)
### Interface definition
#### Introduction to Interface
Implement the specified interface functions when implementing a UDF in Python:
- Implement the `process` function for a scalar UDF.
- Implement `start`, `reduce`, and `finish` for an aggregate UDF.
- Implement `init` for initialization and `destroy` for termination.
#### Scalar UDF Interface
The implementation of a scalar UDF is described as follows:
```Python
def process(input: datablock) -> tuple[output_type]:
```
Description: this function processes the input datablock; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of type output_type.
#### Aggregate UDF Interface
The implementation of an aggregate function is described as follows:
```Python
def start() -> bytes:
def reduce(inputs: datablock, buf: bytes) -> bytes:
def finish(buf: bytes) -> output_type:
```
Description: first, start() is invoked to generate the initial result `buffer`; then the input data is divided into multiple row blocks, and reduce() is invoked for each block `inputs` together with the current intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate result `buf`. The final result contains only 0 or 1 row.
#### Initialization and Cleanup Interface
```python
def init():
def destroy():
```
Description: init() does the work of initialization before processing any data; destroy() does the work of cleanup after the data is processed.
### Python UDF Template
#### Scalar Template
```Python
def init():
    # initialization
def destroy():
    # destroy
def process(input: datablock) -> tuple[output_type]:
    # process the input datablock
    # datablock.data(row, col) accesses the python object at location (row, col)
    # return a tuple object consisting of objects of type output_type
```
Note: process() must be implemented; init() and destroy() must be defined too, but they can do nothing.
#### Aggregate Template
```Python
def init():
    # initialization
def destroy():
    # destroy
def start() -> bytes:
    # return serialize(init_state)
def reduce(inputs: datablock, buf: bytes) -> bytes:
    # deserialize buf into state
    # reduce the inputs and state into new_state
    # use inputs.data(i, j) to access the python object at location (i, j)
    # serialize new_state into new_state_bytes
    return new_state_bytes
def finish(buf: bytes) -> output_type:
    # return obj of type output_type
```
Note: an aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result buffer; then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` together with the intermediate result `buf`, and finally finish() is invoked to generate the final result from the intermediate result `buf`.
### Data Mapping between TDengine SQL and Python UDF
The following table describes the mapping between TDengine SQL data type and Python UDF Data Type. The `NULL` value of all TDengine SQL types is mapped to the `None` value in Python.
| **TDengine SQL Data Type** | **Python Data Type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | Not Supported |
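As a quick illustration of this mapping, here is a minimal scalar-UDF sketch (the function and its two-column input layout are our own assumptions for demonstration) that concatenates a VARCHAR column, received as `bytes`, with an INT column, and propagates NULL as `None`:

```python
def init():
    pass

def destroy():
    pass

def process(block):
    # Per the mapping table above: INT arrives as Python int,
    # VARCHAR as bytes, and SQL NULL as None.
    rows, _ = block.shape()
    result = []
    for i in range(rows):
        name, value = block.data(i, 0), block.data(i, 1)
        if name is None or value is None:
            result.append(None)  # NULL in, NULL out
        else:
            result.append(f"{name.decode('utf-8')}={value}".encode('utf-8'))
    return result
```

Since it returns `bytes`, such a function would be created with an output type like `binary(64)`.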
### Development Guide
In this section we demonstrate 5 examples of developing UDFs in Python, progressing from easy to hard. The examples include:

1. A scalar function which accepts only one integer as input and outputs ln(n^2 + 1).
2. A scalar function which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of each input multiplied by its sequence number, i.e. x1 + 2 * x2 + ... + n * xn.
3. A scalar function which accepts a timestamp and outputs the next closest Sunday after it. In this case, we will demonstrate how to use the 3rd party library `moment`.
4. An aggregate function which calculates the difference between the maximum and the minimum of a specific column, i.e. the same functionality as the built-in spread() function.
Some debugging skills for Python UDFs are also explained in this guide.

We assume you are using a Linux system and already have TDengine 3.0.4.0+ and Python 3.x installed.

Note: **You can't use the print() function to output logs inside a UDF; you have to write logs to a specific file or use the logging module of Python.**
#### Sample 1: Simplest UDF
This scalar UDF accepts an integer as input and outputs ln(n^2 + 1).

First, write a Python source code file and save it on your system, e.g. `/root/udf/myfun.py`, with the code below.
```python
from math import log

def init():
    pass

def destroy():
    pass

def process(block):
    rows, _ = block.shape()
    return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
```
This program consists of 3 functions. init() and destroy() do nothing, but they have to be defined even though there is nothing to do in them, because they are required parts of a Python UDF. The most important function is process(), which accepts a data block. The data block object has two methods:
1. shape() returns the number of rows and the number of columns of the data block
2. data(i, j) returns the value at (i, j) in the block

The process() function of a scalar UDF returns exactly as many rows as the input. We ignore the number of columns here because we only want to compute on the first column.
Then, we create the UDF using the SQL command below.
```sql
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
```
Here is an example of the output; it may differ slightly depending on the version you are using.
```shell
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
Create OK, 0 row(s) affected (0.005202s)
```
Then we use the `show functions` command to verify that the UDF was created successfully.
```text
taos> show functions;
name |
=================================
myfun |
Query OK, 1 row(s) in set (0.005767s)
```
Next, we can test the function. Before executing the UDF, we need to prepare some data using the commands below in the TDengine CLI.
```sql
create database test;
create table t(ts timestamp, v1 int, v2 int, v3 int);
insert into t values('2023-05-01 12:13:14', 1, 2, 3);
insert into t values('2023-05-03 08:09:10', 2, 3, 4);
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
```
Execute the UDF to test it:
```sql
taos> select myfun(v1, v2) from t;
DB error: udf function execution failure (0.011088s)
```
Unfortunately, the UDF execution failed. We need to check the log of the `udfd` daemon to find out why.
```shell
tail -10 /var/log/taos/udfd.log
```
Below is the output.
```text
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
```
From the error message we can find out that `libtaospyudf.so` was not loaded successfully. Please refer to the [Prepare Environment] section.
After correcting environment issues, execute the UDF:
```sql
taos> select myfun(v1) from t;
myfun(v1) |
============================
0.693147181 |
1.609437912 |
2.302585093 |
```
Now we have finished our first UDF in Python and learned some basic debugging skills.
#### Sample 2: Exception Handling
The `myfun` UDF example in sample 1 works, but it has two drawbacks.

1. The program accepts only one column of data as input, but it doesn't throw an exception if you pass multiple columns.
```sql
taos> select myfun(v1, v2) from t;
myfun(v1, v2) |
============================
0.693147181 |
1.609437912 |
2.302585093 |
```
2. `null` values are not handled. We expect the program to output `null` if the input contains `null`.

So we optimize the process() function as below.
```python
def process(block):
    rows, cols = block.shape()
    if cols > 1:
        raise Exception(f"require 1 parameter but given {cols}")
    return [None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
```
Then update the UDF with the command below.
```sql
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
```
At this time, if we pass two arguments to `myfun`, the execution would fail.
```sql
taos> select myfun(v1, v2) from t;
DB error: udf function execution failure (0.014643s)
```
However, the exception is not shown to the end user; it is written to the log file `/var/log/taos/taospyudf.log`:
```text
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
At:
/var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
```
Now, we have learned how to update a UDF and check the log of a UDF.
Note: Prior to TDengine 3.0.5.0 (exclusive), updating a UDF required restarting the `taosd` service. Since 3.0.5.0, restarting is no longer required.
#### Sample 3: UDF with n arguments
A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of each value multiplied by its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, the result is `null`. The difference from sample 1 is that this UDF accepts any number of columns as input and processes each column. Assume the program is saved as /root/udf/nsum.py:
```python
def init():
    pass

def destroy():
    pass

def process(block):
    rows, cols = block.shape()
    result = []
    for i in range(rows):
        total = 0
        for j in range(cols):
            v = block.data(i, j)
            if v is None:
                total = None
                break
            total += (j + 1) * v
        result.append(total)
    return result
```
Create and test the UDF:
```sql
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
```
```sql
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
Insert OK, 1 row(s) affected (0.003675s)
taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
================================================================================================
2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
Query OK, 4 row(s) in set (0.010653s)
```
#### Sample 4: Utilize 3rd party package
A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`; you need to install it first.
```shell
pip3 install moment
```
Then write the Python code into /root/udf/nextsunday.py:
```python
import moment

def init():
    pass

def destroy():
    pass

def process(block):
    rows, cols = block.shape()
    if cols > 1:
        raise Exception("require only 1 parameter")
    if not type(block.data(0, 0)) is int:
        raise Exception("type error")
    return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
            for i in range(rows)]
```
The UDF framework maps TDengine timestamps to the Python int type, so this function only accepts an integer representing milliseconds. process() first validates the parameters, then uses `moment` to find the next Sunday, formats the result, and outputs it.
Create and test the UDF.
```sql
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
```
If your `taosd` is started using `systemd`, you may encounter the error below. Next we will show how to debug it.
```sql
taos> select ts, nextsunday(ts) from t;
DB error: udf function execution failure (1.123615s)
```
```shell
tail -20 taospyudf.log
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
```
This is because `moment` doesn't exist in the default library search path of the Python UDF plugin. To confirm it, check the log file `taospyudf.log`:
```shell
grep 'sys path' taospyudf.log | tail -1
```
```text
2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
```
You may find that the default library search path is `/lib/python3/dist-packages` (just an example; it may be different on your system), but `moment` is installed to `/usr/local/lib/python3.8/dist-packages` (again, it may be different on your system). Now we change the library search path of the Python UDF plugin.

Check `sys.path` in a Python shell; it must include the directory where the packages you installed with pip3 reside, as shown below:
```python
>>> import sys
>>> ":".join(sys.path)
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
```
Copy the output and edit /var/taos/taos.cfg to add the configuration parameter below.
```shell
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
```
Save the file, restart `taosd` using `systemctl restart taosd`, and test again; it will succeed this time.

Note: If your cluster consists of multiple `taosd` instances, you have to repeat the same process on each of them.
```sql
taos> select ts, nextsunday(ts) from t;
ts | nextsunday(ts) |
===========================================
2023-05-01 12:13:14.000 | 2023-05-07 |
2023-05-03 08:09:10.000 | 2023-05-07 |
2023-05-10 07:06:05.000 | 2023-05-14 |
2023-05-25 09:09:15.000 | 2023-05-28 |
Query OK, 4 row(s) in set (1.011474s)
```
#### Sample 5: Aggregate Function
An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one row. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple blocks, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(). reduce() takes two arguments: the data to be processed and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
```python
import io
import math
import pickle

LOG_FILE: io.TextIOBase = None

def init():
    global LOG_FILE
    LOG_FILE = open("/var/log/taos/spread.log", "wt")
    log("init function myspead success")

def log(o):
    LOG_FILE.write(str(o) + '\n')

def destroy():
    log("close log file: spread.log")
    LOG_FILE.close()

def start():
    return pickle.dumps((-math.inf, math.inf))

def reduce(block, buf):
    max_number, min_number = pickle.loads(buf)
    log(f"initial max_number={max_number}, min_number={min_number}")
    rows, _ = block.shape()
    for i in range(rows):
        v = block.data(i, 0)
        if v > max_number:
            log(f"max_number={v}")
            max_number = v
        if v < min_number:
            log(f"min_number={v}")
            min_number = v
    return pickle.dumps((max_number, min_number))

def finish(buf):
    max_number, min_number = pickle.loads(buf)
    return max_number - min_number
```
In this example, we implement an aggregate function and add some logging.
1. init() opens a file for logging
2. log() is the logging helper; it converts the input object to a string and appends a line break
3. destroy() closes the log file
4. start() returns the initial buffer for storing the intermediate result
5. reduce() processes each data block and aggregates the result
6. finish() converts the final buffer into the final result
Create the UDF.
```sql
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
```
This SQL command has two important differences from the command that creates a scalar UDF:
1. the keyword `aggregate` is used
2. the keyword `bufsize` is used to specify the memory size for storing the intermediate result. In this example, the intermediate result is 32 bytes, but we specified 128 bytes for `bufsize`. You can use the `python` CLI to print the actual size:
```python
>>> len(pickle.dumps((12345.6789, 23456789.9877)))
32
```
Test this function; you can see the result is the same as that of the built-in spread() function.
```sql
taos> select myspread(v1) from t;
myspread(v1) |
============================
5.000000000 |
Query OK, 1 row(s) in set (0.013486s)
taos> select spread(v1) from t;
spread(v1) |
============================
5.000000000 |
Query OK, 1 row(s) in set (0.005501s)
```
At last, check the log file. We can see that the reduce() function was executed 3 times, the max value was updated 4 times, and the min value was updated only once.
```shell
root@slave11 /var/log/taos $ cat spread.log
init function myspead success
initial max_number=-inf, min_number=inf
max_number=1
min_number=1
initial max_number=1, min_number=1
max_number=2
max_number=3
initial max_number=3, min_number=1
max_number=6
close log file: spread.log
```
### SQL Commands
1. Create Scalar UDF
```sql
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
```
2. Create Aggregate UDF
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
```
3. Update Scalar UDF
```sql
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
```
4. Update Aggregate UDF
```sql
CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
```
Note: If the keyword `AGGREGATE` is used, the UDF will be treated as an aggregate UDF regardless of what it was before; similarly, if there is no keyword `aggregate`, the UDF will be treated as a scalar function regardless of what it was before.
5. Show the UDF
The version of a UDF is increased by one every time it's updated.
```sql
select * from ins_functions \G;
```
6. Show and Drop existing UDF
```sql
SHOW functions;
DROP FUNCTION function_name;
```
### More Python UDF Samples
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
The `pybitand` function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
<details>
<summary>pybitand.py</summary>
```Python
{{#include tests/script/sh/pybitand.py}}
```
</details>
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
<details>
<summary>pyl2norm.py</summary>
```Python
{{#include tests/script/sh/pyl2norm.py}}
```
</details>
#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
The `pycumsum` function finds the cumulative sum for all data in the input columns.
<details>
<summary>pycumsum.py</summary>
```Python
{{#include tests/script/sh/pycumsum.py}}
```
</details>
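The include above is resolved when the documentation is built. As a rough idea of the shape of such an aggregate, below is a simplified running-sum sketch in the same style as the myspread example; the actual pycumsum implementation may differ (for example, it may use numpy):

```python
import pickle

def init():
    pass

def destroy():
    pass

def start():
    # initial state: the running sum starts at zero
    return pickle.dumps(0.0)

def reduce(block, buf):
    total = pickle.loads(buf)
    rows, cols = block.shape()
    for i in range(rows):
        for j in range(cols):
            v = block.data(i, j)
            if v is not None:  # skip NULL values
                total += v
    return pickle.dumps(total)

def finish(buf):
    return pickle.loads(buf)
```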
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
View File
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
- Internal function `NOW` can be used to get the current timestamp on the client side.
- The current timestamp of the client side is applied when `NOW` is used to insert data.
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
@ -24,25 +24,24 @@ CREATE DATABASE db_name PRECISION 'ns';
In TDengine, the data types below can be used when specifying a column or tag.
| # | **type** | **Bytes** | **Description** |
| --- | :---------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
| 10 | SMALLINT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
:::note
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
View File
@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX: When it's a positive number, the prefix of this length in a table name is ignored when distributing the table to a vgroup; when it's negative, only the prefix of this length is used. The default value is 0. For example, for the table name v30001, "0001" is used if TABLE_PREFIX is set to 2, but "v3" is used if TABLE_PREFIX is set to -2. This can help you control the distribution of tables.
- TABLE_SUFFIX: When it's a positive number, the suffix of this length in a table name is ignored when distributing the table to a vgroup; when it's negative, only the suffix of this length is used. The default value is 0. For example, for the table name v30001, "v300" is used if TABLE_SUFFIX is set to 2, but "01" is used if TABLE_SUFFIX is set to -2. This can help you control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
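For example, the statement below (the database name and values are illustrative) creates a database that ignores a 2-character prefix when assigning tables to vgroups and keeps WAL files for one hour for subscription:

```sql
CREATE DATABASE power_db TABLE_PREFIX 2 WAL_RETENTION_PERIOD 3600;
```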
View File
@ -33,7 +33,7 @@ column_definition:
SHOW STABLES [LIKE tb_name_wildcard];
```
The preceding SQL statement shows all supertables in the current TDengine database.
### View the CREATE Statement for a Supertable
View File
@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
## Automatically Create Table When Inserting
View File
@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...
@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
For sub-table and super table:
```sql
SELECT *
View File
@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
## Introduction
Prior to TDengine 3.0.3.0 (exclusive), only one index is created by default on the first tag of each super table, and it's not allowed to dynamically create indexes on other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
1. The syntax of creating an index
```sql
CREATE INDEX index_name ON tbl_name (tagColName)
```
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
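For instance, assuming a supertable `meters` with a tag column `location`, an index on that tag might be created as follows (the index name is arbitrary):

```sql
CREATE INDEX idx_location ON meters (location);
```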
@ -48,4 +48,4 @@ You can also add filter conditions to limit the results.
6. You can't create an index on a normal table or a child table.
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
View File
@ -5,9 +5,9 @@ description: This document describes the standard SQL functions available in TDe
toc_max_heading_level: 4
---
## Scalar Functions

Scalar functions return one result for each row.
### Mathematical Functions
@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
**More explanations**:
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use
@ -459,12 +459,17 @@ TO_JSON(str_literal)
#### TO_UNIXTIMESTAMP
```sql
TO_UNIXTIMESTAMP(expr [, return_timestamp])
return_timestamp: {
0
| 1
}
```
**Description**: UNIX timestamp converted from a string of date/time format
**Return value type**: BIGINT, TIMESTAMP
**Applicable column types**: VARCHAR and NCHAR
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted
- The precision of the returned timestamp is the same as the precision set for the current database in use
- return_timestamp indicates whether the returned value type is TIMESTAMP or not. If this parameter is set to 1, the function returns TIMESTAMP type; otherwise it returns BIGINT. If the parameter is omitted, the default return value type is BIGINT.
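A minimal sketch of the two return types (the timestamp literal is arbitrary):

```sql
SELECT TO_UNIXTIMESTAMP('2023-05-01T12:13:14+08:00');    -- BIGINT epoch value
SELECT TO_UNIXTIMESTAMP('2023-05-01T12:13:14+08:00', 1); -- the same instant as TIMESTAMP
```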
### Time and Date Functions
@ -620,7 +626,7 @@ algo_type: {
**Applicable table types**: standard tables and supertables
**Explanations**:
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
@ -666,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Return value type**: Double if the input value is not NULL;
@ -674,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
**Applicable tables**: table, STable, outer in nested query
**Explanations**:
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
@ -752,7 +758,7 @@ SUM(expr)
HYPERLOGLOG(expr)
```
**Description**:
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
@ -766,10 +772,10 @@ HYPERLOGLOG(expr)
### HISTOGRAM
```sql
HISTOGRAM(expr, bin_type, bin_description, normalized)
```
**Description**: Returns count of data points in user-specified ranges.
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
@ -777,18 +783,18 @@ HISTOGRAM(exprbin_type, bin_description, normalized)
**Applicable table types**: table, STable
**Explanations**:
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
- bin_description: parameter to describe how to generate buckets, which can be in the following JSON formats for each bin_type respectively:
- "user_input": "[1, 3, 5, 7]":
User specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add-inf, infas start/end point in generated set of bins.
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add-inf, infas start/end point in generated range of bins.
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
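As a sketch, assuming the `meters` table used elsewhere in these docs, the query below counts voltage readings in four 10-unit buckets starting at 200, with no infinity buckets and no normalization:

```sql
SELECT HISTOGRAM(voltage, 'linear_bin',
                 '{"start": 200.0, "width": 10.0, "count": 4, "infinity": false}', 0)
FROM meters;
```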
@ -861,10 +867,16 @@ FIRST(expr)
### INTERP
```sql
INTERP(expr [, ignore_null_values])
ignore_null_values: {
0
| 1
}
```
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0.
**Return value type**: Same as the column being operated upon
@ -880,7 +892,7 @@ INTERP(expr)
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0).
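Putting these rules together, the sketch below (timestamps are illustrative) interpolates the current every 10 minutes over a one-hour range, per child table of the `meters` supertable:

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-05-01 00:00:00', '2023-05-01 01:00:00')
EVERY(10m)
FILL(LINEAR);
```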
@ -996,8 +1008,7 @@ SAMPLE(expr, k)
**More explanations**:
- This function cannot be used in expression calculation.
### TAIL
@ -1076,7 +1087,6 @@ CSUM(expr)
- Arithmetic operation can't be performed on the result of `csum` function
- Can only be used with aggregate functions.
- This function can be used with supertables and standard tables.
### DERIVATIVE
@ -1100,8 +1110,7 @@ ignore_negative: {
**More explanation**:
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
### DIFF
@ -1125,7 +1134,7 @@ ignore_negative: {
**More explanation**:
- The number of result rows is the number of rows subtracted by one, no output for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
### IRATE
@ -1163,7 +1172,6 @@ MAVG(expr, k)
- Arithmetic operation can't be performed on the result of `MAVG`.
- Can only be used with data columns, can't be used with tags.
- Can't be used with aggregate functions.
### STATECOUNT
@ -1177,7 +1185,7 @@ STATECOUNT(expr, oper, val)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val: Numeric types
**Return value type**: Integer
@ -1189,7 +1197,6 @@ STATECOUNT(expr, oper, val)
**More explanations**:
- Can't be used with window operation, like interval/state_window/session_window
@ -1204,7 +1211,7 @@ STATEDURATION(expr, oper, val, unit)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val: Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default.
**Return value type**: Integer
@ -1217,7 +1224,6 @@ STATEDURATION(expr, oper, val, unit)
**More explanations**:
- Can't be used with window operation, like interval/state_window/session_window
@ -1235,7 +1241,6 @@ TWA(expr)
**Applicable table types**: standard tables and supertables
## System Information Functions
View File
@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
1. NONE: No fill (the default fill mode)
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
4. NULL: Fill with NULL, `FILL(NULL)`
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, or `LINEAR`, because filling can't be performed if there is no data at all. For filling modes `NULL` and `VALUE`, however, filling can be performed even when there is no data, and whether to fill or not depends on the user's application. To meet this need for forced filling without breaking the behavior of the existing filling modes, TDengine added two new filling modes in version 3.0.3.0:
1. NULL_F: Fill `NULL` by force
2. VALUE_F: Fill `VALUE` by force
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. don't fill by force. It's suggested that there is no filling by force in stream processing.
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. filling by force; `VALUE` and `VALUE_F` are the same, i.e. filling by force. It's suggested that there is always filling by force when used with `INTERP`.
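As a sketch of the difference, assuming the `meters` table and a time range that contains no data at all, the first query below returns an empty result set, while the second returns one row per window, filled with 0 by force:

```sql
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-05-01 00:00:00' AND ts < '2023-05-01 01:00:00'
  INTERVAL(10m) FILL(VALUE, 0);

SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-05-01 00:00:00' AND ts < '2023-05-01 01:00:00'
  INTERVAL(10m) FILL(VALUE_F, 0);
```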
@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
There are two kinds of time windows: sliding window and flip time/tumbling window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
![TDengine Database Time Window](./timewindow-1.webp)
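For example, the query below (a sketch against the `meters` table defined later in this section) uses a 10-minute window sliding forward every 5 minutes, so each row contributes to two overlapping windows:

```sql
SELECT _wstart, _wend, AVG(current)
FROM meters
INTERVAL(10m) SLIDING(5m);
```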
@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
### State Window
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
![TDengine Database Status Window](./timewindow-3.webp)
@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
### Session Window
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
![TDengine Database Session Window](./timewindow-2.webp)
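A minimal sketch against the `meters` table: rows whose timestamps are within 12 seconds of each other fall into the same session window.

```sql
SELECT _wstart, _wend, COUNT(*)
FROM meters
SESSION(ts, 12s);
```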
@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
### Examples
A table of intelligent meters can be created by the SQL statement below:
```
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
View File
@ -13,8 +13,11 @@ Because stream processing is built in to TDengine, you are no longer reliant on
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
}
```
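For example, the statement below (the stream and output table names are illustrative) creates a stream that aggregates `meters` data into one-minute windows per child table, triggering when each window closes:

```sql
CREATE STREAM IF NOT EXISTS avg_vol_stream
  TRIGGER WINDOW_CLOSE
  INTO avg_vol_stb AS
    SELECT _wstart, COUNT(*), AVG(voltage)
    FROM meters
    PARTITION BY tbname
    INTERVAL(1m);
```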
@ -109,7 +112,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, the default value is AT_ONCE:
1. AT_ONCE: triggers on write
@ -141,3 +144,27 @@ The data in expired windows is tagged as expired. TDengine stream processing pro
2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned.
In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated).
## Supported functions
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [aggregate functions](../function/#aggregate-functions) and [selection functions](../function/#selection-functions) are available in stream processing, except the following:
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
- [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative)
- [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram)
- [diff](../function/#diff)
- [statecount](../function/#statecount)
- [stateduration](../function/#stateduration)
- [csum](../function/#csum)
- [mavg](../function/#mavg)
- [sample](../function/#sample)
- [tail](../function/#tail)
- [unique](../function/#unique)
- [mode](../function/#mode)
View File
@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
- The maximum length of keys in JSON is 256 bytes, and keys must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
- JSON format:
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-empty string that is not a JSON object, a bool, or an array.
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
View File
@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
1. If there are escape characters in identifiers (database name, table name, column name)
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII characters or underscores and can't start with a digit
- Identifier quoted with `` Original content is kept, no escaping
- Identifier quoted with ``: Original content is kept, no escaping
2. If there are escape characters in values
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`. If `\%` or `\_` is used outside the `like` context, the evaluation result is "`\%`" and "`\_`", instead of "%" and "\_".
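To illustrate the `like` rules (a sketch; table `t` and column `name` are illustrative):

```sql
-- \% escapes the wildcard: matches values beginning with the literal "100%".
SELECT * FROM t WHERE name LIKE '100\%%';
-- Outside of like, \% stays as-is: this compares against the literal string "\%".
SELECT * FROM t WHERE name = '\%';
```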

View File

@ -120,6 +120,9 @@ Provides information about user-defined functions.
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and incremented by 1 each time the UDF is updated |
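For example, the new columns can be inspected directly — a sketch assuming a `name` column among the fields not shown in this excerpt:

```sql
SELECT name, func_language, func_version
FROM information_schema.ins_functions;
```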
## INS_INDEXES
@ -181,7 +184,7 @@ Provides information about standard tables and subtables.
## INS_COLUMNS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
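For example, column metadata can be filtered like any other table — a sketch assuming a `col_name` column among the fields not shown in this excerpt, and the `meters` table defined earlier:

```sql
SELECT table_name, db_name, col_name
FROM information_schema.ins_columns
WHERE table_name = 'meters';
```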

View File

@ -69,7 +69,7 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 1 | consumer_id | BIGINT | Consumer ID |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | Client ID (user-defined) |
| 4 | status | BINARY(20) | Consumer status. Possible values: ready (consumer is in normal state), lost (the connection between consumer and mnode is broken), rebalance (redistribution of the vgroups that belong to the current consumer is in progress), unknown (consumer is in an invalid state) |
| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. |
| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server |
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |

View File

@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
description: This document describes how to use the SHOW statement in TDengine.
---
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
## SHOW APPS
@ -129,6 +129,14 @@ SHOW QNODES;
Shows information about qnodes in the system.
## SHOW QUERIES
```sql
SHOW QUERIES;
```
Shows the queries in progress in the system.
## SHOW SCORES
```sql
@ -179,10 +187,10 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
```sql
show table distributed d0\G;
```
<details>
@ -193,31 +201,31 @@ _block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Comp
Total_Blocks : Table `d0` contains total 5 blocks
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Average_size: The average size of each block is 18.73 KB
Compression_Ratio: The data compression rate is 23.98%
*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: Table `d0` contains 20,000 rows
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows
MinRows: The minimum number of rows in a block is 3,616
MaxRows: The maximum number of rows in a block is 4,096
Average_Rows: The average number of rows in a block is 4,000
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
Total_Tables: The number of child tables, 1 in this example
Total_Files: The number of files storing the table's data, 2 in this example
*************************** 4.row ***************************
@ -353,7 +361,7 @@ SHOW VARIABLES;
SHOW DNODE dnode_id VARIABLES;
```
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
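For example (dnode ID 1 is illustrative):

```sql
SHOW DNODE 1 VARIABLES;
```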
## SHOW VGROUPS
@ -361,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
SHOW [db_name.]VGROUPS;
```
Shows information about all vgroups in the current database.
## SHOW VNODES

View File

@ -7,17 +7,18 @@ description: This document describes the SQL statements related to user-defined
You can create user-defined functions and import them into TDengine.
## Create UDF
The SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDFs are stored in the management node of TDengine and remain available after TDengine is restarted.

When creating a UDF, its type, i.e. scalar function or aggregate function, must be specified. If the wrong type is specified, SQL statements using the function fail with errors. The input data type and output data type must be consistent with the UDF definition.
- Create Scalar Function
```sql
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
```
- OR REPLACE: if the UDF exists, the UDF properties are modified
- function_name: The scalar function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The data type of the results of the UDF.
For example, the following SQL statement can be used to create a UDF from `libbitand.so`.
@ -25,14 +26,20 @@ CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
```sql
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
```
For example, the following SQL statement can be used to modify the existing function `bit_and`. The output type is changed to BIGINT and the programming language is changed to Python.
```sql
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
```
- Create Aggregate Function
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
```
- OR REPLACE: if the UDF exists, the UDF properties are modified
- function_name: The aggregate function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of the UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The output data type, the value is the literal string of the supported TDengine data type.
- buffer_size: The size of the intermediate buffer in bytes. This parameter is optional.
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
For example, the following SQL statement modifies the buffer size of the existing UDF `l2norm` to 64:
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
## Manage UDF
@ -61,9 +73,9 @@ SHOW FUNCTIONS;
## Call UDF
The function name specified when creating UDF can be used directly in SQL statements, just like built-in functions. For example:
```sql
SELECT bit_and(c1,c2) FROM table;
```
The above SQL statement invokes function bit_and for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.
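For instance, combining the UDF with a filter might look like this (a sketch reusing the `bit_and` function created above; table and column names are illustrative):

```sql
SELECT bit_and(c1, c2) FROM t WHERE c1 IS NOT NULL;
```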

View File

@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>

View File

@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
- Information that you input is given in lowercase.
- \[ \] means optional input, excluding [] itself.
- | means one of a few options, excluding | itself.
- ... means the item prior to it can be repeated multiple times.
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:

View File

@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
chmod +x TDinsight.sh
```
Prepare:
1. TDengine Server
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
- User name and password
2. Grafana Alert Notification
@ -36,9 +36,310 @@ You can use below command to setup Grafana alert notification.
An existing Grafana Notification Channel can be specified with the parameter `-E`; the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
```bash
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
```
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
## log database
The data of the TDinsight dashboard is stored in the `log` database (by default; you can change it in taoskeeper's config file. For more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper creates the log database on startup.
### cluster\_info table
`cluster_info` table contains cluster information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of cluster|
|first\_ep\_dnode\_id|INT||dnode id of first\_ep|
|version|VARCHAR||TDengine version, such as 3.0.4.0|
|master\_uptime|FLOAT||days of master's uptime|
|monitor\_interval|INT||monitor interval in second|
|dbs\_total|INT||total number of databases in cluster|
|tbs\_total|BIGINT||total number of tables in cluster|
|stbs\_total|INT||total number of stables in cluster|
|dnodes\_total|INT||total number of dnodes in cluster|
|dnodes\_alive|INT||total number of dnodes in ready state|
|mnodes\_total|INT||total number of mnodes in cluster|
|mnodes\_alive|INT||total number of mnodes in ready state|
|vgroups\_total|INT||total number of vgroups in cluster|
|vgroups\_alive|INT||total number of vgroups in ready state|
|vnodes\_total|INT||total number of vnodes in cluster|
|vnodes\_alive|INT||total number of vnodes in ready state|
|connections\_total|INT||total number of connections to cluster|
|topics\_total|INT||total number of topics in cluster|
|streams\_total|INT||total number of streams in cluster|
|protocol|INT||protocol version|
|cluster\_id|NCHAR|TAG|cluster id|
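Since these are ordinary tables in the `log` database, they can be queried with regular SQL — for example (a sketch):

```sql
-- Fetch the most recent cluster overview record.
SELECT last_row(*) FROM log.cluster_info;
```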
### d\_info table
`d_info` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### m\_info table
`m_info` table contains mnode information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||the role of mnode. leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### dnodes\_info table
`dnodes_info` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
|req\_insert\_success|INT||number of successfully insert queries received per dnode|
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|req\_insert\_batch|INT||number of batch insertions|
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|errors|INT||dnode errors|
|vnodes\_num|INT||number of vnodes per dnode|
|masters|INT||number of master vnodes|
|has\_mnode|INT||if the dnode has mnode|
|has\_qnode|INT||if the dnode has qnode|
|has\_snode|INT||if the dnode has snode|
|has\_bnode|INT||if the dnode has bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### data\_dir table
`data_dir` table contains data directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_dir table
`log_dir` table contains log directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for log directory|
|total|BIGINT||total space for log directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### temp\_dir table
`temp_dir` table contains temp dir information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vgroups\_info table
`vgroups_info` table contains vgroups information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||database for the vgroup|
|tables\_num|BIGINT||number of tables per vgroup|
|status|VARCHAR||status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vnodes\_role table
`vnodes_role` table contains vnode role information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||role. leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs table
`logs` table contains log information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_summary table
`log_summary` table contains log summary information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||error count|
|info|INT||info count|
|debug|INT||debug count|
|trace|INT||trace count|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### grants\_info table
`grants_info` table contains grants information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||time until grants expire in seconds|
|timeseries\_used|BIGINT||timeseries used|
|timeseries\_total|BIGINT||total timeseries|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### keeper\_monitor table
`keeper_monitor` table contains keeper monitor information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|cpu|FLOAT||cpu usage|
|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||
### taosadapter\_restful\_http\_request\_total table
`taosadapter_restful_http_request_total` table contains taosadapter rest request information record. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_fail table
`taosadapter_restful_http_request_fail` table contains taosadapter failed rest request information record. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_in\_flight table
`taosadapter_restful_http_request_in_flight` table contains taosadapter rest request information record in real time. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
`taosadapter_restful_http_request_summary_milliseconds` table contains the summary of rest request information records. The timestamp column of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|count|DOUBLE|||
|sum|DOUBLE|||
|0.5|DOUBLE|||
|0.9|DOUBLE|||
|0.99|DOUBLE|||
|0.1|DOUBLE|||
|0.2|DOUBLE|||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
### taosadapter\_system\_mem\_percent table
`taosadapter_system_mem_percent` table contains taosadapter memory usage information. The timestamp of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent table
`taosadapter_system_cpu_percent` table contains taosadapter cpu usage information. The timestamp of this table is `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

View File

@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
Diagnostics for network connections can be executed between Linux/Windows/macOS.
Diagnostic steps:
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and the default value is 1,000.

Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively.
Output of the server side for the example is below:

View File

@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
- Custom authentication information is shown below:
```text
Authorization: Taosd <TOKEN>
```
- Basic authentication information is shown below:
```text
Authorization: Basic <TOKEN>

View File

@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
After TDengine server or client installation, `taos.h` is located at:

- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`

The dynamic libraries for the TDengine client driver are located as follows:
@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is determined by the number of characters contained in the timestamp.
schemaless interfaces:
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
@ -423,6 +424,6 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Description**
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
- The _raw interfaces pass data through the parameters lines and len, in order to solve the problem that data containing '\0' is truncated by the original interfaces. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
- The _reqid interfaces can track the entire call chain by passing the reqid parameter.

View File

@ -36,23 +36,110 @@ REST connection supports all platforms that can run Java.
Please refer to [version support list](/reference/connector#version-support)
## Recent update logs
| taos-jdbcdriver version | major changes |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
| 3.2.0 | This version has been deprecated |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
### Handling exceptions
After an error is reported, the error message and error code can be obtained through SQLException.
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
There are four types of error codes that the JDBC connector can report:
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
- Error code of the native connection method (error code between 0x2351 and 0x2360)
- Error code of the consumer method (error code between 0x2371 and 0x2380)
- Error code of other TDengine function modules.
For specific error codes, please refer to the table below:
| Error Code | Description | Suggested Actions |
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| 0x2301 | connection already closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
| 0x2302 | this operation is NOT supported currently! | The current interface does not support the connection. You can use another connection mode. |
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
| 0x2304 | statement is closed | The statement is closed. Check whether the statement is closed and used again, or whether the connection is normal. |
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set is used after being released. |
| 0x2306 | Batch is empty! | Add parameters to the prepared statement before executing batch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
| 0x2315 | unknown taos type in tdengine | Whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
| 0x2317 | | wrong request type was used in the REST connection. |
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
## TDengine DataType vs. Java DataType
TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Java is as follows:
| TDengine DataType | JDBCType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
**Note**: Only TAG columns support the JSON type
@ -82,7 +169,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.1</version>
</dependency>
```
@ -97,7 +184,7 @@ cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
</TabItem>
</Tabs>
@ -227,7 +314,7 @@ In addition to getting the connection from the specified URL, you can use Proper
Note:
- The client parameter set in the application is process-level. If you want to update the parameters of the client, you need to restart the application. This is because the client parameter is a global parameter that takes effect only the first time the application is set.
- The following sample code is based on taos-jdbcdriver-3.1.0.
```java
public Connection getConn() throws Exception{
@ -333,30 +420,6 @@ while(resultSet.next()){
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
@ -364,9 +427,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
**Note:**
- JDBC REST connections do not currently support bind interface
- The following sample code is based on taos-jdbcdriver-3.2.1
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
- Do not use `db.?` in prepareStatement when specifying the database with the table name; use `?` directly, then specify the database in setTableName, for example: `prepareStatement.setTableName("db.t1")`.
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class ParameterBindingDemo {
@ -594,21 +660,7 @@ public class ParameterBindingDemo {
}
```
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
The methods to set VALUES columns:
@ -625,17 +677,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
```java
public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_ws_parabind");
stmt.execute("create database if not exists test_ws_parabind");
stmt.execute("use test_ws_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(4, random.nextLong());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
pstmt.setLong(5, random.nextLong());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(1, random.nextFloat());
pstmt.setTagDouble(2, random.nextDouble());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setFloat(2, random.nextFloat());
pstmt.setDouble(3, random.nextDouble());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(1, random.nextBoolean());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setBoolean(2, random.nextBoolean());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(1, "abc");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setString(2, "abc");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(1, "California.SanFrancisco");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setNString(2, "California.SanFrancisco");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
}
```
</TabItem>
</Tabs>
The methods to set TAGS values:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
Note:
- JDBC REST connections do not currently support schemaless writes
- The following sample code is based on taos-jdbcdriver-3.0.0
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class SchemalessJniTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
@ -663,6 +901,41 @@ public class SchemalessInsertTest {
}
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
```java
public class SchemalessWsTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
init(connection);
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
System.exit(0);
}
private static void init(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate("drop database if exists test_ws_schemaless");
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
stmt.executeUpdate("use test_ws_schemaless");
}
}
}
```
</TabItem>
</Tabs>
### Data Subscription
The TDengine Java Connector supports subscription functionality with the following application API.
@ -686,6 +959,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
```java
Properties config = new Properties();
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
@ -693,12 +967,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
TaosConsumer consumer = new TaosConsumer<>(config);
```
- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used.
- enable.auto.commit: Specifies whether to commit automatically.
- group.id: Specifies the group that the consumer is in.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the type of connection with TDengine, `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
#### Subscribe to consume data
@ -706,8 +982,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
```java
while(true) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
```
@ -741,10 +1018,20 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
config.setProperty("td.connect.type", "jni");
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group1");
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@ -760,8 +1047,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -815,12 +1103,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
config.setProperty("td.connect.type", "ws");
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group2");
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@ -836,8 +1131,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -963,20 +1259,6 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
## Recent update logs
| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
## Frequently Asked Questions
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
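In TDengine the usual remedy is to batch rows into one multi-value INSERT statement instead of relying on `addBatch()`; a minimal sketch, assuming a table `test.weather (ts TIMESTAMP, temperature FLOAT)` and a placeholder connection URL:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

try (Connection conn = DriverManager.getConnection(
        "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata"); // placeholder URL
     Statement stmt = conn.createStatement()) {
    // One SQL statement carrying several rows; row tuples are space-separated.
    stmt.executeUpdate("INSERT INTO weather VALUES"
            + " (now, 23.5)"
            + " (now + 1s, 23.6)"
            + " (now + 2s, 23.7)");
}
```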
@ -999,15 +1281,15 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
4. java.lang.NoSuchMethodError: setByteArray
**Cause**: taos-jdbcdriver 3.* only supports TDengine 3.0 and later.
**Cause**: taos-jdbcdriver 3.\* only supports TDengine 3.0 and later.
**Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment.
**Solution**: Use taos-jdbcdriver 2.\* with your TDengine 2.\* deployment.
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
**Cause**taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Solution** Use taos-jdbcdriver 3.0.2.
**Solution**: Use taos-jdbcdriver 3.0.2.
For additional troubleshooting, see [FAQ](../../../train-faq/faq).

View File

@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
@ -120,7 +121,7 @@ The parameters are described as follows:
- **username/password**: Username and password used to create connections.
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
- **database**: Specify the default database to connect to. It's optional.
- **params**Optional parameters.
- **params**: Optional parameters.
A sample DSN description string is as follows:
@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />

View File

@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified.
- `url` The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
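A minimal connection sketch with these keyword arguments (the values shown are the documented defaults):

```python
import taosrest

conn = taosrest.connect(
    url="http://localhost:6041",
    user="root",
    password="taosdata",
    timeout=30,  # seconds; usually not needed
)
result = conn.query("SELECT server_version()")
print(result.data)  # rows of the result set
```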
@ -362,7 +362,7 @@ By using the optional req_id parameter, you can specify a request ID that can be
##### TaosConnection class
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
Connect in the same way as introduced above, but add the `req_id` argument.
```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
@ -372,13 +372,9 @@ The `TaosConnection` class contains both an implementation of the PEP249 Connect
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
:::
##### Use of TaosResult class
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
Fetch data in the same way as introduced above, but add the `req_id` argument.
```python title="blocks_iter method"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
@ -391,17 +387,12 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```
:::note
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, a cursor instance must remain exclusive to one thread and must not be shared across threads; otherwise, the returned results may contain errors.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
##### Use of TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
Connect in the same way as introduced above, but add the `req_id` argument.
```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
@ -421,8 +412,11 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
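A minimal usage sketch, assuming taosAdapter is reachable at the default endpoint:

```python
from taosrest import RestClient

client = RestClient("http://localhost:6041", user="root", password="taosdata")
res = client.sql("SELECT server_version()")  # returns the parsed JSON response as a dict
print(res["data"])
```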
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
Connect in the same way as introduced above, but add the `req_id` argument.
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
@ -459,6 +453,56 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
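For reference, a minimal line-protocol sketch of the list-style insert (the database name and sample line are placeholders):

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.select_db("test")

lines = [
    "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221 1626006833639000000",
]
# Creates the (super) table on first write and inserts the row.
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NANO_SECONDS)
```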
### Other sample programs
| Example program links | Example program content |

View File

@ -321,18 +321,18 @@ let cursor = conn.cursor();
| package name | version | TDengine version | Description |
|------------------|---------|---------------------|------------------------------------------------------------------|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-connector | 2.0.12 | 2.4.x2.5.x2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x2.5.x2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x2.5.x2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
### REST Connector
| package name | version | TDengine version | Description |
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-rest-connector | 1.0.7 | 2.4.x2.5.x2.6.x | Removed default port 6041。 |
| td2.0-rest-connector | 1.0.6 | 2.4.x2.5.x2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x2.5.x2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x2.5.x2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
## API Reference

View File

@ -165,7 +165,7 @@ The parameters are described as follows:
* **username/password**: Username and password used to create connections.
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
* **database**: Specify the default database to connect to. It's optional.
* **params**Optional parameters.
* **params**: Optional parameters.
A sample DSN description string is as follows:
@ -279,7 +279,7 @@ ws://localhost:6041/test
| TDengine.Connector | Description |
|--------------------|--------------------------------|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
| 3.0.1 | Support WebSocket and CloudWith function query, insert, and parameter binding|
| 3.0.1 | Support WebSocket and Cloud, with functions for query, insert, and parameter binding |
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |

View File

@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
PHP Connector relies on TDengine client driver.
Project Repository<https://github.com/Yurunsoft/php-tdengine>
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
After TDengine client or server is installed, `taos.h` is located at:
- Linux`/usr/local/taos/include`
- Windows`C:\TDengine\include`
- macOS`/usr/local/include`
- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`
TDengine client driver is located at:
- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`
- macOS`/usr/local/lib/libtaos.dylib`
- macOS: `/usr/local/lib/libtaos.dylib`
## Supported Platforms
- Windows、Linux、MacOS
- Windows, Linux, and macOS
- PHP >= 7.4
@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
### Install php-tdengine
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine location**
**Specify TDengine location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by TDengine location.
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install

View File

@ -54,94 +54,91 @@ Command-line arguments take precedence over environment variables over configura
```shell
Usage of taosAdapter:
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
--version Print the version and exit
```
Note:
@ -332,6 +329,10 @@ This parameter controls the number of results returned by the following interfac
taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, different HTTP status codes will be returned according to the error code returned by C. For details, see the HTTP Response Code chapter in [RESTful API](https://docs.tdengine.com/reference/rest-api/).
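For example, the behavior can be switched on through the corresponding environment variable shown in the CLI help above (a sketch):

```shell
TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR=true taosadapter
```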
## Configure whether schemaless writes automatically create DBs
Starting from version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create databases when writing with the schemaless protocol. The default value is false, meaning that databases are not created automatically and the user must create the database manually before performing schemaless writes.
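For example, the parameter can be set on the command line or through its environment variable (a sketch; both names appear in the CLI help above):

```shell
taosadapter --smlAutoCreateDB
# or
TAOS_ADAPTER_SML_AUTO_CREATE_DB=true taosadapter
```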
## Troubleshooting
You can check the taosAdapter running status with the `systemctl status taosadapter` command.

View File

@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
- **trying_interval**: Specifies the interval between insert retries. The value must be a positive number, and it only takes effect when retrying is enabled. Available with v3.0.9+.
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
 
- **continue_if_fail**: Allows the user to specify how taosBenchmark reacts when an insertion fails.
- "continue_if_fail": "no" // the default behavior: taosBenchmark exits if an insertion fails.

View File

@ -76,6 +76,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-A, --all-databases Dump all databases.
-D, --databases=DATABASES Dump listed databases. Use comma to separate
database names.
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump table schemas.
-y, --answer-yes Input yes for prompt. It will skip data file

View File

@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster, and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
Use the `uid` value obtained above as `-E` input.
```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
![TDengine Database TDinsight 选择数据库](./assets/select_dashboard_db.webp)
![TDengine Database TDinsight select database](./assets/select_dashboard_db.webp)
## TDinsight dashboard details

View File

@ -45,19 +45,19 @@ The parameters described in this document by the effect that they have on the sy
### firstEp
| Attribute | Description |
| -------- | -------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
| Default | localhost:6030 |
| Attribute | Description |
| ---------- | ---------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
| Default | localhost:6030 |
### secondEp
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
| Default | None |
| Attribute | Description |
| ---------- | ---------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
| Default | None |
### fqdn
@ -65,28 +65,28 @@ The parameters described in this document by the effect that they have on the sy
| ------------- | ------------------------------------------------------------------------ |
| Applicable | Server Only |
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
| Default Value | The first hostname configured for the host |
| Note | It should be within 96 bytes | |
| Default Value | The first hostname configured for the host |
| Note | It should be within 96 bytes |
### serverPort
| Attribute | Description |
| -------- | ----------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The port for external access after `taosd` is started |
| Default Value | 6030 |
| Attribute | Description |
| ------------- | ----------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The port for external access after `taosd` is started |
| Default Value | 6030 |
:::note
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
:::
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
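Putting the endpoint parameters above together, a minimal `taos.cfg` sketch (the host names are placeholders):

```
firstEp    h1.example.com:6030
secondEp   h2.example.com:6030
fqdn       h1.example.com
serverPort 6030
```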
### maxShellConns
@ -99,22 +99,21 @@ The parameters described in this document by the effect that they have on the sy
### numOfRpcSessions
| Attribute | Description |
| ------------- | ---------------------------------------------------- |
| Applicable | Client/Server |
| Meaning | The maximum number of connection to create |
| Value Range | 100-100000 |
| Default Value | 10000 |
| Attribute | Description |
| ------------- | ------------------------------------------ |
| Applicable | Client/Server |
| Meaning | The maximum number of connections to create |
| Value Range | 100-100000 |
| Default Value | 10000 |
### timeToGetAvailableConn
| Attribute | Description |
| ------------- | ---------------------------------------------------- |
| Applicable | Client/Server |
| Meaning | The maximum waiting time to get avaliable conn |
| Value Range | 10-50000000(ms) |
| Default Value | 500000 |
| Attribute | Description |
| ------------- | ---------------------------------------------- |
| Applicable | Client/Server |
| Meaning | The maximum waiting time to get an available connection |
| Value Range | 10-50000000(ms) |
| Default Value | 500000 |
## Monitoring Parameters
@ -123,114 +122,114 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
### monitor
| Attribute | Description |
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server only |
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`.
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
| Default | 0 |
| Attribute | Description |
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server only |
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`. |
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
| Default | 0 |
### monitorFqdn
| Attribute | Description |
| -------- | -------------------------- |
| Applicable | Server Only |
| Meaning | FQDN of taosKeeper monitoring service |
| Default | None |
| Attribute | Description |
| ---------- | ------------------------------------- |
| Applicable | Server Only |
| Meaning | FQDN of taosKeeper monitoring service |
| Default | None |
### monitorPort
| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Server Only |
| Meaning | Port of taosKeeper monitoring service |
| Default Value | 6043 |
| Attribute | Description |
| ------------- | ------------------------------------- |
| Applicable | Server Only |
| Meaning | Port of taosKeeper monitoring service |
| Default Value | 6043 |
### monitorInterval
| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Server Only |
| Attribute | Description |
| ------------- | ------------------------------------------ |
| Applicable | Server Only |
| Meaning | The interval of collecting system workload |
| Unit | second |
| Value Range | 1-200000 |
| Default Value | 30 |
| Value Range | 1-200000 |
| Default Value | 30 |
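Taken together, a minimal `taos.cfg` monitoring sketch using the parameters above (the taosKeeper FQDN is a placeholder):

```
monitor         1
monitorFqdn     keeper.example.com
monitorPort     6043
monitorInterval 30
```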
### telemetryReporting
| Attribute | Description |
| -------- | ---------------------------------------- |
| Applicable | Server Only |
| Attribute | Description |
| ------------- | ---------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |
| Default Value | 1 |
### crashReporting
| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning |Switch for allowing TDengine to collect and report crash related information |
| Value Range | 0,1 0: Not allowed1allowed |
| Default Value | 1 |
| Attribute | Description |
| ------------- | ---------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report crash related information |
| Value Range | 0,1 0: Not allowed; 1: allowed |
| Default Value | 1 |
## Query Parameters
### queryPolicy
| Attribute | Description |
| -------- | ----------------------------- |
| Applicable | Client only |
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Attribute | Description |
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
### querySmaOptimize
| Attribute | Description |
| -------- | -------------------- |
| Applicable | Client only |
| Meaning | SMA index optimization policy |
| Unit | None |
| Default Value | 0 |
| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
| Attribute | Description |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | SMA index optimization policy |
| Unit | None |
| Default Value | 0 |
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
### countAlwaysReturnValue
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server only |
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
| Vlue Range | 0Return empty line1Return 0 |
| Default | 1 |
| Notes | When this parameter is setting to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clause, and input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
| Attribute | Description |
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server only |
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
| Value Range | 0: Return empty line, 1: Return 0 |
| Default | 1 |
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY, and INTERVAL clauses where the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
### maxNumOfDistinctRes
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Attribute | Description |
| ------------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
| Value Range | [100,000 - 100,000,000] |
| Default Value | 100,000 |
### keepColumnName
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Client only |
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
| Value Range | 0 means including the function name, 1 means not including the function name. |
| Default Value | 0 |
| Attribute | Description |
| ------------- | --------------------------------------------------------------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Whether the returned column name contains the function name when the Last, First, or LastRow function is queried. |
| Value Range | 0 means including the function name, 1 means not including the function name. |
| Default Value | 0 |
## Locale Parameters
### timezone
| Attribute | Description |
| -------- | ------------------------------ |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ------------------------------- |
| Applicable | Server and Client |
| Meaning | TimeZone |
| Default Value | TimeZone configured in the host |
@ -333,383 +332,404 @@ The charset that takes effect is UTF-8.
### dataDir
| Attribute | Description |
| -------- | ------------------------------------------ |
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
| Attribute | Description |
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
### tempDir
| Attribute | Description |
| -------- | ------------------------------------------ |
| Applicable | Server only |
| Meaning | The directory where to put all the temporary files generated during system running |
| Default | /tmp |
| Attribute | Description |
| ---------- | ---------------------------------------------------------------------------------- |
| Applicable | Server only |
| Meaning | The directory where all temporary files generated during system running are stored |
| Default | /tmp |
### minimalTmpDirGB
| Attribute | Description |
| -------- | ------------------------------------------------ |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ----------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
| Unit | GB |
| Default Value | 1.0 |
| Unit | GB |
| Default Value | 1.0 |
### minimalDataDirGB
| Attribute | Description |
| -------- | ------------------------------------------------ |
| Applicable | Server Only |
| Attribute | Description |
| ------------- | ------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
| Unit | GB |
| Default Value | 2.0 |
| Unit | GB |
| Default Value | 2.0 |
## Cluster Parameters
### supportVnodes
| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of vnodes per dnode |
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |
| Attribute | Description |
| ------------- | ---------------------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of vnodes per dnode |
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |
## Performance Tuning
### numOfCommitThreads
| Attribute | Description |
| ------------- | ----------------------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of threads to commit |
| Value Range | 0-1024 |
| Default Value | |
## Log Parameters
### logDir
| Attribute | Description |
| -------- | -------------------------------------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ----------------------------------- |
| Applicable | Server and Client |
| Meaning | The directory for writing log files |
| Default Value | /var/log/taos |
### minimalLogDirGB
| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | -------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
| Unit | GB |
| Default Value | 1.0 |
| Unit | GB |
| Default Value | 1.0 |
### numOfLogLines
| Attribute | Description |
| -------- | ---------------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ------------------------------------------ |
| Applicable | Server and Client |
| Meaning | Maximum number of lines in single log file |
| Default Value | 10000000 |
| Default Value | 10000000 |
### asyncLog
| Attribute | Description |
| -------- | -------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ---------------------------- |
| Applicable | Server and Client |
| Meaning | The mode of writing log file |
| Value Range | 0: sync way; 1: async way |
| Default Value | 1 |
| Default Value | 1 |
### logKeepDays
| Attribute | Description |
| -------- | ----------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | The number of days for log files to be kept |
| Unit | day |
| Default Value | 0 |
| Unit | day |
| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
### debugFlag
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client |
| Attribute | Description |
| ------------- | --------------------------------------------------------- |
| Applicable | Server and Client |
| Meaning | Log level |
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
| Default Value | 131 or 135, depending on the module |
### tmrDebugFlag
| Attribute | Description |
| -------- | -------------------- |
| Attribute | Description |
| ------------- | ------------------------- |
| Applicable | Server and Client |
| Meaning | Log level of timer module |
| Value Range | same as debugFlag |
| Default Value | |
| Value Range | same as debugFlag |
| Default Value | |
### uDebugFlag
| Attribute     | Description                |
| ------------- | -------------------------- |
| Applicable    | Server and Client          |
| Meaning       | Log level of common module |
| Value Range   | same as debugFlag          |
| Default Value |                            |
### rpcDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Server and Client       |
| Meaning       | Log level of rpc module |
| Value Range   | same as debugFlag       |
| Default Value |                         |
### jniDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Client Only             |
| Meaning       | Log level of jni module |
| Value Range   | same as debugFlag       |
| Default Value |                         |
### qDebugFlag
| Attribute     | Description               |
| ------------- | ------------------------- |
| Applicable    | Server and Client         |
| Meaning       | Log level of query module |
| Value Range   | same as debugFlag         |
| Default Value |                           |
### cDebugFlag
| Attribute     | Description         |
| ------------- | ------------------- |
| Applicable    | Client Only         |
| Meaning       | Log level of Client |
| Value Range   | same as debugFlag   |
| Default Value |                     |
### dDebugFlag
| Attribute     | Description        |
| ------------- | ------------------ |
| Applicable    | Server Only        |
| Meaning       | Log level of dnode |
| Value Range   | same as debugFlag  |
| Default Value | 135                |
### vDebugFlag
| Attribute     | Description        |
| ------------- | ------------------ |
| Applicable    | Server Only        |
| Meaning       | Log level of vnode |
| Value Range   | same as debugFlag  |
| Default Value |                    |
### mDebugFlag
| Attribute     | Description               |
| ------------- | ------------------------- |
| Applicable    | Server Only               |
| Meaning       | Log level of mnode module |
| Value Range   | same as debugFlag         |
| Default Value | 135                       |
### wDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Server Only             |
| Meaning       | Log level of WAL module |
| Value Range   | same as debugFlag       |
| Default Value | 135                     |
### sDebugFlag
| Attribute     | Description              |
| ------------- | ------------------------ |
| Applicable    | Server and Client        |
| Meaning       | Log level of sync module |
| Value Range   | same as debugFlag        |
| Default Value | 135                      |
### tsdbDebugFlag
| Attribute     | Description              |
| ------------- | ------------------------ |
| Applicable    | Server Only              |
| Meaning       | Log level of TSDB module |
| Value Range   | same as debugFlag        |
| Default Value |                          |
### tqDebugFlag
| Attribute     | Description            |
| ------------- | ---------------------- |
| Applicable    | Server only            |
| Meaning       | Log level of TQ module |
| Value Range   | same as debugFlag      |
| Default Value |                        |
### fsDebugFlag
| Attribute     | Description            |
| ------------- | ---------------------- |
| Applicable    | Server only            |
| Meaning       | Log level of FS module |
| Value Range   | same as debugFlag      |
| Default Value |                        |
### udfDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Server Only             |
| Meaning       | Log level of UDF module |
| Value Range   | same as debugFlag       |
| Default Value |                         |
### smaDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Server Only             |
| Meaning       | Log level of SMA module |
| Value Range   | same as debugFlag       |
| Default Value |                         |
### idxDebugFlag
| Attribute     | Description               |
| ------------- | ------------------------- |
| Applicable    | Server Only               |
| Meaning       | Log level of index module |
| Value Range   | same as debugFlag         |
| Default Value |                           |
### tdbDebugFlag
| Attribute     | Description             |
| ------------- | ----------------------- |
| Applicable    | Server Only             |
| Meaning       | Log level of TDB module |
| Value Range   | same as debugFlag       |
| Default Value |                         |
## Schemaless Parameters
### smlChildTableName
| Attribute     | Description                                |
| ------------- | ------------------------------------------ |
| Applicable    | Client only                                |
| Meaning       | Custom subtable name for schemaless writes |
| Type          | String                                     |
| Default Value | None                                       |
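As an illustration (the tag key `tname` below is hypothetical): if the client is configured with `smlChildTableName tname`, a schemaless line that carries a `tname` tag is written into the subtable named by that tag's value instead of an auto-generated table name:

```
st,tname=cpu1,t1=3i64,t2=4f64 c1=3i64,c2=false 1626006833639000000
```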
### smlTagName
| Attribute     | Description                                                    |
| ------------- | -------------------------------------------------------------- |
| Applicable    | Client only                                                     |
| Meaning       | Default tag for schemaless writes without tag value specified   |
| Type          | String                                                          |
| Default Value | _tag_null                                                       |
### smlDataFormat
| Attribute   | Description                                                                            |
| ----------- | --------------------------------------------------------------------------------------- |
| Applicable  | Client only                                                                               |
| Meaning     | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
| Value Range | 0: not consistent; 1: consistent.                                                         |
| Default     | 0                                                                                         |
## Compress Parameters
### compressMsgSize
| Attribute   | Description                                                                                                       |
| ----------- | ------------------------------------------------------------------------------------------------------------------ |
| Applicable  | Both Client and Server side                                                                                          |
| Meaning     | Whether RPC messages are compressed                                                                                  |
| Value Range | -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed |
| Default     | -1                                                                                                                   |
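For example, to compress only large RPC messages, a byte threshold can be set in `taos.cfg` (the value is illustrative):

```
# compress RPC messages larger than 1024 bytes
compressMsgSize 1024
```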
## Other Parameters
### enableCoreFile
| Attribute     | Description                                                                                                                                                                                          |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable    | Server and Client                                                                                                                                                                                        |
| Meaning       | Whether to generate a core file when the server crashes                                                                                                                                                  |
| Value Range   | 0: false, 1: true                                                                                                                                                                                        |
| Default Value | 1                                                                                                                                                                                                        |
| Note          | The core file is generated under the root directory when `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start taosd, or under the working directory if `taosd` is started directly from a Linux/macOS shell. |
### enableScience
| Attribute | Description |
| ------------- | ------------------------------------------------------------- |
| Applicable | Only taos-CLI client |
| Meaning | Whether to show float and double with the scientific notation |
| Value Range | 0: false, 1: true |
| Default Value | 0 |
### udf
| Attribute     | Description                        |
| ------------- | ---------------------------------- |
| Applicable    | Server Only                        |
| Meaning       | Whether the UDF service is enabled |
| Value Range   | 0: disable UDF; 1: enable UDF      |
| Default Value | 1                                  |
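A minimal `taos.cfg` sketch of the two switches above (values illustrative):

```
# 1: generate a core file when the server crashes
enableCoreFile 1
# 1: enable the UDF service
udf 1
```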
## 3.0 Parameters
| #   | **Parameter**          | **Applicable to 2.x** | **Applicable to 3.0**        | Current behavior in 3.0 |
| --- | :--------------------: | ---------------------- | ---------------------------- | ----------------------- |
| 1 | firstEp | Yes | Yes | |
| 2 | secondEp | Yes | Yes | |
| 3 | fqdn | Yes | Yes | |
| 4 | serverPort | Yes | Yes | |
| 5 | maxShellConns | Yes | Yes | |
| 6 | monitor | Yes | Yes | |
| 7 | monitorFqdn | No | Yes | |
| 8 | monitorPort | No | Yes | |
| 9 | monitorInterval | Yes | Yes | |
| 10 | queryPolicy | No | Yes | |
| 11 | querySmaOptimize | No | Yes | |
| 12 | maxNumOfDistinctRes | Yes | Yes | |
| 15 | countAlwaysReturnValue | Yes | Yes | |
| 16 | dataDir | Yes | Yes | |
| 17 | minimalDataDirGB | Yes | Yes | |
| 18 | supportVnodes | No | Yes | |
| 19 | tempDir | Yes | Yes | |
| 20 | minimalTmpDirGB | Yes | Yes | |
| 21 | smlChildTableName | Yes | Yes | |
| 22 | smlTagName | Yes | Yes | |
| 23 | smlDataFormat | No | Yes(discarded since 3.0.3.0) | |
| 24 | statusInterval | Yes | Yes | |
| 25 | logDir | Yes | Yes | |
| 26 | minimalLogDirGB | Yes | Yes | |
| 27 | numOfLogLines | Yes | Yes | |
| 28 | asyncLog | Yes | Yes | |
| 29 | logKeepDays | Yes | Yes | |
| 30 | debugFlag | Yes | Yes | |
| 31 | tmrDebugFlag | Yes | Yes | |
| 32 | uDebugFlag | Yes | Yes | |
| 33 | rpcDebugFlag | Yes | Yes | |
| 34 | jniDebugFlag | Yes | Yes | |
| 35 | qDebugFlag | Yes | Yes | |
| 36 | cDebugFlag | Yes | Yes | |
| 37 | dDebugFlag | Yes | Yes | |
| 38 | vDebugFlag | Yes | Yes | |
| 39 | mDebugFlag | Yes | Yes | |
| 40 | wDebugFlag | Yes | Yes | |
| 41 | sDebugFlag | Yes | Yes | |
| 42 | tsdbDebugFlag | Yes | Yes | |
| 43 | tqDebugFlag | No | Yes | |
| 44 | fsDebugFlag | Yes | Yes | |
| 45 | udfDebugFlag | No | Yes | |
| 46 | smaDebugFlag | No | Yes | |
| 47 | idxDebugFlag | No | Yes | |
| 48 | tdbDebugFlag | No | Yes | |
| 49 | metaDebugFlag | No | Yes | |
| 50 | timezone | Yes | Yes | |
| 51 | locale | Yes | Yes | |
| 52 | charset | Yes | Yes | |
| 53 | udf | Yes | Yes | |
| 54 | enableCoreFile | Yes | Yes | |
@ -108,7 +108,7 @@ The following `launchctl` commands can help you manage taoskeeper service:
#### Launch With Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/taoskeeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
$ taoskeeper -c <keeper config file>
@ -153,6 +153,10 @@ database = "log"
# standard tables to monitor
tables = ["normal_table"]
# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
```
### Obtain Monitoring Metrics
@ -203,7 +207,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
```
### check\_health
```
$ curl -i http://127.0.0.1:6043/check_health
@ -219,3 +223,29 @@ Content-Length: 19
{"version":"1.0.0"}
```
### taoskeeper with Prometheus
taosKeeper provides a `/metrics` API that exposes TDengine metric data to Prometheus.
#### scrape config
A scrape config in Prometheus specifies a set of targets and parameters describing how to scrape metric data from an endpoint. For more information, please refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
```
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: "taoskeeper"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:6043"]
```
#### Dashboard
There is a dashboard named `TaosKeeper Prometheus Dashboard for 3.x`, which provides a monitoring dashboard similar to TDinsight.
In Grafana, click the Dashboard menu and click `import`, enter the dashboard ID `18587` and click the `Load` button to import the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.
@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, multi-dimensional data can be shown when the SQL statement uses `group by` or `partition by`. For example, data can be shown per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
- Format to: legend format for `group by` or `partition by`. For example, series can be labeled per `dnode_ep` if the SQL is the same as above, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
:::note
Since the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL command to specify the database name.
:::
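For example, the database name can be written directly into the statement (the same `log.dnodes_info` table used in the examples above):

```sql
-- the database name is part of the statement, not the session
select _wstart as ts, avg(mem_system) from log.dnodes_info where ts >= $from and ts <= $to interval($interval)
```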
Following the default prompt, query the average system memory usage for the specified interval on the server where the current TDengine deployment is located, as shown below.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)
The following example queries the average system memory usage for the specified interval on each server.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard3.webp)
@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
![TDengine Database Grafana plugin import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
### Edit SQL fields
Copy the SQL below and paste it into the SQL edit area:
```sql
SELECT
@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
### Edit "action"
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
```
Basic cm9vdDp0YW9zZGF0YQ==
```
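As a quick way to verify this credential (a sketch assuming TDengine's REST service is reachable on its default port 6041), the same header can be sent to the REST API with curl:

```
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases" http://localhost:6041/rest/sql
```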
@ -46,15 +46,14 @@ Execute in any directory:
````
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
tar xzf confluent-7.1.1.tar.gz -C /opt/
````
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1
export PATH=$CONFLUENT_HOME/bin:$PATH
```
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
@ -315,7 +314,6 @@ connection.backoff.ms=5000
topic.prefix=tdengine-source-
poll.interval.ms=1000
fetch.max.rows=100
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@ -329,7 +327,15 @@ DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```
Use TDengine CLI to execute SQL script
@ -346,7 +352,7 @@ confluent local services connect connector load TDengineSourceConnector --config
### View topic data
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
````
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
@ -384,7 +390,7 @@ confluent local services connect connector status
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
````
confluent local services connect connector unload TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector
````
@ -417,11 +423,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
### TDengine Source Connector specific configuration
1. `connection.database`: source database name, no default value.
2. `topic.prefix`: topic name prefix used when importing data into Kafka. Its default value is the empty string "".
3. `timestamp.initial`: data synchronization start time, in the format 'yyyy-MM-dd HH:mm:ss'. If it is not set, importing to Kafka starts from the first/oldest row in the database.
4. `poll.interval.ms`: the interval, in milliseconds, for checking for newly created or removed tables. Default is 1000.
5. `fetch.max.rows`: the maximum number of rows retrieved per query against the database. Default is 100.
6. `query.interval.ms`: the time range, in milliseconds, of data read from TDengine in each query. It should be adjusted according to the data inflow rate. Default is 1000.
7. `topic.per.stable`: if set to true, one supertable in TDengine corresponds to one Kafka topic, and the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if set to false, the whole database corresponds to one topic, and the topic naming rule is `<topic.prefix>-<connection.database>`. A combined configuration sketch follows below.
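For reference, a sketch of a source-connector properties file combining the options above (property names are taken from this page; the values shown are illustrative, not defaults):

```
connection.database=test
topic.prefix=tdengine-source-
timestamp.initial=2018-10-03 14:38:05
poll.interval.ms=1000
fetch.max.rows=100
query.interval.ms=1000
topic.per.stable=true
```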
## Other notes
@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
![02](gds/gds-02.png.webp)
@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
![06](gds/gds-06.png.webp)
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
![07](gds/gds-07.png.webp)
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). mnode adopts RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. Other two follower mnodes need to be created through SQL command in TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Each dnode can communicate with each other to automatically get the EP of all mnodes.
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. snode is not coupled with a specific stream, that means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, it's identified by the EP of the dnode. mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using RAFT protocol. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID, to each vgroup. Virtual nodes with the same vnode group ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replication for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
- Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
### A Typical Data Writing Process
@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables' quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
<center> Figure 3: TDengine Leader writing process </center>
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. Leader vnode will write the original request packet into the database log file WAL. If the database configuration parameter `"wal_level"` is set to 1, vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Leader vnode writes the data into memory and adds the record to "skip list";
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error is returned directly to the application.
@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is the same as in the leader vnode.
3. Write into memory and add the record to "skip list".
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
### Synchronous Replication
@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba
### Tiered Storage
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".
dataDir format is as follows:
@ -202,7 +202,7 @@ dataDir data_path [tier_level]
Where data_path is the folder path of the mount point and tier_level is the media storage tier. A higher storage tier means older data files. Multiple hard disks can be mounted at the same storage tier, and data files on the same storage tier are distributed across all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
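# A sketch of the remaining entries for the six-disk layout described above
# (disk1 is the special mount path with no tier_level)
dataDir /mnt/disk2/taos 0
dataDir /mnt/disk3/taos 1
dataDir /mnt/disk4/taos 1
dataDir /mnt/disk5/taos 2
dataDir /mnt/disk6/taos 2
```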
@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### TDengine
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### Install TDengine
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici
### 2. Manual data migration
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then import them simultaneously through SQL statements written to the database.
Manual migration of data requires attention to the following two issues:
@ -258,7 +258,7 @@ Equivalent function: apercentile
Example:
```sql
select apercentile(col1, 50, "t-digest") from table_name
```
Remark:
@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
### 2. How can I resolve the "Unable to establish connection" error?
@ -10,6 +10,18 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" />
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />
@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />
@ -8,7 +8,7 @@
#include <sys/time.h>
#include <taos.h>
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
@ -78,7 +78,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
memcpy(str + len, row[i], charLen);
len += charLen;
@ -6,7 +6,7 @@
#include <string.h>
#include <taos.h>
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
@ -76,7 +76,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
memcpy(str + len, row[i], charLen);
len += charLen;
@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.1</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>
@ -6,39 +6,32 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
public class StmtInsertExample {
private static String datePattern = "yyyy-MM-dd HH:mm:ss.SSS";
private static DateTimeFormatter formatter = DateTimeFormatter.ofPattern(datePattern);
private static List<String> getRawData(int size) {
SimpleDateFormat format = new SimpleDateFormat(datePattern);
List<String> result = new ArrayList<>();
long current = System.currentTimeMillis();
Random random = new Random();
for (int i = 0; i < size; i++) {
String time = format.format(current + i);
int id = random.nextInt(10);
result.add("d" + id + "," + time + ",10.30000,219,0.31000,California.SanFrancisco,2");
}
return result.stream()
.sorted(Comparator.comparing(s -> s.split(",")[0])).collect(Collectors.toList());
}
private static Connection getConnection() throws SQLException {
@ -48,9 +41,9 @@ public class StmtInsertExample {
private static void createTable(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("CREATE DATABASE power KEEP 3650");
stmt.executeUpdate("USE power");
stmt.execute("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
stmt.execute("CREATE DATABASE if not exists power KEEP 3650");
stmt.executeUpdate("use power");
stmt.execute("CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
"TAGS (location BINARY(64), groupId INT)");
}
}
@ -58,21 +51,54 @@ public class StmtInsertExample {
private static void insertData() throws SQLException {
try (Connection conn = getConnection()) {
createTable(conn);
String psql = "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
String psql = "INSERT INTO ? USING power.meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
try (TSDBPreparedStatement pst = (TSDBPreparedStatement) conn.prepareStatement(psql)) {
String tableName = null;
ArrayList<Long> ts = new ArrayList<>();
ArrayList<Float> current = new ArrayList<>();
ArrayList<Integer> voltage = new ArrayList<>();
ArrayList<Float> phase = new ArrayList<>();
for (String line : getRawData(100000)) {
String[] ps = line.split(",");
if (tableName == null) {
// bind table name and tags
tableName = "power." + ps[0];
pst.setTableName(ps[0]);
pst.setTagString(0, ps[5]);
pst.setTagInt(1, Integer.valueOf(ps[6]));
} else {
if (!tableName.equals(ps[0])) {
pst.setTimestamp(0, ts);
pst.setFloat(1, current);
pst.setInt(2, voltage);
pst.setFloat(3, phase);
pst.columnDataAddBatch();
pst.columnDataExecuteBatch();
// bind table name and tags
tableName = ps[0];
pst.setTableName(ps[0]);
pst.setTagString(0, ps[5]);
pst.setTagInt(1, Integer.valueOf(ps[6]));
ts.clear();
current.clear();
voltage.clear();
phase.clear();
}
}
// bind values
// ps[1] looks like: 2018-10-03 14:38:05.000
LocalDateTime localDateTime = LocalDateTime.parse(ps[1], formatter);
ts.add(localDateTime.toInstant(ZoneOffset.of("+8")).toEpochMilli());
current.add(Float.valueOf(ps[2]));
voltage.add(Integer.valueOf(ps[3]));
phase.add(Float.valueOf(ps[4]));
}
pst.setTimestamp(0, ts);
pst.setFloat(1, current);
pst.setInt(2, voltage);
pst.setFloat(3, phase);
pst.columnDataAddBatch();
pst.columnDataExecuteBatch();
}
}
@ -1,5 +1,6 @@
package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
@ -52,19 +53,28 @@ public class SubscribeDemo {
// create consumer
Properties properties = new Properties();
properties.getProperty(TMQConstants.CONNECT_TYPE, "jni");
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
properties.setProperty(TMQConstants.CONNECT_USER, "root");
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test");
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
properties.setProperty(TMQConstants.GROUP_ID, "test1");
properties.setProperty(TMQConstants.CLIENT_ID, "1");
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<Meters> r : meters) {
Meters meter = r.value();
System.out.println(meter);
}
}
@ -1,5 +1,6 @@
package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo {
Properties properties = new Properties();
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
properties.setProperty(TMQConstants.CONNECT_USER, "root");
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test");
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
properties.setProperty(TMQConstants.GROUP_ID, "test2");
properties.setProperty(TMQConstants.CLIENT_ID, "1");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (Meters meter : meters) {
for (ConsumerRecord<Meters> r : meters) {
                        Meters meter = r.value();
System.out.println(meter);
}
}

View File

@ -0,0 +1,21 @@
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,74 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
res = conn.schemaless_insert_raw(lines, 1, 0)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except SchemalessError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,76 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,73 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=1)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=2)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,74 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_json() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
// SchemalessProtocol::Json
let data = [
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(300u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.ttl(1000)
.req_id(301u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.req_id(302u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}

View File

@ -0,0 +1,78 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_line() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(100u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.req_id(101u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl and req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(103u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}

View File

@ -0,0 +1,80 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_telnet() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(200u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.ttl(1000)
.req_id(201u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.req_id(202u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}

View File

@ -92,7 +92,7 @@ The main features of TDengine are as follows:
## Typical Use Cases
As a high-performance, distributed time-series database (Database) with SQL support, TDengine's typical use cases include, but are not limited to, IoT, industrial internet, connected vehicles, IT operations and maintenance, energy, and financial securities. Note that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it takes full advantage of the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The sections below analyze the applicable scenarios in more detail.
As a high-performance, distributed time-series database (Time-series Database) with SQL support, TDengine's typical use cases include, but are not limited to, IoT, industrial internet, connected vehicles, IT operations and maintenance, energy, and financial securities. Note that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it takes full advantage of the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The sections below analyze the applicable scenarios in more detail.
### Data Source Characteristics and Requirements

View File

@ -100,7 +100,8 @@ sudo apt-get install tdengine
:::tip
The apt-get method is only suitable for Debian or Ubuntu systems.
::::
:::
</TabItem>
<TabItem label="Windows 安装" value="windows">
@ -206,6 +207,8 @@ Active: inactive (dead)
- Check the service status: `sudo launchctl list | grep taosd`
- View service details: `launchctl print system/com.tdengine.taosd`
:::info
- The `launchctl` command requires administrator privileges to manage `com.tdengine.taosd`; always prefix it with `sudo` for safety.

View File

@ -82,7 +82,7 @@ TDengine provides a rich set of application development interfaces. To make it easy for users to quickly
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```

View File

@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Every data item in field_set must declare its own data type; for example, 1.2f32 denotes the FLOAT value 1.2, while a value without a type suffix is treated as DOUBLE;
- timestamp supports multiple time precisions. The precision is specified by a parameter at write time; six precisions are supported, from hours down to nanoseconds.
- To improve write efficiency, the field_set order is assumed by default to be identical within a supertable (the first row contains all fields, and subsequent rows follow that order). If the order differs, set the parameter smlDataFormat to false; otherwise the data is written in the assumed order and the stored data will be corrupted. (smlDataFormat defaults to false in versions after 3.0.1.3, and the option is deprecated as of 3.0.3.0.) [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a subtable named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored, as shown in the sketch below. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in the client-side taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a subtable named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored, as shown in the sketch below. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::
For more information, see the [official InfluxDB line protocol documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and the [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议).
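As a quick illustration, the subtable-naming example above can be reproduced from Python with the taospy client; the database name `power` is an assumption, and the `smlChildTableName=tname` setting must already be present in the client-side taos.cfg for the subtable to be named cpu1.

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()
conn.execute("create database if not exists power")
conn.select_db("power")

# with smlChildTableName=tname configured on the client, this row creates
# a subtable named cpu1; otherwise a rule-generated unique ID is used
lines = ['st,tname=cpu1,t1=4 c1=3 1626006833639000000']
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NOT_CONFIGURED)

conn.close()
```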

View File

@ -32,7 +32,7 @@ The OpenTSDB line protocol likewise uses one line of text to represent one row of data. OpenTSDB
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates a table named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored.
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in the client-side taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates a table named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored.
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html) and the sketch below.
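For reference, a minimal Python sketch that writes the telnet-format row above through the taospy schemaless API; the database name `test` is an assumption.

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()
conn.execute("create database if not exists test")
conn.select_db("test")

# one OpenTSDB telnet row: metric timestamp value tag1=... tag2=...
lines = ["meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3"]
conn.schemaless_insert(lines, SmlProtocol.TELNET_PROTOCOL, SmlPrecision.NOT_CONFIGURED)

conn.close()
```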
## Sample Code

View File

@ -47,7 +47,7 @@ The OpenTSDB JSON protocol uses a JSON string to represent one or more rows of data
:::note
- For the JSON protocol, TDengine does not automatically convert every tag to NCHAR: string values are converted to NCHAR, while numeric values are converted to DOUBLE.
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a subtable named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored.
- By default, the generated subtable name is a unique ID derived from a rule. Users can instead designate a tag value as the subtable name by configuring the smlChildTableName parameter in the client-side taos.cfg; that tag value must be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a subtable named cpu1. Note that if multiple rows share the same tname but carry different tag_sets, the tag_set of the first row that auto-creates the table is used and the others are ignored.
:::
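A minimal Python sketch for the JSON protocol, assuming the taospy client and a database named `test`; note that the whole JSON document is passed as a single string.

```python
import json

import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()
conn.execute("create database if not exists test")
conn.select_db("test")

data = [{
    "metric": "meters.current",
    "timestamp": 1648432611249,
    "value": 10.3,
    # with smlChildTableName=tname configured, the subtable would be named cpu1
    "tags": {"host": "web02", "dc": "lga", "tname": "cpu1"},
}]
conn.schemaless_insert([json.dumps(data)], SmlProtocol.JSON_PROTOCOL, SmlPrecision.NOT_CONFIGURED)

conn.close()
```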
## Sample Code

View File

@ -0,0 +1,3 @@
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```

View File

@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)
:::note
1. The sample code above works whether the connector uses a REST connection or a native connection.
2. The only caveat: because the REST interface is stateless, the `use db` statement cannot be used to switch databases.
2. The only caveat: because the REST interface is stateless, the `use db` statement cannot be used to switch databases. Besides specifying the database in the REST request parameters, you can also name it directly in SQL as <db_name>.<table_name>.
:::
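A small Python sketch of the db-qualified form over the REST connector; the `power.meters` table and the connection parameters are assumptions, and the result-object attributes should be checked against the installed taosrest version.

```python
import taosrest

# REST is stateless, so there is no "use db"; qualify tables instead
conn = taosrest.connect(url="http://localhost:6041", user="root", password="taosdata")
result = conn.query("SELECT ts, current FROM power.meters LIMIT 3")
print(result.data)  # rows come back in the result's data attribute
conn.close()
```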

Some files were not shown because too many files have changed in this diff.