Merge remote-tracking branch 'origin/3.0' into fix/TD-24001
commit 4c88fd08de

@@ -131,3 +131,4 @@ tools/BUGS
 tools/taos-tools
 tools/taosws-rs
 tags
+.clangd

@ -18,7 +18,7 @@
|
||||||
注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。
|
注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。
|
||||||
4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。
|
4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。
|
||||||
|
|
||||||
如遇任何问题,请添加官方微信TDengineECO。我们的团队会帮忙解决。
|
如遇任何问题,请添加官方微信 tdengine1。我们的团队会帮忙解决。
|
||||||
|
|
||||||
## 给贡献者的礼品
|
## 给贡献者的礼品
|
||||||
|
|
||||||
|
@@ -48,4 +48,4 @@ The TDengine community is committed to helping more developers understand and use TDengine.
 
 ## Contact Us
 
-If you have a problem to solve or a question to ask, you can add us on WeChat: TDengineECO
+If you have a problem to solve or a question to ask, you can add us on WeChat: tdengine1.

@@ -52,7 +52,7 @@ TDengine also provides a set of companion tools, taosTools, which currently includes taosBench
 ### Ubuntu 18.04 and above & Debian:
 
 ```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
 ```
 
 #### Install the build dependencies for taos-tools

@@ -352,4 +352,4 @@ TDengine provides a rich set of application development interfaces, including C/C++ and Java
 
 # Join the Technical Discussion Group
 
-The official TDengine community group "IoT Big Data Group" is open to everyone. Search for the WeChat ID "tdengine" and add Little T as a friend to join.
+The official TDengine community group "IoT Big Data Group" is open to everyone. Search for the WeChat ID "tdengine1" and add Little T as a friend to join.

@@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
 ### Ubuntu 18.04 and above or Debian
 
 ```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
 ```
 
 #### Install build dependencies for taosTools

@@ -117,17 +117,14 @@ ELSE ()
 
 IF (${BUILD_SANITIZER})
     SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
     MESSAGE(STATUS "Compile with Address Sanitizer!")
-ELSE ()
-    MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
-    IF (${TD_DARWIN})
-        SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
-        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
+ELSEIF (${BUILD_RELEASE})
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ELSE ()
     SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-    ENDIF ()
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ENDIF ()
 
 # disable all assert

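The `BUILD_SANITIZER` branch above turns on ASAN/UBSAN for the whole build. As a minimal, hedged sketch (the build directory name is illustrative), a sanitizer build could be configured like this:

```bash
# Illustrative out-of-tree configure; BUILD_SANITIZER is the cmake option used above.
mkdir -p debug && cd debug
cmake .. -DBUILD_SANITIZER=1
make -j"$(nproc)"
```
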
@@ -171,3 +171,8 @@ option(
 ON
 )
 
+option(
+  BUILD_RELEASE
+  "If build release version"
+  OFF
+)

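The new `BUILD_RELEASE` option defaults to OFF, so the optimized (`-O3`) branch added above must be requested explicitly; a minimal sketch:

```bash
# Illustrative: opt in to the release build path added in this commit.
cmake .. -DBUILD_RELEASE=ON
make -j"$(nproc)"
```
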
@@ -162,6 +162,14 @@ ELSE ()
 ENDIF ()
 ENDIF ()
 
+IF(APPLE)
+    set(CMAKE_THREAD_LIBS_INIT "-lpthread")
+    set(CMAKE_HAVE_THREADS_LIBRARY 1)
+    set(CMAKE_USE_WIN32_THREADS_INIT 0)
+    set(CMAKE_USE_PTHREADS 1)
+    set(THREADS_PREFER_PTHREAD_FLAG ON)
+ENDIF()
+
 MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
 
 MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")

@@ -1,8 +1,8 @@
 
 # rocksdb
 ExternalProject_Add(rocksdb
-    GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
-    GIT_TAG v6.23.3
+    GIT_REPOSITORY https://github.com/facebook/rocksdb.git
+    GIT_TAG v8.1.1
     SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
     CONFIGURE_COMMAND ""
     BUILD_COMMAND ""

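Since the dependency now points at upstream RocksDB pinned to a release tag, the same sources can be fetched by hand for inspection; a hedged sketch using the URL and tag from this hunk:

```bash
# Fetch exactly the pinned RocksDB release named above.
git clone --depth 1 --branch v8.1.1 https://github.com/facebook/rocksdb.git
```
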
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
     GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-    GIT_TAG ae8d51c
+    GIT_TAG 565ca21
     SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
     BINARY_DIR ""
     #BUILD_IN_SOURCE TRUE

@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
     GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-    GIT_TAG ffc2e6f
+    GIT_TAG 4378702
     SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
     BINARY_DIR ""
     #BUILD_IN_SOURCE TRUE

@@ -223,31 +223,53 @@ endif(${BUILD_WITH_LEVELDB})
 # rocksdb
 # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
 if(${BUILD_WITH_ROCKSDB})
-  #SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+  if(${TD_LINUX})
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
+  endif(${TD_LINUX})
+
+  if(${TD_DARWIN})
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+  endif(${TD_DARWIN})
+
+  if (${TD_WINDOWS})
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+  endif(${TD_WINDOWS})
+
+  if(${TD_DARWIN})
+    option(HAVE_THREAD_LOCAL "" OFF)
+    option(WITH_IOSTATS_CONTEXT "" OFF)
+    option(WITH_PERF_CONTEXT "" OFF)
+  endif(${TD_DARWIN})
+
+  if(${TD_WINDOWS})
+    option(WITH_JNI "" ON)
+  endif(${TD_WINDOWS})
+
+  if(${TD_WINDOWS})
+    option(WITH_MD_LIBRARY "build with MD" OFF)
+    set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
+  endif(${TD_WINDOWS})
+
+  option(WITH_FALLOCATE "" OFF)
+  option(WITH_JEMALLOC "" OFF)
+  option(WITH_GFLAGS "" OFF)
+  option(PORTABLE "" ON)
+  option(WITH_LIBURING "" OFF)
+  option(FAIL_ON_WARNINGS OFF)
+
   option(WITH_TESTS "" OFF)
   option(WITH_BENCHMARK_TOOLS "" OFF)
   option(WITH_TOOLS "" OFF)
   option(WITH_LIBURING "" OFF)
-  option(WITH_IOSTATS_CONTEXT "" OFF)
-  option(WITH_PERF_CONTEXT "" OFF)
-  option(FAIL_ON_WARNINGS "" OFF)
-  #option(WITH_JEMALLOC "" ON)
   option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
-  IF (${TD_WINDOWS})
-    option(WITH_MD_LIBRARY "build with MD" OFF)
-    set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
-  endif(${TD_WINDOWS})
   add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
   target_include_directories(
     rocksdb
     PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
   )
-  IF (${TD_DARWIN})
-    target_compile_options(
-      rocksdb
-      PRIVATE -Wno-unused-private-field
-    )
-  endif(${TD_DARWIN})
 endif(${BUILD_WITH_ROCKSDB})
 
 # lucene

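The `libgflags-dev` comment in this hunk lines up with the packages added to the README install commands earlier in this commit. A hedged sketch of preparing an Ubuntu host before configuring with RocksDB enabled (`BUILD_WITH_ROCKSDB` is the cmake option guarding this block; the invocation is illustrative):

```bash
# Packages mirror the README hunks above; configure flags are illustrative.
sudo apt-get install -y libgflags2.2 libgflags-dev
cmake .. -DBUILD_WITH_ROCKSDB=true
```
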
@@ -1,4 +1,5 @@
 #include <assert.h>
+#include <bits/stdint-uintn.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

@@ -9,40 +10,307 @@
 const char DBPath[] = "rocksdb_c_simple_example";
 const char DBBackupPath[] = "/tmp/rocksdb_c_simple_example_backup";
 
+static const int32_t endian_test_var = 1;
+#define IS_LITTLE_ENDIAN() (*(uint8_t *)(&endian_test_var) != 0)
+#define TD_RT_ENDIAN() (IS_LITTLE_ENDIAN() ? TD_LITTLE_ENDIAN : TD_BIG_ENDIAN)
+
+#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
+static void *taosDecodeFixedU64(const void *buf, uint64_t *value) {
+  if (IS_LITTLE_ENDIAN()) {
+    memcpy(value, buf, sizeof(*value));
+  } else {
+    ((uint8_t *)value)[7] = ((uint8_t *)buf)[0];
+    ((uint8_t *)value)[6] = ((uint8_t *)buf)[1];
+    ((uint8_t *)value)[5] = ((uint8_t *)buf)[2];
+    ((uint8_t *)value)[4] = ((uint8_t *)buf)[3];
+    ((uint8_t *)value)[3] = ((uint8_t *)buf)[4];
+    ((uint8_t *)value)[2] = ((uint8_t *)buf)[5];
+    ((uint8_t *)value)[1] = ((uint8_t *)buf)[6];
+    ((uint8_t *)value)[0] = ((uint8_t *)buf)[7];
+  }
+
+  return POINTER_SHIFT(buf, sizeof(*value));
+}
+
+// ---- Fixed U64
+static int32_t taosEncodeFixedU64(void **buf, uint64_t value) {
+  if (buf != NULL) {
+    if (IS_LITTLE_ENDIAN()) {
+      memcpy(*buf, &value, sizeof(value));
+    } else {
+      ((uint8_t *)(*buf))[0] = value & 0xff;
+      ((uint8_t *)(*buf))[1] = (value >> 8) & 0xff;
+      ((uint8_t *)(*buf))[2] = (value >> 16) & 0xff;
+      ((uint8_t *)(*buf))[3] = (value >> 24) & 0xff;
+      ((uint8_t *)(*buf))[4] = (value >> 32) & 0xff;
+      ((uint8_t *)(*buf))[5] = (value >> 40) & 0xff;
+      ((uint8_t *)(*buf))[6] = (value >> 48) & 0xff;
+      ((uint8_t *)(*buf))[7] = (value >> 56) & 0xff;
+    }
+
+    *buf = POINTER_SHIFT(*buf, sizeof(value));
+  }
+
+  return (int32_t)sizeof(value);
+}
+
+typedef struct KV {
+  uint64_t k1;
+  uint64_t k2;
+} KV;
+
+int kvSerial(KV *kv, char *buf) {
+  int len = 0;
+  len += taosEncodeFixedU64((void **)&buf, kv->k1);
+  len += taosEncodeFixedU64((void **)&buf, kv->k2);
+  return len;
+}
+const char *kvDBName(void *name) { return "kvDBname"; }
+int kvDBComp(void *state, const char *aBuf, size_t aLen, const char *bBuf, size_t bLen) {
+  KV w1, w2;
+
+  memset(&w1, 0, sizeof(w1));
+  memset(&w2, 0, sizeof(w2));
+
+  char *p1 = (char *)aBuf;
+  char *p2 = (char *)bBuf;
+  // p1 += 1;
+  // p2 += 1;
+
+  p1 = taosDecodeFixedU64(p1, &w1.k1);
+  p2 = taosDecodeFixedU64(p2, &w2.k1);
+
+  p1 = taosDecodeFixedU64(p1, &w1.k2);
+  p2 = taosDecodeFixedU64(p2, &w2.k2);
+
+  if (w1.k1 < w2.k1) {
+    return -1;
+  } else if (w1.k1 > w2.k1) {
+    return 1;
+  }
+
+  if (w1.k2 < w2.k2) {
+    return -1;
+  } else if (w1.k2 > w2.k2) {
+    return 1;
+  }
+  return 0;
+}
+int kvDeserial(KV *kv, char *buf) {
+  char *p1 = (char *)buf;
+  // p1 += 1;
+  p1 = taosDecodeFixedU64(p1, &kv->k1);
+  p1 = taosDecodeFixedU64(p1, &kv->k2);
+
+  return 0;
+}
+
 int main(int argc, char const *argv[]) {
   rocksdb_t *db;
   rocksdb_backup_engine_t *be;
-  rocksdb_options_t * options = rocksdb_options_create();
-  rocksdb_options_set_create_if_missing(options, 1);
 
-  // open DB
   char *err = NULL;
-  db = rocksdb_open(options, DBPath, &err);
+  const char *path = "/tmp/db";
 
-  // Write
-  rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
-  rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
+  rocksdb_options_t *opt = rocksdb_options_create();
+  rocksdb_options_set_create_if_missing(opt, 1);
+  rocksdb_options_set_create_missing_column_families(opt, 1);
 
   // Read
   rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
   // rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
+  int len = 1;
   char buf[256] = {0};
   size_t vallen = 0;
   char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
   snprintf(buf, vallen + 5, "val:%s", val);
   printf("%ld %ld %s\n", strlen(val), vallen, buf);
 
-  // Update
-  // rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
+  char **cfName = calloc(len, sizeof(char *));
+  for (int i = 0; i < len; i++) {
+    cfName[i] = "test";
+  }
+  const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
+  for (int i = 0; i < len; i++) {
+    cfOpt[i] = rocksdb_options_create_copy(opt);
+    if (i != 0) {
+      rocksdb_comparator_t *comp = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+      rocksdb_options_set_comparator((rocksdb_options_t *)cfOpt[i], comp);
+    }
+  }
 
-  // Delete
-  rocksdb_delete(db, writeoptions, "key", 3, &err);
+  rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
+  db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);
 
-  // Read again
-  val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
-  printf("val:%s\n", val);
+  {
+    rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
+    size_t vlen = 0;
+
+    char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
+    printf("Get value %s, and len = %d\n", v, (int)vlen);
+  }
+
+  rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
+  rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
+  rocksdb_writebatch_put_cf(wBatch, cfHandle[0], "key", strlen("key"), "value", strlen("value"));
+  rocksdb_write(db, wOpt, wBatch, &err);
+
+  rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
+  size_t vlen = 0;
+
+  {
+    rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
+    rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
+    for (int i = 0; i < 100; i++) {
+      char buf[128] = {0};
+      KV kv = {.k1 = (100 - i) % 26, .k2 = i % 26};
+      kvSerial(&kv, buf);
+      rocksdb_writebatch_put_cf(wBatch, cfHandle[1], buf, sizeof(kv), "value", strlen("value"));
+    }
+    rocksdb_write(db, wOpt, wBatch, &err);
+  }
+  {
+    {
+      char buf[128] = {0};
+      KV kv = {.k1 = 0, .k2 = 0};
+      kvSerial(&kv, buf);
+      char *v = rocksdb_get_cf(db, rOpt, cfHandle[1], buf, sizeof(kv), &vlen, &err);
+      printf("Get value %s, and len = %d, xxxx\n", v, (int)vlen);
+      rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+      rocksdb_iter_seek_to_first(iter);
+      int i = 0;
+      while (rocksdb_iter_valid(iter)) {
+        size_t klen, vlen;
+        const char *key = rocksdb_iter_key(iter, &klen);
+        const char *value = rocksdb_iter_value(iter, &vlen);
+        KV kv;
+        kvDeserial(&kv, (char *)key);
+        printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
+        i++;
+        rocksdb_iter_next(iter);
+      }
+      rocksdb_iter_destroy(iter);
+    }
+    {
+      char buf[128] = {0};
+      KV kv = {.k1 = 0, .k2 = 0};
+      int len = kvSerial(&kv, buf);
+      rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+      rocksdb_iter_seek(iter, buf, len);
+      if (!rocksdb_iter_valid(iter)) {
+        printf("invalid iter");
+      }
+      {
+        char buf[128] = {0};
+        KV kv = {.k1 = 100, .k2 = 0};
+        int len = kvSerial(&kv, buf);
+
+        rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+        rocksdb_iter_seek(iter, buf, len);
+        if (!rocksdb_iter_valid(iter)) {
+          printf("invalid iter\n");
+          rocksdb_iter_seek_for_prev(iter, buf, len);
+          if (!rocksdb_iter_valid(iter)) {
+            printf("stay invalid iter\n");
+          } else {
+            size_t klen = 0, vlen = 0;
+            const char *key = rocksdb_iter_key(iter, &klen);
+            const char *value = rocksdb_iter_value(iter, &vlen);
+            KV kv;
+            kvDeserial(&kv, (char *)key);
+            printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
+          }
+        }
+      }
+    }
+  }
 
+  // char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
+  // printf("Get value %s, and len = %d\n", v, (int)vlen);
+
+  rocksdb_column_family_handle_destroy(cfHandle[0]);
+  rocksdb_column_family_handle_destroy(cfHandle[1]);
   rocksdb_close(db);
+
+  // {
+  //   // rocksdb_options_t *Options = rocksdb_options_create();
+  //   db = rocksdb_open(comm, path, &err);
+  //   if (db != NULL) {
+  //     rocksdb_options_t *cfo = rocksdb_options_create_copy(comm);
+  //     rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+  //     rocksdb_options_set_comparator(cfo, cmp1);
+
+  //     rocksdb_column_family_handle_t *handle = rocksdb_create_column_family(db, cfo, "cf1", &err);
+
+  //     rocksdb_column_family_handle_destroy(handle);
+  //     rocksdb_close(db);
+  //     db = NULL;
+  //   }
+  // }
+
+  // int ncf = 2;
+
+  // rocksdb_column_family_handle_t **pHandle = malloc(ncf * sizeof(rocksdb_column_family_handle_t *));
+
+  // {
+  //   rocksdb_options_t *options = rocksdb_options_create_copy(comm);
+
+  //   rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+  //   rocksdb_options_t *dbOpts1 = rocksdb_options_create_copy(comm);
+  //   rocksdb_options_t *dbOpts2 = rocksdb_options_create_copy(comm);
+  //   rocksdb_options_set_comparator(dbOpts2, cmp1);
+  //   // rocksdb_column_family_handle_t *cf = rocksdb_create_column_family(db, dbOpts1, "cmp1", &err);
+
+  //   const char *pName[] = {"default", "cf1"};
+
+  //   const rocksdb_options_t **pOpts = malloc(ncf * sizeof(rocksdb_options_t *));
+  //   pOpts[0] = dbOpts1;
+  //   pOpts[1] = dbOpts2;
+
+  //   rocksdb_options_t *allOptions = rocksdb_options_create_copy(comm);
+  //   db = rocksdb_open_column_families(allOptions, "test", ncf, pName, pOpts, pHandle, &err);
+  // }
+
+  // // rocksdb_options_t *options = rocksdb_options_create();
+  // // rocksdb_options_set_create_if_missing(options, 1);
+
+  // // //rocksdb_open_column_families(const rocksdb_options_t *options, const char *name, int num_column_families,
+  // //    const char *const *column_family_names,
+  // //    const rocksdb_options_t *const *column_family_options,
+  // //    rocksdb_column_family_handle_t **column_family_handles, char **errptr);
+
+  // for (int i = 0; i < 100; i++) {
+  //   char buf[128] = {0};
+
+  //   rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
+  //   KV kv = {.k1 = i, .k2 = i};
+  //   kvSerial(&kv, buf);
+  //   rocksdb_put_cf(db, wopt, pHandle[0], buf, strlen(buf), (const char *)&i, sizeof(i), &err);
+  // }
+
+  // rocksdb_close(db);
+  // Write
+  // rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
+  // rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
+
+  //// Read
+  // rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
+  // rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
+  // size_t vallen = 0;
+  // char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
+  // printf("val:%s\n", val);
+
+  //// Update
+  //// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
+
+  //// Delete
+  // rocksdb_delete(db, writeoptions, "key", 3, &err);
+
+  //// Read again
+  // val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
+  // printf("val:%s\n", val);
+
+  // rocksdb_close(db);
+
   return 0;
 }

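A hedged sketch of compiling and running this example against a system-wide RocksDB install; the source file and binary names are illustrative, and a static librocksdb may need extra link flags:

```bash
# Assumes the shared librocksdb and its C header are installed.
gcc rocksdb_example.c -o rocksdb_example -lrocksdb -lpthread
./rocksdb_example
```
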
@@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
 slug: /
 ---
 
-TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
 
 To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
 
@@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
 
 ## Competitive Advantages
 
-By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
+By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
 
 - **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
 
@@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
 
 - **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
 
-- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
 
 With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
 
@@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 
 | **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
 | Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
 
 ### System Maintenance Requirements
 
@@ -123,11 +123,10 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 
 ## Comparison with other databases
 
-- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
-- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
-- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
-- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
-- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
+- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
+- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
+- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
+- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
 
 ## More readings
 - [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
 
@@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
 
 If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
 
-TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
+TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
 
 Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
 
@@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
 
 This document describes how to install TDengine in a Docker container and perform queries and inserts.
 
-- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
+- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
 - To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
 - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
 
@@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
 
 This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
 
-- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
+- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
 - To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
 - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
 
@@ -102,7 +102,7 @@ sudo apt-get install tdengine
 
 :::tip
 This installation method is supported only for Debian and Ubuntu.
-::::
+:::
 </TabItem>
 <TabItem label="Windows" value="windows">
 
@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
 {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
 ```
 
-More configuration about connection,please refer to [Java Connector](/reference/connector/java)
+More configuration about connection, please refer to [Java Connector](/reference/connector/java)

@@ -1,3 +1,3 @@
-```php title="原生连接"
+```php title="native"
 {{#include docs/examples/php/connect.php}}
 ```

@@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
 
 For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
 
 Key differences:
 
 3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
 1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
 
@@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
 <TabItem label="R" value="r">
 
 1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
 2. Install the dependency package `RJDBC`:
 
 ```R
 install.packages("RJDBC")
 
@@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
 </TabItem>
 <TabItem label="PHP" value="php">
 
-**Download Source Code Package and Unzip:**
+**Download Source Code Package and Unzip: **
 
 ```shell
 curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
 
@@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
 
 > Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
 
-**Non-Swoole Environment:**
+**Non-Swoole Environment: **
 
 ```shell
 phpize && ./configure && make -j && make install
 ```
 
-**Specify TDengine Location:**
+**Specify TDengine Location: **
 
 ```shell
 phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
 
@@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
 > `--with-tdengine-dir=` is followed by the TDengine installation location.
 > This way is useful in case TDengine location can't be found automatically or macOS.
 
-**Swoole Environment:**
+**Swoole Environment: **
 
 ```shell
 phpize && ./configure --enable-swoole && make -j && make install
 
@@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
 </Tabs>
 
 :::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
+If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
 
 :::
 
@@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
 
 ## Query Examples
 
-If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query SQL:
+If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
 
 ```sql
 SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
 
@@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
 
 ## Query Examples
 
-If you want query the data of `location=California.LosAngeles groupid=3`,here is the query SQL:
+If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
 
 ```sql
 SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
 
@@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
 
 ## Query Examples
 
-If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query SQL:
+If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
 
 ```sql
 SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
 
@@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
 
 On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for the `vgroups` parameter is 2 times the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
 
-For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
 
 ## Sample Programs
 
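As a hedged illustration of the `vgroups` guideline above (about 2x the CPU core count), on a 4-core server the database could be created like this; the database name and value are illustrative:

```bash
# Illustrative only: 4 cores -> vgroups 8, per the recommendation above.
taos -s "CREATE DATABASE test VGROUPS 8;"
```
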
@@ -98,7 +98,7 @@ The main Program is responsible for:
 3. Start reading threads
 4. Output writing speed every 10 seconds
 
 The main program provides 4 parameters for tuning:
 
 1. The number of reading threads, default value is 1
 2. The number of writing threads, default value is 2
 
@@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
 
 If you want to launch the sample program on a remote server, please follow below steps:
 
-1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
+1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
 ```
 mvn package
 ```
 
@@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
 pip3 install faster-fifo
 ```
 
-3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
 
 4. Execute the program
 
@@ -1,4 +1,4 @@
-### python Kafka 客户端
+### python Kafka client
 
 For python kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
 
@@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
 <details>
 <summary>kafka_example_consumer</summary>
 
-`kafka_example_consumer` is `consumer`,which is responsible for consuming data from kafka and writing it to TDengine.
+`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
 
 ```py
 {{#include docs/examples/python/kafka_example_consumer.py}}
 
@@ -0,0 +1,3 @@
+```rust
+{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
+```

@@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
 
 ## Introduction
 
 SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
 
 - Query on single column or multiple columns
-- Filter on tags or data columns:>, <, =, <\>, like
+- Filter on tags or data columns: >, <, =, <\>, like
 - Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset`
 - Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
 - Arithmetic on columns of numeric types or aggregate results
 
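As a hedged illustration of the windowed aggregate queries listed in this hunk, run against the demo `meters` table used elsewhere in these docs:

```bash
# Illustrative 10-second time-window aggregate; table and column come from the docs' demo schema.
taos -s "SELECT _wstart, AVG(current) FROM meters INTERVAL(10s);"
```
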
@@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
 :::note
 
 1. With either REST connection or native connection, the above sample code works well.
-2. Please note that `use db` can't be used in case of REST connection because it's stateless.
+2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
 
 :::
 
@@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
 
 To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
 
-Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
+Tips: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set a reasonable value for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameter when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
 
 ## Data Schema and API
 
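A hedged sketch of setting the retention parameter named above at database creation time; the value is illustrative, and the units are those documented for the CREATE DATABASE statement:

```bash
# Illustrative retention setting for a database consumed by subscribers.
taos -s "CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 86400;"
```
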
@ -285,16 +285,15 @@ You configure the following parameters when creating a consumer:
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
| Parameter | Type | Description | Remarks |
|
||||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||||
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.ip` | string | IP address of the server side | |
|
||||||
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.user` | string | User Name | |
|
||||||
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.pass` | string | Password | |
|
||||||
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.port` | string | Port of the server side | |
|
||||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
||||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||||
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) |
|
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) |
|
||||||
| `enable.auto.commit` | boolean | Commit automatically; true: the user application doesn't need to commit explicitly; false: the user application needs to handle commits itself | Default value is true |
|
| `enable.auto.commit` | boolean | Commit automatically; true: the user application doesn't need to commit explicitly; false: the user application needs to handle commits itself | Default value is true |
|
||||||
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
|
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
|
||||||
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
|
|
||||||
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false |
|
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false |
|
||||||
|
|
||||||
The method of specifying these parameters depends on the language used:
|
The method of specifying these parameters depends on the language used:
|
||||||
|
@ -312,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
|
||||||
tmq_conf_set(conf, "td.connect.user", "root");
|
tmq_conf_set(conf, "td.connect.user", "root");
|
||||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
|
||||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||||
|
|
||||||
|
@ -327,6 +325,7 @@ Java programs use the following parameters:
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
| Parameter | Type | Description | Remarks |
|
||||||
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| `td.connect.type` | string | Connection type: "jni" means native connection, "ws" means WebSocket connection; the default is "jni" |
|
||||||
| `bootstrap.servers` | string | Connection address, such as `localhost:6030` |
|
| `bootstrap.servers` | string | Connection address, such as `localhost:6030` |
|
||||||
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
|
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
|
||||||
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
|
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
|
||||||
|
@ -368,7 +367,6 @@ conf := &tmq.ConfigMap{
|
||||||
"td.connect.port": "6030",
|
"td.connect.port": "6030",
|
||||||
"client.id": "test_tmq_c",
|
"client.id": "test_tmq_c",
|
||||||
"enable.auto.commit": "false",
|
"enable.auto.commit": "false",
|
||||||
"experimental.snapshot.enable": "true",
|
|
||||||
"msg.with.table.name": "true",
|
"msg.with.table.name": "true",
|
||||||
}
|
}
|
||||||
consumer, err := NewConsumer(conf)
|
consumer, err := NewConsumer(conf)
|
||||||
|
@ -402,23 +400,6 @@ from taos.tmq import Consumer
|
||||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||||
```
|
```
|
||||||
|
|
||||||
Python programs use the following parameters:
|
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
|
||||||
|:---------:|:----:|:-----------:|:-------:|
|
|
||||||
| `td.connect.ip` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.user` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.pass` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.port` | string | Used in establishing a connection||
|
|
||||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
|
|
||||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
|
||||||
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` |
|
|
||||||
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
|
|
||||||
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
|
|
||||||
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
|
||||||
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
|
|
||||||
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
|
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem label="Node.JS" value="Node.JS">
|
<TabItem label="Node.JS" value="Node.JS">
|
||||||
|
|
|
@ -254,7 +254,7 @@ Create the UDF:
|
||||||
```bash
|
```bash
|
||||||
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
|
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
|
||||||
```
|
```
|
||||||
Use the UDF in the query:
|
Use the UDF in the query:
|
||||||
```bash
|
```bash
|
||||||
select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
||||||
```
|
```
|
||||||
|
@ -271,9 +271,9 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
||||||
## Implement a UDF in Python
|
## Implement a UDF in Python
|
||||||
|
|
||||||
Implement the specified interface functions when implementing a UDF in Python.
|
Implement the specified interface functions when implementing a UDF in Python.
|
||||||
- implement `process` function for the scalar UDF。
|
- implement `process` function for the scalar UDF.
|
||||||
- implement `start`, `reduce`, `finish` for the aggregate UDF。
|
- implement `start`, `reduce`, `finish` for the aggregate UDF.
|
||||||
- implement `init` for initialization and `destroy` for termination。
|
- implement `init` for initialization and `destroy` for termination.
|
||||||
|
|
||||||
### Implement a Scalar UDF in Python
|
### Implement a Scalar UDF in Python
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
|
||||||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
||||||
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
||||||
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
||||||
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
||||||
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||||
|
|
||||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
||||||
|
@ -25,7 +25,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
||||||
In TDengine, the data types below can be used when specifying a column or tag.
|
In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
|
|
||||||
| # | **type** | **Bytes** | **Description** |
|
| # | **type** | **Bytes** | **Description** |
|
||||||
| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| --- | :---------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
||||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||||
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||||
|
@ -35,7 +35,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||||
| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
|
| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535]. |
|
||||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||||
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255]. |
|
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255]. |
|
||||||
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||||
|
|
|
@ -72,8 +72,8 @@ database_option: {
|
||||||
- 0: The database can contain multiple supertables.
|
- 0: The database can contain multiple supertables.
|
||||||
- 1: The database can contain only one supertable.
|
- 1: The database can contain only one supertable.
|
||||||
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; for multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; for multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
||||||
- TABLE_PREFIX: When it is a positive number, a prefix of this length in the table name is ignored when distributing the table to a vgroup; when it is a negative number, only a prefix of this length is used when distributing the table to a vgroup. The default value is 0. For example, for the table name v30001, "0001" is used if TABLE_PREFIX is set to 2, but "v3" is used if TABLE_PREFIX is set to -2. This helps you control the distribution of tables.
|
- TABLE_PREFIX: When it is a positive number, a prefix of this length in the table name is ignored when distributing the table to a vgroup; when it is a negative number, only a prefix of this length is used when distributing the table to a vgroup. The default value is 0. For example, for the table name v30001, "0001" is used if TABLE_PREFIX is set to 2, but "v3" is used if TABLE_PREFIX is set to -2. This helps you control the distribution of tables.
|
||||||
- TABLE_SUFFIX:The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
|
- TABLE_SUFFIX: When it is a positive number, a suffix of this length in the table name is ignored when distributing the table to a vgroup; when it is a negative number, only a suffix of this length is used when distributing the table to a vgroup. The default value is 0. For example, for the table name v30001, "v300" is used if TABLE_SUFFIX is set to 2, but "01" is used if TABLE_SUFFIX is set to -2. This helps you control the distribution of tables.
|
||||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||||
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files do not need to be kept for consumption. Set this parameter to a proper value before creating topics.
|
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files do not need to be kept for consumption. Set this parameter to a proper value before creating topics.
|
||||||
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files kept for consumption has no upper limit. A combined sketch of these options follows this list.
|
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files kept for consumption has no upper limit. A combined sketch of these options follows this list.
|
||||||
|
|
|
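As an illustration of the options above, a minimal sketch; the database name `power` and the chosen values are assumptions, not recommendations:

```sql
-- Ignore the first 2 characters of table names (e.g. "v3" in "v30001") when
-- assigning tables to vgroups, and keep WAL files for one hour for subscription.
CREATE DATABASE power TABLE_PREFIX 2 WAL_RETENTION_PERIOD 3600;
```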
@ -33,7 +33,7 @@ column_definition:
|
||||||
SHOW STABLES [LIKE tb_name_wildcard];
|
SHOW STABLES [LIKE tb_name_wildcard];
|
||||||
```
|
```
|
||||||
|
|
||||||
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
|
The preceding SQL statement shows all supertables in the current TDengine database.
|
||||||
|
|
||||||
### View the CREATE Statement for a Supertable
|
### View the CREATE Statement for a Supertable
|
||||||
|
|
||||||
|
|
|
@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
```
|
```
|
||||||
|
|
||||||
## Automatically Create Table When Inserting
|
## Automatically Create Table When Inserting
|
||||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
interp_clause:
|
interp_clause:
|
||||||
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
|
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||||
|
|
||||||
partition_by_clause:
|
partition_by_clause:
|
||||||
PARTITION BY expr [, expr] ...
|
PARTITION BY expr [, expr] ...
|
||||||
|
@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
|
||||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||||
```
|
```
|
||||||
|
|
||||||
For sub-table and super table:
|
For sub-table and super table:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT *
|
SELECT *
|
||||||
|
|
|
@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
Prior to TDengine 3.0.3.0 (excluded),only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
|
Prior to TDengine 3.0.3.0, only one index is created by default, on the first tag of each supertable, and indexes cannot be created dynamically on other tags. Starting from version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
|
||||||
|
|
||||||
## Syntax
|
## Syntax
|
||||||
|
|
||||||
1. The syntax of creating an index
|
1. The syntax of creating an index
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||||
```
|
```
|
||||||
|
|
||||||
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the supertable, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
|
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the supertable, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
|
||||||
|
|
|
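A sketch of the syntax above; it assumes the `meters` supertable used elsewhere in this documentation, with its `location` tag, and an arbitrary index name:

```sql
-- Create an index on the location tag to speed up tag filtering.
CREATE INDEX idx_location ON meters (location);
```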
@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00").
|
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use. A short sketch follows this list.
|
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use. A short sketch follows this list.
|
||||||
|
|
||||||
|
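A short sketch of the calls described above; the table `d1001` with timestamp column `ts` is borrowed from the documentation's example data set:

```sql
-- Epoch value 1 rendered as ISO8601 text in UTC.
SELECT TO_ISO8601(1, '+00:00');
-- A TIMESTAMP column rendered in the +08:00 time zone.
SELECT TO_ISO8601(ts, '+08:00') FROM d1001 LIMIT 1;
```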
@ -626,7 +626,7 @@ algo_type: {
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- _p_ is in the range [0,100]; when _p_ is 0, the result is the same as using the function MIN; when _p_ is 100, the result is the same as the function MAX.
|
- _p_ is in the range [0,100]; when _p_ is 0, the result is the same as using the function MIN; when _p_ is 100, the result is the same as the function MAX.
|
||||||
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||||
- The approximation result of the `t-digest` algorithm is sensitive to input data order. For example, when querying an STable with different input data orders, there might be minor differences in the calculated results.
|
- The approximation result of the `t-digest` algorithm is sensitive to input data order. For example, when querying an STable with different input data orders, there might be minor differences in the calculated results.
|
||||||
|
@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
|
||||||
ELAPSED(ts_primary_key [, time_unit])
|
ELAPSED(ts_primary_key [, time_unit])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` units in the calculated time length. A usage sketch appears at the end of this section.
|
||||||
|
|
||||||
**Return value type**: Double if the input value is not NULL;
|
**Return value type**: Double if the input value is not NULL;
|
||||||
|
|
||||||
|
@ -680,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
||||||
|
|
||||||
**Applicable tables**: table, STable, and the outer query of a nested query
|
**Applicable tables**: table, STable, and the outer query of a nested query
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||||
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
||||||
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
||||||
|
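A usage sketch, assuming the subtable `d1001` from the example data set; the time range is illustrative only:

```sql
-- Continuous time length, counted in seconds, covered by valid data
-- within each 10-minute window.
SELECT _wstart, ELAPSED(ts, 1s) FROM d1001
WHERE ts >= '2021-07-13 14:00:00' AND ts < '2021-07-13 15:00:00'
INTERVAL(10m);
```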
@ -758,7 +758,7 @@ SUM(expr)
|
||||||
HYPERLOGLOG(expr)
|
HYPERLOGLOG(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:
|
**Description**:
|
||||||
The cardinality (number of distinct values) of a specific column is returned using the hyperloglog algorithm. The benefit of the hyperloglog algorithm is that memory usage stays under control when the data volume is huge.
|
The cardinality (number of distinct values) of a specific column is returned using the hyperloglog algorithm. The benefit of the hyperloglog algorithm is that memory usage stays under control when the data volume is huge.
|
||||||
However, when the data volume is very small, the result may not be accurate; in this case it's recommended to use `select count(data) from (select unique(col) as data from table)` instead, as sketched below.
|
However, when the data volume is very small, the result may not be accurate; in this case it's recommended to use `select count(data) from (select unique(col) as data from table)` instead, as sketched below.
|
||||||
|
|
||||||
|
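A sketch of both approaches, assuming the example `meters` supertable and its subtable `d1001`:

```sql
-- Approximate distinct count; memory stays bounded even on huge data sets.
SELECT HYPERLOGLOG(voltage) FROM meters;
-- Exact alternative recommended above for small data sets.
SELECT COUNT(data) FROM (SELECT UNIQUE(voltage) AS data FROM d1001);
```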
@ -772,10 +772,10 @@ HYPERLOGLOG(expr)
|
||||||
### HISTOGRAM
|
### HISTOGRAM
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
HISTOGRAM(expr,bin_type, bin_description, normalized)
|
HISTOGRAM(expr, bin_type, bin_description, normalized)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:Returns count of data points in user-specified ranges.
|
**Description**: Returns count of data points in user-specified ranges.
|
||||||
|
|
||||||
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
|
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
|
||||||
|
|
||||||
|
@ -783,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
||||||
|
|
||||||
**Applicable table types**: table, STable
|
**Applicable table types**: table, STable
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
|
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||||
- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
|
- bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type respectively:
|
||||||
- "user_input": "[1, 3, 5, 7]":
|
- "user_input": "[1, 3, 5, 7]":
|
||||||
User specified bin values.
|
User specified bin values.
|
||||||
|
|
||||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
|
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
||||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||||
|
|
||||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
|
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
||||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
||||||
- normalized: setting to 1/0 to turn result normalization on/off. Valid values are 0 or 1. A usage sketch follows this list.
|
- normalized: setting to 1/0 to turn result normalization on/off. Valid values are 0 or 1. A usage sketch follows this list.
|
||||||
|
|
||||||
|
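A usage sketch with the "linear_bin" descriptor discussed above, assuming the example `meters` supertable:

```sql
-- Raw counts (normalized = 0) of voltage readings in bins bounded by
-- [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
SELECT HISTOGRAM(voltage, 'linear_bin',
    '{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}', 0)
FROM meters;
```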
@ -867,10 +867,16 @@ FIRST(expr)
|
||||||
### INTERP
|
### INTERP
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INTERP(expr)
|
INTERP(expr [, ignore_null_values])
|
||||||
|
|
||||||
|
ignore_null_values: {
|
||||||
|
0
|
||||||
|
| 1
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
|
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1; 1 means null values are ignored. The default value of this parameter is 0.
|
||||||
|
|
||||||
|
|
||||||
**Return value type**: Same as the column being operated upon
|
**Return value type**: Same as the column being operated upon
|
||||||
|
|
||||||
|
@ -886,7 +892,7 @@ INTERP(expr)
|
||||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
- `INTERP` can be applied to a supertable, in which case it interpolates over the data of all its child tables sorted by primary key. It can also be used with `partition by tbname` when applied to a supertable to perform interpolation on each timeline separately.
|
||||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
|
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
|
||||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0). A combined usage sketch follows this list.
|
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0). A combined usage sketch follows this list.
|
||||||
|
|
||||||
|
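A sketch that pulls the clauses above together, assuming the example `meters` supertable; the time range is arbitrary:

```sql
-- One interpolated voltage value every 500 ms per child table, using linear
-- interpolation for missing points; _irowts carries each point's timestamp.
SELECT _irowts, INTERP(voltage) FROM meters
PARTITION BY tbname
RANGE('2021-07-13 14:06:30', '2021-07-13 14:06:40') EVERY(500a) FILL(LINEAR);
```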
@ -1107,7 +1113,7 @@ ignore_negative: {
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。
|
- It can be used together with a selected column. For example: `SELECT _rowts, DERIVATIVE(voltage, 1s, 0) FROM d1001`.
|
||||||
|
|
||||||
### DIFF
|
### DIFF
|
||||||
|
|
||||||
|
@ -1131,7 +1137,7 @@ ignore_negative: {
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- The number of result rows is one less than the number of input rows; there is no output for the first row
|
- The number of result rows is one less than the number of input rows; there is no output for the first row
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from。
|
- It can be used together with a selected column. For example: `SELECT _rowts, DIFF(voltage) FROM d1001`, as sketched below.
|
||||||
|
|
||||||
|
|
||||||
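A sketch of `DIFF` on the assumed example subtable `d1001`:

```sql
-- Change in voltage between consecutive rows; passing 1 as a second
-- argument would additionally drop negative differences.
SELECT _rowts, DIFF(voltage) FROM d1001;
```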
### IRATE
|
### IRATE
|
||||||
|
@ -1183,7 +1189,7 @@ STATECOUNT(expr, oper, val)
|
||||||
**Applicable parameter values**:
|
**Applicable parameter values**:
|
||||||
|
|
||||||
- oper: Can be one of `'LT'` (less than), `'GT'` (greater than), `'LE'` (less than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
- oper: Can be one of `'LT'` (less than), `'GT'` (greater than), `'LE'` (less than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
||||||
- val : Numeric types
|
- val: Numeric types
|
||||||
|
|
||||||
**Return value type**: Integer
|
**Return value type**: Integer
|
||||||
|
|
||||||
|
@ -1210,7 +1216,7 @@ STATEDURATION(expr, oper, val, unit)
|
||||||
**Applicable parameter values**:
|
**Applicable parameter values**:
|
||||||
|
|
||||||
- oper: Can be one of `'LT'` (less than), `'GT'` (greater than), `'LE'` (less than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
- oper: Can be one of `'LT'` (less than), `'GT'` (greater than), `'LE'` (less than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
||||||
- val : Numeric types
|
- val: Numeric types
|
||||||
- unit: The unit of the time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default. A usage sketch appears at the end of this section.
|
- unit: The unit of the time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default. A usage sketch appears at the end of this section.
|
||||||
|
|
||||||
**Return value type**: Integer
|
**Return value type**: Integer
|
||||||
|
|
|
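A sketch of both state functions on the assumed example subtable `d1001`; the threshold 205 is illustrative:

```sql
-- Number of consecutive rows so far in which voltage >= 205.
SELECT STATECOUNT(voltage, 'GE', 205) FROM d1001;
-- Time, in seconds, for which voltage has continuously stayed >= 205.
SELECT STATEDURATION(voltage, 'GE', 205, 1s) FROM d1001;
```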
@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
|
||||||
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
||||||
|
|
||||||
1. NONE: No fill (the default fill mode)
|
1. NONE: No fill (the default fill mode)
|
||||||
2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
|
2. VALUE: Fill with a fixed value, which must be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled.
|
||||||
3. PREV:Fill with the previous non-NULL value, `FILL(PREV)`
|
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||||
4. NULL:Fill with NULL, `FILL(NULL)`
|
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||||
5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)`
|
5. LINEAR: Fill by linear interpolation based on the closest non-NULL values before and after, `FILL(LINEAR)`
|
||||||
6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)`
|
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
|
||||||
|
|
||||||
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable for the filling modes `PREV`, `NEXT` and `LINEAR`, because filling can't be performed if there is no data at all. For the filling modes `NULL` and `VALUE`, however, filling can be performed even when there is no data at all, and whether to fill depends on the user's application. To meet the need for this forced filling behavior without breaking the behavior of the existing filling modes, TDengine added two new filling modes in version 3.0.3.0.
|
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable for the filling modes `PREV`, `NEXT` and `LINEAR`, because filling can't be performed if there is no data at all. For the filling modes `NULL` and `VALUE`, however, filling can be performed even when there is no data at all, and whether to fill depends on the user's application. To meet the need for this forced filling behavior without breaking the behavior of the existing filling modes, TDengine added two new filling modes in version 3.0.3.0.
|
||||||
|
|
||||||
1. NULL_F: Fill `NULL` by force
|
1. NULL_F: Fill `NULL` by force
|
||||||
2. VALUE_F: Fill `VALUE` by force
|
2. VALUE_F: Fill `VALUE` by force
|
||||||
|
|
||||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
|
||||||
|
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` fill by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly the same as what the name suggests.
|
||||||
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. they don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. they don't fill by force. That is, there is no forced filling in stream processing.
|
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. they don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. they don't fill by force. That is, there is no forced filling in stream processing.
|
||||||
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. they fill by force; `VALUE` and `VALUE_F` are the same, i.e. they fill by force. That is, filling is always forced when used with `INTERP`. A sketch of the forced-fill behavior follows this list.
|
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. they fill by force; `VALUE` and `VALUE_F` are the same, i.e. they fill by force. That is, filling is always forced when used with `INTERP`. A sketch of the forced-fill behavior follows this list.
|
||||||
|
|
||||||
|
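A sketch of the forced-fill distinction, assuming the example subtable `d1001` and a query range that may contain no data at all; the `VALUE_F` syntax is assumed to mirror `FILL(VALUE, ...)`:

```sql
-- FILL(VALUE, 0) returns an empty result if the range holds no data at all;
-- FILL(VALUE_F, 0) fills every hourly window with 0 regardless.
SELECT _wstart, AVG(voltage) FROM d1001
WHERE ts >= '2021-07-13 00:00:00' AND ts < '2021-07-14 00:00:00'
INTERVAL(1h) FILL(VALUE_F, 0);
```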
@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
|
||||||
|
|
||||||
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
||||||
|
|
||||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
|
||||||
|
|
||||||
### State Window
|
### State Window
|
||||||
|
|
||||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
|
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
|
||||||
|
|
||||||
### Session Window
|
### Session Window
|
||||||
|
|
||||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. A query sketch follows the figure below.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
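A query sketch of the session window described above, assuming the example subtable `d1001`:

```sql
-- Rows whose timestamps lie within 12 seconds of the previous row
-- fall into the same session window.
SELECT _wstart, _wend, COUNT(*) FROM d1001 SESSION(ts, 12s);
```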
@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
||||||
|
|
||||||
### Examples
|
### Examples
|
||||||
|
|
||||||
A table of intelligent meters can be created by the SQL statement below:
|
A table of intelligent meters can be created by the SQL statement below:
|
||||||
|
|
||||||
```
|
```
|
||||||
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
|
|
|
@ -112,7 +112,7 @@ SHOW STREAMS;
|
||||||
|
|
||||||
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
||||||
|
|
||||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering,the default value is AT_ONCE:
|
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering; the default is AT_ONCE:
|
||||||
|
|
||||||
1. AT_ONCE: triggers on write
|
1. AT_ONCE: triggers on write
|
||||||
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
|
||||||
|
|
||||||
- The maximum length of keys in JSON is 256 bytes, and keys must consist of printable ASCII characters. The maximum total length of a JSON value is 4,096 bytes.
|
- The maximum length of keys in JSON is 256 bytes, and keys must consist of printable ASCII characters. The maximum total length of a JSON value is 4,096 bytes.
|
||||||
|
|
||||||
- JSON format:
|
- JSON format:
|
||||||
|
|
||||||
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-NULL string, bool or array.
|
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-NULL string, bool or array.
|
||||||
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. A usage sketch follows this list.
|
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. A usage sketch follows this list.
|
||||||
|
|
|
@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
|
||||||
|
|
||||||
1. If there are escape characters in identifiers (database name, table name, column name)
|
1. If there are escape characters in identifiers (database name, table name, column name)
|
||||||
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII letters or underscores and can't start with a digit
|
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII letters or underscores and can't start with a digit
|
||||||
- Identifier quoted with ``: Original content is kept, no escaping
|
- Identifier quoted with ``: Original content is kept, no escaping
|
||||||
2. If there are escape characters in values
|
2. If there are escape characters in values
|
||||||
- The escape characters will be escaped as shown in the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
- The escape characters will be escaped as shown in the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
||||||
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
|
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
|
||||||
|
|
|
@ -184,7 +184,7 @@ Provides information about standard tables and subtables.
|
||||||
|
|
||||||
## INS_COLUMNS
|
## INS_COLUMNS
|
||||||
|
|
||||||
| # | **列名** | **数据类型** | **说明** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------- | ---------------------- |
|
| --- | :---------: | ------------- | ---------------------- |
|
||||||
| 1 | table_name | BINARY(192) | Table name |
|
| 1 | table_name | BINARY(192) | Table name |
|
||||||
| 2 | db_name | BINARY(64) | Database name |
|
| 2 | db_name | BINARY(64) | Database name |
|
||||||
|
|
|
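A query sketch against this system table; the database name `power` is an assumption:

```sql
-- List the tables (and their databases) that have columns recorded
-- in the INFORMATION_SCHEMA catalog for database `power`.
SELECT table_name, db_name FROM information_schema.ins_columns
WHERE db_name = 'power';
```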
@ -129,6 +129,14 @@ SHOW QNODES;
|
||||||
|
|
||||||
Shows information about qnodes in the system.
|
Shows information about qnodes in the system.
|
||||||
|
|
||||||
|
## SHOW QUERIES
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW QUERIES;
|
||||||
|
```
|
||||||
|
|
||||||
|
Shows the queries in progress in the system.
|
||||||
|
|
||||||
## SHOW SCORES
|
## SHOW SCORES
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -179,7 +187,7 @@ SHOW TABLE DISTRIBUTED table_name;
|
||||||
|
|
||||||
Shows how table data is distributed.
|
Shows how table data is distributed.
|
||||||
|
|
||||||
Example: The command below displays the block distribution of table `d0` in detailed format.
|
Example: The command below displays the block distribution of table `d0` in detailed format.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
show table distributed d0\G;
|
show table distributed d0\G;
|
||||||
|
@ -189,7 +197,7 @@ show table distributed d0\G;
|
||||||
<summary> Show Example </summary>
|
<summary> Show Example </summary>
|
||||||
<pre><code>
|
<pre><code>
|
||||||
*************************** 1.row ***************************
|
*************************** 1.row ***************************
|
||||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
|
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||||
|
|
||||||
Total_Blocks: Table `d0` contains 5 blocks in total
|
Total_Blocks: Table `d0` contains 5 blocks in total
|
||||||
|
|
||||||
|
@ -204,20 +212,20 @@ _block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Ave
|
||||||
|
|
||||||
Total_Rows: Table `d0` contains 20,000 rows
|
Total_Rows: Table `d0` contains 20,000 rows
|
||||||
|
|
||||||
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows
|
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows
|
||||||
|
|
||||||
MinRows: The minimum number of rows in a block is 3,616
|
MinRows: The minimum number of rows in a block is 3,616
|
||||||
|
|
||||||
MaxRows: The maximum number of rows in a block is 4,096
|
MaxRows: The maximum number of rows in a block is 4,096
|
||||||
|
|
||||||
Average_Rows: The average number of rows in a block is 4,000
|
Average_Rows: The average number of rows in a block is 4,000
|
||||||
|
|
||||||
*************************** 3.row ***************************
|
*************************** 3.row ***************************
|
||||||
_block_dist: Total_Tables=[1] Total_Files=[2]
|
_block_dist: Total_Tables=[1] Total_Files=[2]
|
||||||
|
|
||||||
Total_Tables: The number of child tables, 1 in this example
|
Total_Tables: The number of child tables, 1 in this example
|
||||||
|
|
||||||
Total_Files: The number of files storing the table's data, 2 in this example
|
Total_Files: The number of files storing the table's data, 2 in this example
|
||||||
|
|
||||||
*************************** 4.row ***************************
|
*************************** 4.row ***************************
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
|
||||||
```
|
```
|
||||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||||
- function_name: The scalar function name to be used in the SQL statement
|
- function_name: The scalar function name to be used in the SQL statement
|
||||||
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
|
||||||
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so); for the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so); for the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||||
- output_type: The data type of the results of the UDF. A usage sketch follows this list.
|
- output_type: The data type of the results of the UDF. A usage sketch follows this list.
|
||||||
|
|
||||||
|
|
|
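A sketch of the statement above for a Python UDF; the function name, script path and output type are assumptions:

```sql
-- Registers (or updates) a scalar UDF implemented in Python.
CREATE OR REPLACE FUNCTION myfun AS '/root/udf/myfun.py' OUTPUTTYPE DOUBLE LANGUAGE 'Python';
```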
@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
|
||||||
- Information that you input is given in lowercase.
|
- Information that you input is given in lowercase.
|
||||||
- \[ \] means optional input, excluding [] itself.
|
- \[ \] means optional input, excluding [] itself.
|
||||||
- | means one of a few options, excluding | itself.
|
- | means one of a few options, excluding | itself.
|
||||||
- … means the item prior to it can be repeated multiple times.
|
- ... means the item prior to it can be repeated multiple times.
|
||||||
|
|
||||||
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
||||||
|
|
||||||
|
|
|
@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
|
||||||
chmod +x TDinsight.sh
|
chmod +x TDinsight.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Prepare:
|
Prepare:
|
||||||
|
|
||||||
1. TDengine Server
|
1. TDengine Server
|
||||||
|
|
||||||
- The URL of REST service:for example `http://localhost:6041` if TDengine is deployed locally
|
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
|
||||||
- User name and password
|
- User name and password
|
||||||
|
|
||||||
2. Grafana Alert Notification
|
2. Grafana Alert Notification
|
||||||
|
@ -36,7 +36,7 @@ You can use below command to setup Grafana alert notification.
|
||||||
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||||
|
@ -274,7 +274,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -288,7 +288,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -302,7 +302,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||||
|
@ -330,7 +330,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent table
|
### taosadapter\_system\_cpu\_percent table
|
||||||
|
@ -340,6 +340,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||mertic value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
|
|
|
@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
|
||||||
|
|
||||||
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
||||||
|
|
||||||
Diagnostic steps:
|
Diagnostic steps:
|
||||||
|
|
||||||
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
||||||
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
||||||
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
||||||
|
|
||||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||||
Please note that the package length must be the same in the above two commands executed on the server side and client side respectively.
|
Please note that the package length must be the same in the above two commands executed on the server side and client side respectively.
|
||||||
|
|
||||||
Output of the server side for the example is below:
|
Output of the server side for the example is below:
|
||||||
|
|
|
@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
|
||||||
|
|
||||||
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
||||||
|
|
||||||
- Custom authentication information is shown below:
|
- Custom authentication information is shown below:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
Authorization: Taosd <TOKEN>
|
Authorization: Taosd <TOKEN>
|
||||||
```
|
```
|
||||||
|
|
||||||
- Basic authentication information is shown below:
|
- Basic authentication information is shown below:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
Authorization: Basic <TOKEN>
|
Authorization: Basic <TOKEN>
|
||||||
|
|
|
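For illustration (an editorial addition, not part of the original text): with Basic authentication, the `<TOKEN>` is the Base64 encoding of `{username}:{password}`. A minimal Java sketch, assuming the default `root:taosdata` credentials:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicTokenExample {
    public static void main(String[] args) {
        // The Basic <TOKEN> is base64("{username}:{password}").
        String token = Base64.getEncoder()
                .encodeToString("root:taosdata".getBytes(StandardCharsets.UTF_8));
        // Prints: Authorization: Basic cm9vdDp0YW9zZGF0YQ==
        System.out.println("Authorization: Basic " + token);
    }
}
```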
@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
|
||||||
|
|
||||||
After TDengine server or client installation, `taos.h` is located at
|
After TDengine server or client installation, `taos.h` is located at
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux: `/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows: `C:\TDengine\include`
|
||||||
- macOS:`/usr/local/include`
|
- macOS: `/usr/local/include`
|
||||||
|
|
||||||
The dynamic libraries for the TDengine client driver are located in:
|
The dynamic libraries for the TDengine client driver are located in:
|
||||||
|
|
||||||
|
@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
||||||
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
||||||
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
||||||
|
|
||||||
schemaless 其他相关的接口
|
Other schemaless-related interfaces:
|
||||||
|
|
||||||
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
||||||
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
||||||
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
||||||
|
|
|
@ -36,6 +36,93 @@ REST connection supports all platforms that can run Java.
|
||||||
|
|
||||||
Please refer to [version support list](/reference/connector#version-support)
|
Please refer to [version support list](/reference/connector#version-support)
|
||||||
|
|
||||||
|
## Recent update logs
|
||||||
|
|
||||||
|
| taos-jdbcdriver version | major changes |
|
||||||
|
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||||
|
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
|
||||||
|
| 3.2.0 | This version has been deprecated |
|
||||||
|
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||||
|
| 3.0.1 - 3.0.4 | fix the issue where resultSet data was sometimes parsed incorrectly. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment |
|
||||||
|
| 3.0.0 | Support for TDengine 3.0 |
|
||||||
|
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||||
|
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||||
|
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||||
|
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||||
|
| 2.0.37 | Support json tags |
|
||||||
|
| 2.0.36 | Support schemaless writing |
|
||||||
|
|
||||||
|
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
|
||||||
|
|
||||||
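A minimal connection sketch for the note above (an editorial addition), assuming a locally running taosAdapter on port 6041 and default credentials; setting `batchfetch=true` on the `TAOS-RS` URL switches the REST connection to the WebSocket transport:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class WsConnectExample {
    public static void main(String[] args) throws Exception {
        // batchfetch=true upgrades the JDBC REST connection to WebSocket.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata&batchfetch=true";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```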
|
### Handling exceptions
|
||||||
|
|
||||||
|
After an error is reported, the error message and error code can be obtained through SQLException.
|
||||||
|
|
||||||
|
```java
|
||||||
|
try (Statement statement = connection.createStatement()) {
|
||||||
|
// executeQuery
|
||||||
|
ResultSet resultSet = statement.executeQuery(sql);
|
||||||
|
// print result
|
||||||
|
printResult(resultSet);
|
||||||
|
} catch (SQLException e) {
|
||||||
|
System.out.println("ERROR Message: " + e.getMessage());
|
||||||
|
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
There are four types of error codes that the JDBC connector can report:
|
||||||
|
|
||||||
|
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350)
|
||||||
|
- Error code of the native connection method (error code between 0x2351 and 0x2360)
|
||||||
|
- Error code of the consumer method (error code between 0x2371 and 0x2380)
|
||||||
|
- Error code of other TDengine function modules.
|
||||||
|
|
||||||
|
For specific error codes, please refer to the table below.
|
||||||
|
|
||||||
|
| Error Code | Description | Suggested Actions |
|
||||||
|
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| 0x2301 | connection already closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
|
||||||
|
| 0x2302 | this operation is NOT supported currently! | The current connection type does not support this operation. You can use another connection mode. |
|
||||||
|
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
|
||||||
|
| 0x2304 | statement is closed | The statement is closed. Check whether the statement is closed and used again, or whether the connection is normal. |
|
||||||
|
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set was released and then used again. |
|
||||||
|
| 0x2306 | Batch is empty! | Add parameters to the prepared statement and then execute the batch. |
|
||||||
|
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use executeUpdate(), not executeQuery(). |
|
||||||
|
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use executeQuery(), not executeUpdate(). |
|
||||||
|
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
|
||||||
|
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
|
||||||
|
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
|
||||||
|
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
|
||||||
|
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
||||||
|
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
|
||||||
|
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
|
||||||
|
| 0x2317 | | wrong request type was used in the REST connection. |
|
||||||
|
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
|
||||||
|
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
||||||
|
| 0x231a | password is required | Password information is missing when creating a connection |
|
||||||
|
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
||||||
|
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
|
||||||
|
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
||||||
|
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
||||||
|
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
|
||||||
|
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
|
||||||
|
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
|
||||||
|
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||||
|
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
|
||||||
|
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
|
||||||
|
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
|
||||||
|
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
|
||||||
|
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
|
||||||
|
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
||||||
|
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
||||||
|
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
||||||
|
| - | can't create connection with server within | Increase the connection timeout by adding the httpConnectTimeout parameter, or check the connection to the taosAdapter. |
|
||||||
|
| - | failed to complete the task within the specified time | Increase the wait timeout by adding the messageWaitTimeout parameter, or check the connection to the taosAdapter. |
|
||||||
|
|
||||||
|
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||||
|
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||||
|
|
||||||
## TDengine DataType vs. Java DataType
|
## TDengine DataType vs. Java DataType
|
||||||
|
|
||||||
TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Java are as follows:
|
TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Java are as follows:
|
||||||
|
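To complement the conversion table, a minimal retrieval sketch (an editorial addition); the `test.meters` table and its columns are assumptions borrowed from the electric-meter sample data model, and the REST URL is a local-deployment placeholder:

```java
import java.sql.*;

public class TypeMappingExample {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "select ts, current, location from test.meters limit 1")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp("ts");       // TIMESTAMP -> java.sql.Timestamp
                float current = rs.getFloat("current");     // FLOAT -> float
                String location = rs.getString("location"); // NCHAR -> String
                System.out.printf("%s %s %s%n", ts, current, location);
            }
        }
    }
}
```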
@ -82,7 +169,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.1.0</version>
|
<version>3.2.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -97,7 +184,7 @@ cd taos-connector-jdbc
|
||||||
mvn clean install -Dmaven.test.skip=true
|
mvn clean install -Dmaven.test.skip=true
|
||||||
```
|
```
|
||||||
|
|
||||||
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
@ -333,35 +420,6 @@ while(resultSet.next()){
|
||||||
|
|
||||||
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
|
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
|
||||||
|
|
||||||
### Handling exceptions
|
|
||||||
|
|
||||||
After an error is reported, the error message and error code can be obtained through SQLException.
|
|
||||||
|
|
||||||
```java
|
|
||||||
try (Statement statement = connection.createStatement()) {
|
|
||||||
// executeQuery
|
|
||||||
ResultSet resultSet = statement.executeQuery(sql);
|
|
||||||
// print result
|
|
||||||
printResult(resultSet);
|
|
||||||
} catch (SQLException e) {
|
|
||||||
System.out.println("ERROR Message: " + e.getMessage());
|
|
||||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
There are four types of error codes that the JDBC connector can report:
|
|
||||||
|
|
||||||
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
|
|
||||||
- Error code of the native connection method (error code between 0x2351 and 0x2360)
|
|
||||||
- Error code of the consumer method (error code between 0x2371 and 0x2380)
|
|
||||||
- Error code of other TDengine function modules.
|
|
||||||
|
|
||||||
For specific error codes, please refer to.
|
|
||||||
|
|
||||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
|
||||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
|
||||||
|
|
||||||
### Writing data via parameter binding
|
### Writing data via parameter binding
|
||||||
|
|
||||||
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
|
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
|
||||||
|
@ -369,9 +427,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
|
||||||
**Note:**
|
**Note:**
|
||||||
|
|
||||||
- JDBC REST connections do not currently support the bind interface
|
- JDBC REST connections do not currently support the bind interface
|
||||||
- The following sample code is based on taos-jdbcdriver-3.1.0
|
- The following sample code is based on taos-jdbcdriver-3.2.1
|
||||||
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
|
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
|
||||||
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
- Do not use `db.?` in prepareStatement when specifying the database with the table name; use `?` directly, then specify the database in setTableName, for example: `prepareStatement.setTableName("db.t1")`.
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
```java
|
```java
|
||||||
public class ParameterBindingDemo {
|
public class ParameterBindingDemo {
|
||||||
|
@ -599,21 +660,7 @@ public class ParameterBindingDemo {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The methods to set TAGS values:
|
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||||
|
|
||||||
```java
|
|
||||||
public void setTagNull(int index, int type)
|
|
||||||
public void setTagBoolean(int index, boolean value)
|
|
||||||
public void setTagInt(int index, int value)
|
|
||||||
public void setTagByte(int index, byte value)
|
|
||||||
public void setTagShort(int index, short value)
|
|
||||||
public void setTagLong(int index, long value)
|
|
||||||
public void setTagTimestamp(int index, long value)
|
|
||||||
public void setTagFloat(int index, float value)
|
|
||||||
public void setTagDouble(int index, double value)
|
|
||||||
public void setTagString(int index, String value)
|
|
||||||
public void setTagNString(int index, String value)
|
|
||||||
```
|
|
||||||
|
|
||||||
The methods to set VALUES columns:
|
The methods to set VALUES columns:
|
||||||
|
|
||||||
|
@ -630,17 +677,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
|
||||||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="ws" label="WebSocket connection">
|
||||||
|
|
||||||
|
```java
|
||||||
|
public class ParameterBindingDemo {
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
private static final Random random = new Random(System.currentTimeMillis());
|
||||||
|
private static final int BINARY_COLUMN_SIZE = 30;
|
||||||
|
private static final String[] schemaList = {
|
||||||
|
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||||
|
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||||
|
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||||
|
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||||
|
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||||
|
};
|
||||||
|
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||||
|
|
||||||
|
public static void main(String[] args) throws SQLException {
|
||||||
|
|
||||||
|
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||||
|
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
|
||||||
|
|
||||||
|
init(conn);
|
||||||
|
|
||||||
|
bindInteger(conn);
|
||||||
|
|
||||||
|
bindFloat(conn);
|
||||||
|
|
||||||
|
bindBoolean(conn);
|
||||||
|
|
||||||
|
bindBytes(conn);
|
||||||
|
|
||||||
|
bindString(conn);
|
||||||
|
|
||||||
|
conn.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void init(Connection conn) throws SQLException {
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop database if exists test_ws_parabind");
|
||||||
|
stmt.execute("create database if not exists test_ws_parabind");
|
||||||
|
stmt.execute("use test_ws_parabind");
|
||||||
|
for (int i = 0; i < schemaList.length; i++) {
|
||||||
|
stmt.execute(schemaList[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindInteger(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t1_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||||
|
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||||
|
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||||
|
pstmt.setTagLong(4, random.nextLong());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||||
|
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||||
|
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||||
|
pstmt.setLong(5, random.nextLong());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindFloat(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||||
|
|
||||||
|
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t2_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagFloat(1, random.nextFloat());
|
||||||
|
pstmt.setTagDouble(2, random.nextDouble());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setFloat(2, random.nextFloat());
|
||||||
|
pstmt.setDouble(3, random.nextDouble());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindBoolean(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t3_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setBoolean(2, random.nextBoolean());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindBytes(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t4_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagString(1, "abc");
|
||||||
|
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setString(2, "abc");
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindString(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t5_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||||
|
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setNString(2, "California.SanFrancisco");
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
The methods to set TAGS values:
|
||||||
|
|
||||||
|
```java
|
||||||
|
public void setTagNull(int index, int type)
|
||||||
|
public void setTagBoolean(int index, boolean value)
|
||||||
|
public void setTagInt(int index, int value)
|
||||||
|
public void setTagByte(int index, byte value)
|
||||||
|
public void setTagShort(int index, short value)
|
||||||
|
public void setTagLong(int index, long value)
|
||||||
|
public void setTagTimestamp(int index, long value)
|
||||||
|
public void setTagFloat(int index, float value)
|
||||||
|
public void setTagDouble(int index, double value)
|
||||||
|
public void setTagString(int index, String value)
|
||||||
|
public void setTagNString(int index, String value)
|
||||||
|
```
|
||||||
|
|
||||||
### Schemaless Writing
|
### Schemaless Writing
|
||||||
|
|
||||||
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
|
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
|
||||||
|
|
||||||
Note:
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
- JDBC REST connections do not currently support schemaless writes
|
|
||||||
- The following sample code is based on taos-jdbcdriver-3.1.0
|
|
||||||
|
|
||||||
```java
|
```java
|
||||||
public class SchemalessInsertTest {
|
public class SchemalessJniTest {
|
||||||
private static final String host = "127.0.0.1";
|
private static final String host = "127.0.0.1";
|
||||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||||
|
@ -668,6 +901,41 @@ public class SchemalessInsertTest {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="ws" label="WebSocket connection">
|
||||||
|
|
||||||
|
```java
|
||||||
|
public class SchemalessWsTest {
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||||
|
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||||
|
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||||
|
|
||||||
|
public static void main(String[] args) throws SQLException {
|
||||||
|
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||||
|
Connection connection = DriverManager.getConnection(url);
|
||||||
|
init(connection);
|
||||||
|
|
||||||
|
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||||
|
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||||
|
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||||
|
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||||
|
System.exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void init(Connection connection) throws SQLException {
|
||||||
|
try (Statement stmt = connection.createStatement()) {
|
||||||
|
stmt.executeUpdate("drop database if exists test_ws_schemaless");
|
||||||
|
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
|
||||||
|
stmt.executeUpdate("use test_ws_schemaless");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
### Data Subscription
|
### Data Subscription
|
||||||
|
|
||||||
The TDengine Java Connector supports subscription functionality with the following application API.
|
The TDengine Java Connector supports subscription functionality with the following application API.
|
||||||
|
@ -702,8 +970,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
- group.id: consumer: Specifies the group that the consumer is in.
|
- group.id: consumer: Specifies the group that the consumer is in.
|
||||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
||||||
- td.connect.type: Specifies the connection type to TDengine, `jni` or `WebSocket`. The default is `jni`.
|
- td.connect.type: Specifies the connection type to TDengine, `jni` or `WebSocket`. The default is `jni`.
|
||||||
- httpConnectTimeout:WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||||
- messageWaitTimeout:socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
- For more information, see [Consumer Parameters](../../../develop/tmq). A configuration sketch is shown below.
|
||||||
|
|
||||||
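Putting the parameters above together, a minimal configuration fragment (an editorial addition, not from the original); `ResultBean` is the user-defined bean from the surrounding examples, while the deserializer class name and the topic `topic_meters` are hypothetical placeholders:

```java
import java.util.Collections;
import java.util.Properties;
import com.taosdata.jdbc.tmq.TaosConsumer;

// ...
Properties config = new Properties();
config.setProperty("group.id", "group1");
// Hypothetical subclass of com.taosdata.jdbc.tmq.ReferenceDeserializer
// bound to the user-defined ResultBean.
config.setProperty("value.deserializer", "com.example.ResultDeserializer");
config.setProperty("td.connect.type", "jni"); // default connection type

TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config);
consumer.subscribe(Collections.singletonList("topic_meters")); // hypothetical topic
```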
#### Subscribe to consume data
|
#### Subscribe to consume data
|
||||||
|
@ -711,8 +979,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
```java
|
```java
|
||||||
while(true) {
|
while(true) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
@ -765,8 +1034,9 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
consumer.unsubscribe();
|
consumer.unsubscribe();
|
||||||
|
@ -841,8 +1111,9 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
consumer.unsubscribe();
|
consumer.unsubscribe();
|
||||||
|
@ -968,20 +1239,6 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
||||||
|
|
||||||
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||||
|
|
||||||
## Recent update logs
|
|
||||||
|
|
||||||
| taos-jdbcdriver version | major changes |
|
|
||||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
|
|
||||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
|
||||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
|
|
||||||
| 3.0.0 | Support for TDengine 3.0 |
|
|
||||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
|
||||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
|
||||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
|
||||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
|
||||||
| 2.0.37 | Support json tags |
|
|
||||||
| 2.0.36 | Support schemaless writing |
|
|
||||||
|
|
||||||
## Frequently Asked Questions
|
## Frequently Asked Questions
|
||||||
|
|
||||||
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
|
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
|
||||||
|
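A sketch of the usual remedy (an editorial addition): in TDengine, batching is achieved by putting many rows into a single INSERT statement rather than by JDBC batches. The database, subtable `d1001`, its two-column schema, and the values below are hypothetical:

```java
import java.sql.*;

public class MultiRowInsertExample {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // One INSERT carrying several rows replaces a batch of
            // single-row statements and is parsed only once.
            stmt.executeUpdate("insert into test.d1001 values "
                    + "('2023-02-01 00:00:00.000', 10.2) "
                    + "('2023-02-01 00:00:01.000', 10.3) "
                    + "('2023-02-01 00:00:02.000', 10.4)");
        }
    }
}
```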
@ -1010,9 +1267,9 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
||||||
|
|
||||||
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
||||||
|
|
||||||
**Cause**:taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||||
|
|
||||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||||
|
|
||||||
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
||||||
|
|
||||||
|
|
|
@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
|
||||||
import Preparition from "./_preparation.mdx"
|
import Preparition from "./_preparation.mdx"
|
||||||
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
||||||
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||||
|
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
|
||||||
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
||||||
|
|
||||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||||
|
@ -120,7 +121,7 @@ The parameters are described as follows:
|
||||||
- **username/password**: Username and password used to create connections.
|
- **username/password**: Username and password used to create connections.
|
||||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and WebSocket connections default to `localhost:6041`.
|
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and WebSocket connections default to `localhost:6041`.
|
||||||
- **database**: Specify the default database to connect to. It's optional.
|
- **database**: Specify the default database to connect to. It's optional.
|
||||||
- **params**:Optional parameters.
|
- **params**: Optional parameters.
|
||||||
|
|
||||||
A sample DSN description string is as follows:
|
A sample DSN description string is as follows:
|
||||||
|
|
||||||
|
@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
|
||||||
|
|
||||||
<RustBind />
|
<RustBind />
|
||||||
|
|
||||||
|
#### Schemaless Write
|
||||||
|
|
||||||
|
<RustSml />
|
||||||
|
|
||||||
### Query data
|
### Query data
|
||||||
|
|
||||||
<RustQuery />
|
<RustQuery />
|
||||||
|
|
|
@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
|
||||||
|
|
||||||
All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified.
|
All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified.
|
||||||
|
|
||||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||||
- `user`: TDengine user name. The default is `root`.
|
- `user`: TDengine user name. The default is `root`.
|
||||||
- `password`: TDengine user password. The default is `taosdata`.
|
- `password`: TDengine user password. The default is `taosdata`.
|
||||||
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
||||||
|
|
|
@ -321,18 +321,18 @@ let cursor = conn.cursor();
|
||||||
| package name | version | TDengine version | Description |
|
| package name | version | TDengine version | Description |
|
||||||
|------------------|---------|---------------------|------------------------------------------------------------------|
|
|------------------|---------|---------------------|------------------------------------------------------------------|
|
||||||
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||||
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
|
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
|
||||||
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||||
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||||
### REST Connector
|
### REST Connector
|
||||||
|
|
||||||
| package name | version | TDengine version | Description |
|
| package name | version | TDengine version | Description |
|
||||||
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
||||||
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||||
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
|
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
|
||||||
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||||
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
|
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
|
||||||
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||||
|
|
||||||
## API Reference
|
## API Reference
|
||||||
|
|
||||||
|
|
|
@ -165,7 +165,7 @@ The parameters are described as follows:
|
||||||
* **username/password**: Username and password used to create connections.
|
* **username/password**: Username and password used to create connections.
|
||||||
* **host/port**: Specifies the server and port to establish a connection. WebSocket connections default to `localhost:6041`.
|
* **host/port**: Specifies the server and port to establish a connection. WebSocket connections default to `localhost:6041`.
|
||||||
* **database**: Specify the default database to connect to. It's optional.
|
* **database**: Specify the default database to connect to. It's optional.
|
||||||
* **params**:Optional parameters.
|
* **params**: Optional parameters.
|
||||||
|
|
||||||
A sample DSN description string is as follows:
|
A sample DSN description string is as follows:
|
||||||
|
|
||||||
|
@ -279,7 +279,7 @@ ws://localhost:6041/test
|
||||||
| TDengine.Connector | Description |
|
| TDengine.Connector | Description |
|
||||||
|--------------------|--------------------------------|
|
|--------------------|--------------------------------|
|
||||||
| 3.0.2 | Support .NET Framework 4.5 and above. Support .NET Standard 2.0. NuGet package includes dynamic library for WebSocket.|
|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .NET Standard 2.0. NuGet package includes dynamic library for WebSocket.|
|
||||||
| 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding|
|
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions |
|
||||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||||
|
|
|
@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
|
||||||
|
|
||||||
The PHP connector relies on the TDengine client driver.
|
The PHP connector relies on the TDengine client driver.
|
||||||
|
|
||||||
Project Repository:<https://github.com/Yurunsoft/php-tdengine>
|
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
|
||||||
|
|
||||||
After TDengine client or server is installed, `taos.h` is located at:
|
After TDengine client or server is installed, `taos.h` is located at:
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux: `/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows: `C:\TDengine\include`
|
||||||
- macOS:`/usr/local/include`
|
- macOS: `/usr/local/include`
|
||||||
|
|
||||||
TDengine client driver is located at:
|
TDengine client driver is located at:
|
||||||
|
|
||||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||||
- Windows: `C:\TDengine\taos.dll`
|
- Windows: `C:\TDengine\taos.dll`
|
||||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
- Windows、Linux、MacOS
|
- Windows, Linux, and macOS
|
||||||
|
|
||||||
- PHP >= 7.4
|
- PHP >= 7.4
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
|
||||||
|
|
||||||
### Install php-tdengine
|
### Install php-tdengine
|
||||||
|
|
||||||
**Download Source Code Package and Unzip:**
|
**Download Source Code Package and Unzip:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
||||||
|
@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
||||||
|
|
||||||
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||||
|
|
||||||
**Non-Swoole Environment:**
|
**Non-Swoole Environment:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure && make -j && make install
|
phpize && ./configure && make -j && make install
|
||||||
```
|
```
|
||||||
|
|
||||||
**Specify TDengine location:**
|
**Specify TDengine location:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
||||||
|
@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
|
||||||
> `--with-tdengine-dir=` is followed by TDengine location.
|
> `--with-tdengine-dir=` is followed by TDengine location.
|
||||||
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
|
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
|
||||||
|
|
||||||
**Swoole Environment:**
|
**Swoole Environment:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure --enable-swoole && make -j && make install
|
phpize && ./configure --enable-swoole && make -j && make install
|
||||||
|
|
|
@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
|
||||||
- **trying_interval**: Specifies the interval between insertion retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
- **trying_interval**: Specifies the interval between insertion retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
||||||
|
|
||||||
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
|
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
|
||||||
|
|
||||||
- **continue_if_fail**: Allows the user to specify the behavior when an insertion fails.
|
- **continue_if_fail**: Allows the user to specify the behavior when an insertion fails.
|
||||||
|
|
||||||
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
||||||
|
|
|
@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
|
||||||
|
|
||||||
To deploy TDinsight, we need
|
To deploy TDinsight, we need
|
||||||
- a single-node TDengine server or a multi-node TDengine cluster, plus a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
- a single-node TDengine server or a multi-node TDengine cluster, plus a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
||||||
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
|
- taosAdapter has been installed and is running; please refer to [taosAdapter](../taosadapter).
|
||||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
|
- taosKeeper has been installed and is running; please refer to [taosKeeper](../taosKeeper).
|
||||||
|
|
||||||
Please record
|
Please record
|
||||||
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
||||||
|
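
Before wiring up TDinsight, it can help to confirm the recorded endpoint is reachable. A minimal sketch, assuming the example host above and the default `root:taosdata` credentials:

```shell
# Issues a test query through taosAdapter's REST interface
curl -u root:taosdata -d "show databases" http://tdengine.local:6041/rest/sql
```
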
@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
Use the `uid` value obtained above as `-E` input.
|
Use the `uid` value obtained above as `-E` input.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to monitor multiple TDengine clusters, you need to set up a separate TDinsight dashboard for each of them. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if you use the built-in SMS alerting feature.
|
If you want to monitor multiple TDengine clusters, you need to set up a separate TDinsight dashboard for each of them. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if you use the built-in SMS alerting feature.
|
||||||
|
@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
|
||||||
|
|
||||||
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, and you can see the monitoring results.
|
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, and you can see the monitoring results.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## TDinsight dashboard details
|
## TDinsight dashboard details
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
### firstEp
|
### firstEp
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------------------- |
|
| ---------- | ---------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
||||||
| Default | localhost:6030 |
|
| Default | localhost:6030 |
|
||||||
|
@ -54,7 +54,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
### secondEp
|
### secondEp
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------- |
|
| ---------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
||||||
| Default | None |
|
| Default | None |
|
||||||
|
@ -71,7 +71,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
### serverPort
|
### serverPort
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------------------------------------------------------------------------------------------------- |
|
| ------------- | ----------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The port for external access after `taosd` is started |
|
| Meaning | The port for external access after `taosd` is started |
|
||||||
| Default Value | 6030 |
|
| Default Value | 6030 |
|
||||||
|
@ -80,12 +80,12 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
|
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
|
||||||
:::
|
:::
|
||||||
| Protocol | Default Port | Description | How to configure |
|
| Protocol | Default Port | Description | How to configure |
|
||||||
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
||||||
| TCP | 6030 | Communication between client and server; in a multi-node cluster, communication between nodes | serverPort |
|
| TCP | 6030 | Communication between client and server; in a multi-node cluster, communication between nodes | serverPort |
|
||||||
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
||||||
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
||||||
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
|
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
|
||||||
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
|
| UDP | 6045 | Data access port for StatsD | Configurable through taosAdapter parameters. |
|
||||||
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
||||||
|
|
||||||
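
To check which of these ports are actually open on a given host, you can list the listening sockets. A sketch assuming a Linux host with the `ss` utility available:

```shell
# Shows listening TCP/UDP sockets owned by TDengine-related processes
ss -tulnp | grep -E 'taosd|taosadapter|taoskeeper'
```
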
### maxShellConns
|
### maxShellConns
|
||||||
|
@ -97,6 +97,24 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Value Range | 10-50000000 |
|
| Value Range | 10-50000000 |
|
||||||
| Default Value | 5000 |
|
| Default Value | 5000 |
|
||||||
|
|
||||||
|
### numOfRpcSessions
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ------------------------------------------ |
|
||||||
|
| Applicable | Client/Server |
|
||||||
|
| Meaning | The maximum number of connections to create |
|
||||||
|
| Value Range | 100-100000 |
|
||||||
|
| Default Value | 10000 |
|
||||||
|
|
||||||
|
### timeToGetAvailableConn
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ---------------------------------------------- |
|
||||||
|
| Applicable | Client/Server |
|
||||||
|
| Meaning | The maximum waiting time to get an available connection |
|
||||||
|
| Value Range | 10-50000000(ms) |
|
||||||
|
| Default Value | 500000 |
|
||||||
|
|
||||||
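
Both RPC parameters are plain `name value` lines in `taos.cfg`. A minimal sketch, assuming a default install with the config file at `/etc/taos/taos.cfg`; the values are illustrative, not tuning advice:

```shell
# Append illustrative RPC settings and restart taosd to apply them
echo "numOfRpcSessions 30000" | sudo tee -a /etc/taos/taos.cfg
echo "timeToGetAvailableConn 1000000" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```
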
## Monitoring Parameters
|
## Monitoring Parameters
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -105,16 +123,16 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### monitor
|
### monitor
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The switch for monitoring inside server. The main purpose of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
|
| Meaning | The switch for monitoring inside server. The main purpose of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
|
||||||
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
||||||
| Default | 0 |
|
| Default | 0 |
|
||||||
|
|
||||||
### monitorFqdn
|
### monitorFqdn
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------- |
|
| ---------- | ------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | FQDN of taosKeeper monitoring service |
|
| Meaning | FQDN of taosKeeper monitoring service |
|
||||||
| Default | None |
|
| Default | None |
|
||||||
|
@ -122,7 +140,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### monitorPort
|
### monitorPort
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------------- |
|
| ------------- | ------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Port of taosKeeper monitoring service |
|
| Meaning | Port of taosKeeper monitoring service |
|
||||||
| Default Value | 6043 |
|
| Default Value | 6043 |
|
||||||
|
@ -130,7 +148,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### monitorInterval
|
### monitorInterval
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The interval of collecting system workload |
|
| Meaning | The interval of collecting system workload |
|
||||||
| Unit | second |
|
| Unit | second |
|
||||||
|
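
Putting these monitoring parameters together, a hedged sketch of enabling monitoring in `taos.cfg`, assuming taosKeeper runs on the same host with its default port 6043 and a default install at `/etc/taos/taos.cfg`:

```shell
# Enable monitoring and point it at a local taosKeeper; values are illustrative
echo "monitor 1" | sudo tee -a /etc/taos/taos.cfg
echo "monitorFqdn localhost" | sudo tee -a /etc/taos/taos.cfg
echo "monitorInterval 30" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```
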
@ -140,7 +158,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### telemetryReporting
|
### telemetryReporting
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------------------------- |
|
| ------------- | ---------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
||||||
| Value Range | 0: Not allowed; 1: Allowed |
|
| Value Range | 0: Not allowed; 1: Allowed |
|
||||||
|
@ -148,10 +166,10 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### crashReporting
|
### crashReporting
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | ---------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Switch for allowing TDengine to collect and report crash related information |
|
| Meaning | Switch for allowing TDengine to collect and report crash related information |
|
||||||
| Value Range | 0,1 0: Not allowed;1:allowed |
|
| Value Range | 0: Not allowed; 1: Allowed |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
|
||||||
|
@ -160,7 +178,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### queryPolicy
|
### queryPolicy
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Execution policy for query statements |
|
| Meaning | Execution policy for query statements |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
|
@ -170,7 +188,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### querySmaOptimize
|
### querySmaOptimize
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | SMA index optimization policy |
|
| Meaning | SMA index optimization policy |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
|
@ -180,17 +198,17 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### countAlwaysReturnValue
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
||||||
| Vlue Range | 0:Return empty line,1:Return 0 |
|
| Value Range | 0: Return empty line, 1: Return 0 |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY, or INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows return no values |
|
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY, or INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows return no values |
|
||||||
|
|
||||||
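
A quick way to observe the effect from the shell, using the TDengine CLI; the database and table names here are hypothetical:

```shell
# With countAlwaysReturnValue=1 (the default) this returns one row containing 0;
# with 0 it returns an empty result set when no rows match.
taos -s "SELECT COUNT(*) FROM test.meters WHERE ts > NOW;"
```
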
### maxNumOfDistinctRes
|
### maxNumOfDistinctRes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ------------- | -------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The maximum number of distinct rows returned |
|
| Meaning | The maximum number of distinct rows returned |
|
||||||
| Value Range | [100,000 - 100,000,000] |
|
| Value Range | [100,000 - 100,000,000] |
|
||||||
|
@ -199,7 +217,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### keepColumnName
|
### keepColumnName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ------------- | --------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Whether the column name returned by the Last, First, or LastRow function contains the function name |
|
| Meaning | Whether the column name returned by the Last, First, or LastRow function contains the function name |
|
||||||
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||||
|
@ -210,7 +228,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
### timezone
|
### timezone
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------ |
|
| ------------- | ------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | TimeZone |
|
| Meaning | TimeZone |
|
||||||
| Default Value | TimeZone configured in the host |
|
| Default Value | TimeZone configured in the host |
|
||||||
|
@ -315,7 +333,7 @@ The charset that takes effect is UTF-8.
|
||||||
### dataDir
|
### dataDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------ |
|
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | All data files are stored in this directory |
|
| Meaning | All data files are stored in this directory |
|
||||||
| Default Value | /var/lib/taos |
|
| Default Value | /var/lib/taos |
|
||||||
|
@ -324,7 +342,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tempDir
|
### tempDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------ |
|
| ---------- | ---------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The directory where to put all the temporary files generated during system running |
|
| Meaning | The directory where to put all the temporary files generated during system running |
|
||||||
| Default | /tmp |
|
| Default | /tmp |
|
||||||
|
@ -332,7 +350,7 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalTmpDirGB
|
### minimalTmpDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ----------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
|
@ -341,7 +359,7 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalDataDirGB
|
### minimalDataDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
|
@ -352,7 +370,7 @@ The charset that takes effect is UTF-8.
|
||||||
### supportVnodes
|
### supportVnodes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------------- |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Maximum number of vnodes per dnode |
|
| Meaning | Maximum number of vnodes per dnode |
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
|
@ -374,7 +392,7 @@ The charset that takes effect is UTF-8.
|
||||||
### logDir
|
### logDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------- |
|
| ------------- | ----------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The directory for writing log files |
|
| Meaning | The directory for writing log files |
|
||||||
| Default Value | /var/log/taos |
|
| Default Value | /var/log/taos |
|
||||||
|
@ -382,7 +400,7 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalLogDirGB
|
### minimalLogDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | -------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
|
@ -391,7 +409,7 @@ The charset that takes effect is UTF-8.
|
||||||
### numOfLogLines
|
### numOfLogLines
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Maximum number of lines in single log file |
|
| Meaning | Maximum number of lines in single log file |
|
||||||
| Default Value | 10000000 |
|
| Default Value | 10000000 |
|
||||||
|
@ -399,7 +417,7 @@ The charset that takes effect is UTF-8.
|
||||||
### asyncLog
|
### asyncLog
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ---------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The mode of writing log file |
|
| Meaning | The mode of writing log file |
|
||||||
| Value Range | 0: sync way; 1: async way |
|
| Value Range | 0: sync way; 1: async way |
|
||||||
|
@ -408,7 +426,7 @@ The charset that takes effect is UTF-8.
|
||||||
### logKeepDays
|
### logKeepDays
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------------------------------------------------------------- |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The number of days for log files to be kept |
|
| Meaning | The number of days for log files to be kept |
|
||||||
| Unit | day |
|
| Unit | day |
|
||||||
|
@ -418,7 +436,7 @@ The charset that takes effect is UTF-8.
|
||||||
### debugFlag
|
### debugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------- |
|
| ------------- | --------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level |
|
| Meaning | Log level |
|
||||||
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
||||||
|
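
For example, to capture DEBUG-level logs on a server, one sketch (assuming a default install with the config file at `/etc/taos/taos.cfg`) is:

```shell
# Raise the log level to 135 (DEBUG) and restart taosd to apply it
echo "debugFlag 135" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```
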
@ -427,7 +445,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tmrDebugFlag
|
### tmrDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of timer module |
|
| Meaning | Log level of timer module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -436,7 +454,7 @@ The charset that takes effect is UTF-8.
|
||||||
### uDebugFlag
|
### uDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------- |
|
| ------------- | -------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of common module |
|
| Meaning | Log level of common module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -445,7 +463,7 @@ The charset that takes effect is UTF-8.
|
||||||
### rpcDebugFlag
|
### rpcDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of rpc module |
|
| Meaning | Log level of rpc module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -454,7 +472,7 @@ The charset that takes effect is UTF-8.
|
||||||
### jniDebugFlag
|
### jniDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of jni module |
|
| Meaning | Log level of jni module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -463,7 +481,7 @@ The charset that takes effect is UTF-8.
|
||||||
### qDebugFlag
|
### qDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of query module |
|
| Meaning | Log level of query module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -472,7 +490,7 @@ The charset that takes effect is UTF-8.
|
||||||
### cDebugFlag
|
### cDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------- |
|
| ------------- | ------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of Client |
|
| Meaning | Log level of Client |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -481,7 +499,7 @@ The charset that takes effect is UTF-8.
|
||||||
### dDebugFlag
|
### dDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of dnode |
|
| Meaning | Log level of dnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -490,7 +508,7 @@ The charset that takes effect is UTF-8.
|
||||||
### vDebugFlag
|
### vDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of vnode |
|
| Meaning | Log level of vnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -499,7 +517,7 @@ The charset that takes effect is UTF-8.
|
||||||
### mDebugFlag
|
### mDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of mnode module |
|
| Meaning | Log level of mnode module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -508,7 +526,7 @@ The charset that takes effect is UTF-8.
|
||||||
### wDebugFlag
|
### wDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of WAL module |
|
| Meaning | Log level of WAL module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -517,7 +535,7 @@ The charset that takes effect is UTF-8.
|
||||||
### sDebugFlag
|
### sDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of sync module |
|
| Meaning | Log level of sync module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -526,7 +544,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tsdbDebugFlag
|
### tsdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TSDB module |
|
| Meaning | Log level of TSDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -535,7 +553,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tqDebugFlag
|
### tqDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of TQ module |
|
| Meaning | Log level of TQ module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -544,7 +562,7 @@ The charset that takes effect is UTF-8.
|
||||||
### fsDebugFlag
|
### fsDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of FS module |
|
| Meaning | Log level of FS module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -553,7 +571,7 @@ The charset that takes effect is UTF-8.
|
||||||
### udfDebugFlag
|
### udfDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of UDF module |
|
| Meaning | Log level of UDF module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -562,7 +580,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smaDebugFlag
|
### smaDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of SMA module |
|
| Meaning | Log level of SMA module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -571,7 +589,7 @@ The charset that takes effect is UTF-8.
|
||||||
### idxDebugFlag
|
### idxDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of index module |
|
| Meaning | Log level of index module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -580,7 +598,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tdbDebugFlag
|
### tdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TDB module |
|
| Meaning | Log level of TDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -591,7 +609,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smlChildTableName
|
### smlChildTableName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Custom subtable name for schemaless writes |
|
| Meaning | Custom subtable name for schemaless writes |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
|
@ -600,7 +618,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smlTagName
|
### smlTagName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Default tag for schemaless writes without tag value specified |
|
| Meaning | Default tag for schemaless writes without tag value specified |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
|
@ -609,7 +627,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smlDataFormat
|
### smlDataFormat
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ----------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
||||||
| Value Range | 0: not consistent; 1: consistent. |
|
| Value Range | 0: not consistent; 1: consistent. |
|
||||||
|
@ -620,7 +638,7 @@ The charset that takes effect is UTF-8.
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Both Client and Server side |
|
| Applicable | Both Client and Server side |
|
||||||
| Meaning | Whether RPC message is compressed |
|
| Meaning | Whether RPC message is compressed |
|
||||||
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||||
|
@ -632,7 +650,7 @@ The charset that takes effect is UTF-8.
|
||||||
### enableCoreFile
|
### enableCoreFile
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Whether to generate a core file when the server crashes |
|
| Meaning | Whether to generate a core file when the server crashes |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
|
@ -642,7 +660,7 @@ The charset that takes effect is UTF-8.
|
||||||
### enableScience
|
### enableScience
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Only taos-CLI client |
|
| Applicable | Only taos-CLI client |
|
||||||
| Meaning | Whether to show float and double with the scientific notation |
|
| Meaning | Whether to show float and double with the scientific notation |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
|
@ -652,7 +670,7 @@ The charset that takes effect is UTF-8.
|
||||||
### udf
|
### udf
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Whether the UDF service is enabled |
|
| Meaning | Whether the UDF service is enabled |
|
||||||
| Value Range | 0: disable UDF; 1: enable UDF |
|
| Value Range | 0: disable UDF; 1: enable UDF |
|
||||||
|
@ -661,8 +679,8 @@ The charset that takes effect is UTF-8.
|
||||||
|
|
||||||
## 3.0 Parameters
|
## 3.0 Parameters
|
||||||
|
|
||||||
| # | **参数** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :--------------------: | ---------------------- | ---------------------------- | ----------------------- |
|
||||||
| 1 | firstEp | Yes | Yes | |
|
| 1 | firstEp | Yes | Yes | |
|
||||||
| 2 | secondEp | Yes | Yes | |
|
| 2 | secondEp | Yes | Yes | |
|
||||||
| 3 | fqdn | Yes | Yes | |
|
| 3 | fqdn | Yes | Yes | |
|
||||||
|
|
|
@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
|
||||||
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data when the SQL statement uses `group by` or `partition by`. For example, it can show data per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data when the SQL statement uses `group by` or `partition by`. For example, it can show data per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
||||||
- Format to: legend format for `group by` or `partition by` results. For example, it can label each series by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
|
- Format to: legend format for `group by` or `partition by` results. For example, it can label each series by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
Since the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL statement to specify the database name.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
Following the default prompts, you can query the average system memory usage for the specified interval on the server where the current TDengine instance is deployed, as shown below.
|
Following the default prompts, you can query the average system memory usage for the specified interval on the server where the current TDengine instance is deployed, as shown below.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
查询每台 TDengine 服务器指定间隔系统内存平均使用量如下.
|
|
||||||
The following example queries the average system memory usage for the specified interval on each server.
|
The following example queries the average system memory usage for the specified interval on each server.
|
||||||
|
|
||||||

|

|
||||||
|
@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||||
|
|
||||||
For more dashboards using the TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a partial list:
|
For more dashboards using the TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a partial list:
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
|
||||||
|
|
||||||
### Edit SQL fields
|
### Edit SQL fields
|
||||||
|
|
||||||
Copy the SQL below and paste it into the SQL edit area:
|
Copy the SQL below and paste it into the SQL edit area:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
|
||||||
|
|
||||||
### Edit "action"
|
### Edit "action"
|
||||||
|
|
||||||
Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, then the value of the Authorization key is:
|
Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, then the value of the Authorization key is:
|
||||||
|
|
||||||
```
|
```
|
||||||
Basic cm9vdDp0YW9zZGF0YQ==
|
Basic cm9vdDp0YW9zZGF0YQ==
|
||||||
```
|
```
|
||||||
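
The token is simply the Base64 encoding of `username:password`, so you can regenerate it for your own credentials; this reproduces the value above for the default account:

```shell
# -n suppresses the trailing newline so it is not included in the encoding
echo -n 'root:taosdata' | base64
```
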
|
|
|
@ -46,15 +46,14 @@ Execute in any directory:
|
||||||
|
|
||||||
````
|
````
|
||||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||||
tar xzf confluent-7.1.1.tar.gz -C /opt/test
|
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||||
````
|
````
|
||||||
|
|
||||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
||||||
|
|
||||||
```title=".profile"
|
```title=".profile"
|
||||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||||
PATH=$CONFLUENT_HOME/bin
|
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||||
export PATH
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
||||||
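
After appending it, a quick check that the setup took effect, assuming a Bash login shell:

```shell
# Reload the profile and confirm the Confluent CLI resolves on PATH
source ~/.profile
confluent --help
```
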
|
@ -315,7 +314,6 @@ connection.backoff.ms=5000
|
||||||
topic.prefix=tdengine-source-
|
topic.prefix=tdengine-source-
|
||||||
poll.interval.ms=1000
|
poll.interval.ms=1000
|
||||||
fetch.max.rows=100
|
fetch.max.rows=100
|
||||||
out.format=line
|
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
```
|
```
|
||||||
|
@ -329,7 +327,15 @@ DROP DATABASE IF EXISTS test;
|
||||||
CREATE DATABASE test;
|
CREATE DATABASE test;
|
||||||
USE test;
|
USE test;
|
||||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
|
||||||
|
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
|
||||||
|
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||||
```
|
```
|
||||||
|
|
||||||
Use TDengine CLI to execute SQL script
|
Use TDengine CLI to execute SQL script
|
||||||
|
@ -346,7 +352,7 @@ confluent local services connect connector load TDengineSourceConnector --config
|
||||||
|
|
||||||
### View topic data
|
### View topic data
|
||||||
|
|
||||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new rows into TDengine, kafka-console-consumer immediately outputs the two new rows.
|
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new rows into TDengine, kafka-console-consumer immediately outputs the two new rows. The output is in InfluxDB line protocol format.
|
||||||
|
|
||||||
````
|
````
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||||
|
@ -384,7 +390,7 @@ confluent local services connect connector status
|
||||||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||||
|
|
||||||
````
|
````
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSinkConnector
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSourceConnector
|
||||||
````
|
````
|
||||||
|
|
||||||
|
@ -417,11 +423,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
||||||
### TDengine Source Connector specific configuration
|
### TDengine Source Connector specific configuration
|
||||||
|
|
||||||
1. `connection.database`: source database name, no default value.
|
1. `connection.database`: source database name, no default value.
|
||||||
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
|
2. `topic.prefix`: topic name prefix used when importing data into Kafka. Its default value is an empty string "".
|
||||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
|
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, importing to Kafka starts from the first/oldest row in the database.
|
||||||
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
|
4. `poll.interval.ms`: The time interval, in milliseconds, for checking newly created or removed tables. Default value is 1000.
|
||||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
|
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
||||||
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
|
6. `query.interval.ms`: The time range for reading data from TDengine in each query, in milliseconds. It should be adjusted according to the data ingestion rate; the default value is 1000.
|
||||||
|
7. `topic.per.stable`: If set to true, one supertable in TDengine corresponds to one topic in Kafka, named `<topic.prefix>-<connection.database>-<stable.name>`; if set to false, the whole database corresponds to one topic, named `<topic.prefix>-<connection.database>`.
|
||||||
|
|
||||||
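
Taken together, a hedged sketch of how the source-specific options might look in a connector properties file; all values are illustrative and should be adapted to your database and ingest rate:

```
connection.database=test
topic.prefix=tdengine-source
timestamp.initial=2023-01-01 00:00:00
poll.interval.ms=1000
fetch.max.rows=100
query.interval.ms=1000
topic.per.stable=true
```
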
|
|
||||||
|
|
||||||
## Other notes
|
## Other notes
|
||||||
|
|
|
@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
|
||||||
|
|
||||||
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
|
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
|
||||||
|
|
||||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
|
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
|
||||||
|
|
|
@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
|
||||||
|
|
||||||
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes. At the same time, the management node is responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically organized into a virtual management node group (M0, M1, M2 in the figure). The mnode group adopts the RAFT protocol to guarantee high data availability and reliability; any data operation can only be performed through the leader of the RAFT group. The first mnode in the RAFT group is created automatically when the first dnode of the cluster is deployed; the other two follower mnodes need to be created through SQL commands in the TDengine CLI. There can be at most one mnode in a single dnode, and an mnode is identified by the EP of the dnode where it is located. dnodes communicate with each other to automatically obtain the EPs of all mnodes.
|
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes. At the same time, the management node is responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically organized into a virtual management node group (M0, M1, M2 in the figure). The mnode group adopts the RAFT protocol to guarantee high data availability and reliability; any data operation can only be performed through the leader of the RAFT group. The first mnode in the RAFT group is created automatically when the first dnode of the cluster is deployed; the other two follower mnodes need to be created through SQL commands in the TDengine CLI. There can be at most one mnode in a single dnode, and an mnode is identified by the EP of the dnode where it is located. dnodes communicate with each other to automatically obtain the EPs of all mnodes.
|
||||||
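As a minimal sketch of creating the two follower mnodes, assuming a cluster whose dnodes 2 and 3 (hypothetical IDs) should host them; the same `CREATE MNODE` statements can be issued directly in the TDengine CLI:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateMnodes {
    public static void main(String[] args) throws Exception {
        // Connect to any running dnode; host and credentials are assumptions.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // The first mnode is created automatically with the first dnode;
            // add the two followers on dnodes 2 and 3.
            stmt.execute("CREATE MNODE ON DNODE 2");
            stmt.execute("CREATE MNODE ON DNODE 3");
        }
    }
}
```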
|
|
||||||
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. Multiple qnodes can be configured in a TDengine cluster to share the query and computing workload. A qnode is not coupled with a specific database, which means each qnode can execute query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and a qnode is identified by the EP of the dnode. The TDengine client driver gets the list of qnodes by communicating with the mnode. If no qnode is available in the system, query and computing tasks are executed by vnodes. When a query task is executed, one or more qnodes may be scheduled by the scheduler, according to the execution plan, to execute the task. A qnode can get data from a vnode and send intermediate results to other qnodes for further processing. By introducing qnodes, TDengine achieves the separation of storage and computing.
|
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. Multiple qnodes can be configured in a TDengine cluster to share the query and computing workload. A qnode is not coupled with a specific database, which means each qnode can execute query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and a qnode is identified by the EP of the dnode. The TDengine client driver gets the list of qnodes by communicating with the mnode. If no qnode is available in the system, query and computing tasks are executed by vnodes. When a query task is executed, one or more qnodes may be scheduled by the scheduler, according to the execution plan, to execute the task. A qnode can get data from a vnode and send intermediate results to other qnodes for further processing. By introducing qnodes, TDengine achieves the separation of storage and computing.
|
||||||
|
|
||||||
**Stream processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. Multiple snodes can be configured in a TDengine cluster to share the stream processing workload. An snode is not coupled with a specific stream, which means a single snode can execute tasks for multiple streams. There can be at most one snode in a single dnode; it is identified by the EP of the dnode. The mnode schedules available snodes to perform stream processing tasks. If no snode is available in the system, stream processing tasks are executed in vnodes.
|
**Stream processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. Multiple snodes can be configured in a TDengine cluster to share the stream processing workload. An snode is not coupled with a specific stream, which means a single snode can execute tasks for multiple streams. There can be at most one snode in a single dnode; it is identified by the EP of the dnode. The mnode schedules available snodes to perform stream processing tasks. If no snode is available in the system, stream processing tasks are executed in vnodes.
|
||||||
|
|
||||||
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed with the RAFT protocol. Write operations can only be performed on the leader vnode and are then replicated to follower vnodes, ensuring that a copy of the data exists on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, with a default of 1. Using TDengine's replication feature, the same high data reliability can be achieved without expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns each vgroup a cluster-unique ID, the VGroup ID. Virtual nodes with the same vgroup ID belong to the same vgroup. If `replica` is set to 1, there is no data replication. The number of replicas of a database can later be changed dynamically, e.g. to 3 for high data reliability. Even if a virtual node group is deleted, its ID is not reused.
|
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed with the RAFT protocol. Write operations can only be performed on the leader vnode and are then replicated to follower vnodes, ensuring that a copy of the data exists on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, with a default of 1. Using TDengine's replication feature, the same high data reliability can be achieved without expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns each vgroup a cluster-unique ID, the VGroup ID. Virtual nodes with the same vgroup ID belong to the same vgroup. If `replica` is set to 1, there is no data replication. The number of replicas of a database can later be changed dynamically, e.g. to 3 for high data reliability. Even if a virtual node group is deleted, its ID is not reused.
|
||||||
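A minimal sketch of the `replica` parameter described above, assuming a hypothetical database named `power` and a cluster with at least three dnodes:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ReplicaDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // Create a database whose vgroups each contain three vnodes.
            stmt.execute("CREATE DATABASE IF NOT EXISTS power REPLICA 3");
            // The replica count can also be raised later, e.g. from 1 to 3.
            stmt.execute("ALTER DATABASE power REPLICA 3");
        }
    }
}
```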
|
|
||||||
|
@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
|
||||||
- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
|
- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
|
||||||
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
||||||
|
|
||||||
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||||
|
|
||||||
### A Typical Data Writing Process
|
### A Typical Data Writing Process
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
|
||||||
|
|
||||||
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
||||||
|
|
||||||
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the raw data generated in one year is less than 0.5 GB). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing it across two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and a single vnode can hold up to one million tables. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
||||||
|
|
||||||
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check whether there is an allocated vnode with free tablespace. If so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode in the cluster according to the current workload, and then creates the table in it. If there are multiple replicas of a DB, the system does not create just one vnode but a vgroup (virtual data node group). The system has no limit on the number of vnodes; it is only limited by the computing and storage resources of the physical nodes.
|
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check whether there is an allocated vnode with free tablespace. If so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode in the cluster according to the current workload, and then creates the table in it. If there are multiple replicas of a DB, the system does not create just one vnode but a vgroup (virtual data node group). The system has no limit on the number of vnodes; it is only limited by the computing and storage resources of the physical nodes.
|
||||||
|
|
||||||
|
@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
|
||||||
<center> Figure 3: TDengine Leader writing process </center>
|
<center> Figure 3: TDengine Leader writing process </center>
|
||||||
|
|
||||||
1. Leader vnode receives the application data insertion request, verifies it, and moves to the next step;
|
1. Leader vnode receives the application data insertion request, verifies it, and moves to the next step;
|
||||||
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `“wal_level”` is set to 1, vnode doesn't invoked fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
|
2. Leader vnode writes the original request packet into the database log file (WAL). If the database configuration parameter `wal_level` is set to 1, the vnode does not invoke fsync; if `wal_level` is set to 2, fsync is invoked according to another database parameter, `wal_fsync_period` (see the sketch after this list).
|
||||||
3. If there are multiple replicas, the leader vnode will forward the data packet to follower vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data;
|
3. If there are multiple replicas, the leader vnode will forward the data packet to follower vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data;
|
||||||
4. Leader vnode Writes the data into memory and add the record to “skip list”;
|
4. Leader vnode writes the data into memory and adds the record to the skip list;
|
||||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||||
6. If any of Step 2, 3 or 4 fails, the error is returned directly to the application.
|
6. If any of Step 2, 3 or 4 fails, the error is returned directly to the application.
|
||||||
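A minimal sketch of the WAL parameters mentioned in step 2, assuming a hypothetical database named `demo`; with `WAL_LEVEL` 2, fsync is invoked at most every `WAL_FSYNC_PERIOD` milliseconds:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class WalConfigDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // WAL_LEVEL 2 enables fsync; WAL_FSYNC_PERIOD 3000 invokes it
            // at most every 3000 ms.
            stmt.execute("CREATE DATABASE IF NOT EXISTS demo"
                    + " WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000");
        }
    }
}
```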
|
|
||||||
|
@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
|
||||||
|
|
||||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||||
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is same as the leader vnode.
|
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is same as the leader vnode.
|
||||||
3. Write into memory and add the record to “skip list”.
|
3. Write the data into memory and add the record to the skip list.
|
||||||
|
|
||||||
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
|
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
|
||||||
|
|
||||||
|
@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
|
||||||
|
|
||||||
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
||||||
|
|
||||||
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
||||||
|
|
||||||
### Synchronous Replication
|
### Synchronous Replication
|
||||||
|
|
||||||
|
@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba
|
||||||
|
|
||||||
### Tiered Storage
|
### Tiered Storage
|
||||||
|
|
||||||
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
|
By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a separate subdirectory under it. To expand the storage space, minimize file-reading bottlenecks, and improve data throughput, TDengine can use the system parameter "dataDir" to let the system use multiple mounted hard disks at the same time. In addition, TDengine also provides tiered data storage, i.e. storing data on different storage media according to the timestamps of the data files. For example, the latest data can be stored on SSD, data older than a week on local hard disk, and data older than four weeks on a network storage device. This reduces storage costs while ensuring efficient data access. The movement of data across storage media is done automatically by the system and is completely transparent to applications. Tiered storage is also configured through the system parameter "dataDir".
|
||||||
|
|
||||||
dataDir format is as follows:
|
dataDir format is as follows:
|
||||||
|
|
||||||
|
@ -202,7 +202,7 @@ dataDir data_path [tier_level]
|
||||||
|
|
||||||
Here data_path is the folder path of the mount point and tier_level is the storage tier of the media. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all hard disks within that tier. TDengine supports up to 3 storage tiers, so tier_level can be 0, 1, or 2. When configuring dataDir, exactly one mount path must be given without a tier_level; it is called the special mount disk (path). This mount path defaults to tier 0 storage media and contains special file links, which must not be removed; otherwise the written data will be irreparably damaged.
|
Here data_path is the folder path of the mount point and tier_level is the storage tier of the media. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all hard disks within that tier. TDengine supports up to 3 storage tiers, so tier_level can be 0, 1, or 2. When configuring dataDir, exactly one mount path must be given without a tier_level; it is called the special mount disk (path). This mount path defaults to tier 0 storage media and contains special file links, which must not be removed; otherwise the written data will be irreparably damaged.
|
||||||
|
|
||||||
Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
|
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
dataDir /mnt/disk1/taos
|
dataDir /mnt/disk1/taos
|
||||||
|
|
|
@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
|
||||||
|
|
||||||
### TDengine
|
### TDengine
|
||||||
|
|
||||||
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
|
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
|
||||||
|
|
||||||
## Data Connection Setup
|
## Data Connection Setup
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
|
||||||
|
|
||||||
### Install TDengine
|
### Install TDengine
|
||||||
|
|
||||||
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
|
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
|
||||||
|
|
||||||
## Data Connection Setup
|
## Data Connection Setup
|
||||||
|
|
||||||
|
|
|
@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici
|
||||||
|
|
||||||
### 2. Manual data migration
|
### 2. Manual data migration
|
||||||
|
|
||||||
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then pass the time to import simultaneously through the SQL statement—written to the database.
|
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool that exports data from OpenTSDB, determines which timelines can be merged into the same timeline, and then writes the merged data into the database through SQL statements (see the sketch below).
|
||||||
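A minimal sketch of such an import over JDBC, assuming two OpenTSDB timelines (current and voltage of one device) have already been merged and are written into the hypothetical multi-value table `test.d1001`:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MultiValueImport {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE DATABASE IF NOT EXISTS test");
            // One row per timestamp holds both merged metrics.
            stmt.execute("CREATE TABLE IF NOT EXISTS test.d1001"
                    + " (ts TIMESTAMP, current FLOAT, voltage INT)");
            stmt.execute("INSERT INTO test.d1001 VALUES"
                    + " (1648432611249, 10.3, 219) (1648432611250, 12.6, 218)");
        }
    }
}
```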
|
|
||||||
Manual migration of data requires attention to the following two issues:
|
Manual migration of data requires attention to the following two issues:
|
||||||
|
|
||||||
|
@ -258,7 +258,7 @@ Equivalent function: apercentile
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
Select apercentile(col1, 50, “t-digest”) from table_name
|
select apercentile(col1, 50, "t-digest") from table_name
|
||||||
```
|
```
|
||||||
|
|
||||||
Remark:
|
Remark:
|
||||||
|
|
|
@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
|
||||||
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
|
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
|
||||||
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
|
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
|
||||||
4. Install TDengine 3.0.
|
4. Install TDengine 3.0.
|
||||||
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
|
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
|
||||||
|
|
||||||
### 2. How can I resolve the "Unable to establish connection" error?
|
### 2. How can I resolve the "Unable to establish connection" error?
|
||||||
|
|
||||||
|
|
|
@ -22,7 +22,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.1.0</version>
|
<version>3.2.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- ANCHOR_END: dep-->
|
<!-- ANCHOR_END: dep-->
|
||||||
<dependency>
|
<dependency>
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
package com.taos.example;
|
package com.taos.example;
|
||||||
|
|
||||||
|
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||||
|
@ -64,7 +65,8 @@ public class SubscribeDemo {
|
||||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||||
for (Meters meter : meters) {
|
for (ConsumerRecord<Meters> record : meters) {
|
||||||
|
Meters meter = record.value();
|
||||||
System.out.println(meter);
|
System.out.println(meter);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,74 @@
|
||||||
|
use taos_query::common::SchemalessPrecision;
|
||||||
|
use taos_query::common::SchemalessProtocol;
|
||||||
|
use taos_query::common::SmlDataBuilder;
|
||||||
|
|
||||||
|
use crate::AsyncQueryable;
|
||||||
|
use crate::AsyncTBuilder;
|
||||||
|
use crate::TaosBuilder;
|
||||||
|
|
||||||
|
async fn put_json() -> anyhow::Result<()> {
|
||||||
|
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||||
|
std::env::set_var("RUST_LOG", "taos=debug");
|
||||||
|
pretty_env_logger::init();
|
||||||
|
let dsn =
|
||||||
|
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||||
|
log::debug!("dsn: {:?}", &dsn);
|
||||||
|
|
||||||
|
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||||
|
|
||||||
|
let db = "demo_schemaless_ws";
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
client
|
||||||
|
.exec(format!("create database if not exists {db}"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// should specify database before insert
|
||||||
|
client.exec(format!("use {db}")).await?;
|
||||||
|
|
||||||
|
// SchemalessProtocol::Json
|
||||||
|
let data = [
|
||||||
|
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
|
||||||
|
]
|
||||||
|
.map(String::from)
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
// demo with all fields
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Json)
|
||||||
|
.precision(SchemalessPrecision::Millisecond)
|
||||||
|
.data(data.clone())
|
||||||
|
.ttl(1000)
|
||||||
|
.req_id(300u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default precision
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Json)
|
||||||
|
.data(data.clone())
|
||||||
|
.ttl(1000)
|
||||||
|
.req_id(301u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default ttl
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Json)
|
||||||
|
.data(data.clone())
|
||||||
|
.req_id(302u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default req_id
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Json)
|
||||||
|
.data(data.clone())
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
|
@ -0,0 +1,78 @@
|
||||||
|
use taos_query::common::SchemalessPrecision;
|
||||||
|
use taos_query::common::SchemalessProtocol;
|
||||||
|
use taos_query::common::SmlDataBuilder;
|
||||||
|
|
||||||
|
use crate::AsyncQueryable;
|
||||||
|
use crate::AsyncTBuilder;
|
||||||
|
use crate::TaosBuilder;
|
||||||
|
|
||||||
|
async fn put_line() -> anyhow::Result<()> {
|
||||||
|
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||||
|
std::env::set_var("RUST_LOG", "taos=debug");
|
||||||
|
pretty_env_logger::init();
|
||||||
|
|
||||||
|
let dsn =
|
||||||
|
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||||
|
log::debug!("dsn: {:?}", &dsn);
|
||||||
|
|
||||||
|
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||||
|
|
||||||
|
let db = "demo_schemaless_ws";
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
client
|
||||||
|
.exec(format!("create database if not exists {db}"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// should specify database before insert
|
||||||
|
client.exec(format!("use {db}")).await?;
|
||||||
|
|
||||||
|
let data = [
|
||||||
|
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
|
||||||
|
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
|
||||||
|
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
|
||||||
|
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
|
||||||
|
]
|
||||||
|
.map(String::from)
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
// demo with all fields
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Line)
|
||||||
|
.precision(SchemalessPrecision::Millisecond)
|
||||||
|
.data(data.clone())
|
||||||
|
.ttl(1000)
|
||||||
|
.req_id(100u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default ttl
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Line)
|
||||||
|
.precision(SchemalessPrecision::Millisecond)
|
||||||
|
.data(data.clone())
|
||||||
|
.req_id(101u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default ttl and req_id
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Line)
|
||||||
|
.precision(SchemalessPrecision::Millisecond)
|
||||||
|
.data(data.clone())
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default precision
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Line)
|
||||||
|
.data(data)
|
||||||
|
.req_id(103u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
|
@ -0,0 +1,80 @@
|
||||||
|
use taos_query::common::SchemalessPrecision;
|
||||||
|
use taos_query::common::SchemalessProtocol;
|
||||||
|
use taos_query::common::SmlDataBuilder;
|
||||||
|
|
||||||
|
use crate::AsyncQueryable;
|
||||||
|
use crate::AsyncTBuilder;
|
||||||
|
use crate::TaosBuilder;
|
||||||
|
|
||||||
|
async fn put_telnet() -> anyhow::Result<()> {
|
||||||
|
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||||
|
std::env::set_var("RUST_LOG", "taos=debug");
|
||||||
|
pretty_env_logger::init();
|
||||||
|
let dsn =
|
||||||
|
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||||
|
log::debug!("dsn: {:?}", &dsn);
|
||||||
|
|
||||||
|
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||||
|
|
||||||
|
let db = "demo_schemaless_ws";
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
client
|
||||||
|
.exec(format!("create database if not exists {db}"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// should specify database before insert
|
||||||
|
client.exec(format!("use {db}")).await?;
|
||||||
|
|
||||||
|
let data = [
|
||||||
|
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
|
||||||
|
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
|
||||||
|
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
|
||||||
|
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
|
||||||
|
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
|
||||||
|
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
|
||||||
|
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
|
||||||
|
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
|
||||||
|
]
|
||||||
|
.map(String::from)
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
// demo with all fields
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Telnet)
|
||||||
|
.precision(SchemalessPrecision::Millisecond)
|
||||||
|
.data(data.clone())
|
||||||
|
.ttl(1000)
|
||||||
|
.req_id(200u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default precision
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Telnet)
|
||||||
|
.data(data.clone())
|
||||||
|
.ttl(1000)
|
||||||
|
.req_id(201u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default ttl
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Telnet)
|
||||||
|
.data(data.clone())
|
||||||
|
.req_id(202u64)
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
// demo with default req_id
|
||||||
|
let sml_data = SmlDataBuilder::default()
|
||||||
|
.protocol(SchemalessProtocol::Telnet)
|
||||||
|
.data(data.clone())
|
||||||
|
.build()?;
|
||||||
|
assert_eq!(client.put(&sml_data).await?, ());
|
||||||
|
|
||||||
|
client.exec(format!("drop database if exists {db}")).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
|
@ -92,7 +92,7 @@ TDengine 的主要功能如下:
|
||||||
|
|
||||||
## Typical Use Cases
|
## Typical Use Cases
|
||||||
|
|
||||||
As a high-performance, distributed time-series database (Database) that supports SQL, TDengine's typical use cases include but are not limited to IoT, Industrial Internet, connected vehicles, IT operations and maintenance, energy, and financial securities. Note that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it fully exploits the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, microblogs, WeChat, e-commerce, ERP, or CRM. The following sections analyze the applicable scenarios in more detail.
|
As a high-performance, distributed time-series database (Time-series Database) that supports SQL, TDengine's typical use cases include but are not limited to IoT, Industrial Internet, connected vehicles, IT operations and maintenance, energy, and financial securities. Note that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it fully exploits the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, microblogs, WeChat, e-commerce, ERP, or CRM. The following sections analyze the applicable scenarios in more detail.
|
||||||
|
|
||||||
### Data Source Characteristics and Requirements
|
### Data Source Characteristics and Requirements
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
```rust
|
||||||
|
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
|
||||||
|
```
|
|
@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
1. The sample code above works with connectors using either a REST connection or a native connection.
|
1. The sample code above works with connectors using either a REST connection or a native connection.
|
||||||
2. The only caveat: because the REST interface is stateless, the `use db` statement cannot be used to switch databases.
|
2. The only caveat: because the REST interface is stateless, the `use db` statement cannot be used to switch databases. Besides specifying the database in the REST connection parameters, you can also specify it in the SQL statement with `<db_name>.<table_name>` (see the sketch after this note).
|
||||||
|
|
||||||
:::
|
:::
|
||||||
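A minimal sketch of the second note, assuming a REST connection through taosAdapter on localhost:6041 and a hypothetical table `power.meters`:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class RestQualifiedName {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/", "root", "taosdata");
             Statement stmt = conn.createStatement();
             // The REST interface is stateless, so qualify the table with its database.
             ResultSet rs = stmt.executeQuery(
                     "SELECT ts, current FROM power.meters LIMIT 3")) {
            while (rs.next()) {
                System.out.println(rs.getTimestamp("ts") + " " + rs.getFloat("current"));
            }
        }
    }
}
```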
|
|
||||||
|
|
|
@ -25,7 +25,8 @@ import CDemo from "./_sub_c.mdx";
|
||||||
|
|
||||||
This document does not introduce the basics of message queues themselves; please look them up if needed.
|
This document does not introduce the basics of message queues themselves; please look them up if needed.
|
||||||
|
|
||||||
Note: By default, data is consumed from the WAL. If the WAL is deleted, the consumed data will be incomplete. In this case you can set the parameter experimental.snapshot.enable to true to obtain all data from the TSDB, but then the order of consumption cannot be guaranteed. It is therefore recommended to set a WAL retention policy that matches your consumption behavior, so that all data can be subscribed from the WAL.
|
Note: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in the deleted WAL files can no longer be consumed. You should set `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` appropriately when creating the database, based on your business needs, and make sure the application consumes data in time, so that no data loss occurs (see the sketch below). Data subscription behaves similarly to widely used message-queue products such as Kafka.
|
||||||
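A minimal sketch of setting the WAL retention period at database creation time, assuming a hypothetical database named `power` and a retention window of one hour (3600 seconds):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class WalRetentionDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // Keep WAL data for at least 3600 seconds so subscribers
            // have time to consume everything.
            stmt.execute("CREATE DATABASE IF NOT EXISTS power"
                    + " WAL_RETENTION_PERIOD 3600");
        }
    }
}
```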
|
|
||||||
## Main Data Structures and APIs
|
## Main Data Structures and APIs
|
||||||
|
|
||||||
The TMQ subscription APIs and data structures for each language are as follows:
|
The TMQ subscription APIs and data structures for each language are as follows:
|
||||||
|
@ -284,16 +285,15 @@ CREATE TOPIC topic_name AS DATABASE db_name;
|
||||||
|
|
||||||
| Parameter | Type | Description | Notes |
|
| Parameter | Type | Description | Notes |
|
||||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||||
| `td.connect.ip` | string | Used to create the connection, same as `taos_connect` | Only used for native connections |
|
| `td.connect.ip` | string | IP address of the server | |
|
||||||
| `td.connect.user` | string | Used to create the connection, same as `taos_connect` | Only used for native connections |
|
| `td.connect.user` | string | User name | |
|
||||||
| `td.connect.pass` | string | Used to create the connection, same as `taos_connect` | Only used for native connections |
|
| `td.connect.pass` | string | Password | |
|
||||||
| `td.connect.port` | integer | Used to create the connection, same as `taos_connect` | Only used for native connections |
|
| `td.connect.port` | integer | Port of the server | |
|
||||||
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | **Required**. Maximum length: 192. |
|
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | **Required**. Maximum length: 192. |
|
||||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||||
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
|
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
|
||||||
| `enable.auto.commit` | boolean | Whether to enable automatic offset commit; true: commit automatically, the client application does not need to commit; false: the client application must commit by itself | Default: true |
|
| `enable.auto.commit` | boolean | Whether to enable automatic offset commit; true: commit automatically, the client application does not need to commit; false: the client application must commit by itself | Default: true |
|
||||||
| `auto.commit.interval.ms` | integer | Interval in milliseconds for automatically committing consumed offsets | Default: 5000 |
|
| `auto.commit.interval.ms` | integer | Interval in milliseconds for automatically committing consumed offsets | Default: 5000 |
|
||||||
| `experimental.snapshot.enable` | boolean | Whether to allow consuming data from the TSDB. When disabled, only data still in the WAL per the retention policy can be consumed; when enabled, data that has been deleted from the WAL but persisted to the TSDB can also be consumed | Experimental feature, disabled by default |
|
|
||||||
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written as a column in the subquery) | Disabled by default |
|
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written as a column in the subquery) | Disabled by default |
|
||||||
|
|
||||||
These options are set as follows in each programming language:
|
These options are set as follows in each programming language:
|
||||||
|
@ -311,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
|
||||||
tmq_conf_set(conf, "td.connect.user", "root");
|
tmq_conf_set(conf, "td.connect.user", "root");
|
||||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
|
||||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||||
|
|
||||||
|
@ -322,10 +321,11 @@ tmq_conf_destroy(conf);
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="java" label="Java">
|
<TabItem value="java" label="Java">
|
||||||
|
|
||||||
For Java programs, use the following configuration options:
|
For Java programs, the following additional configuration options are available:
|
||||||
|
|
||||||
| Parameter | Type | Description |
|
| Parameter | Type | Description |
|
||||||
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| `td.connect.type` | string | Connection type: "jni" for a native connection, "ws" for a WebSocket connection. Default: "jni" |
|
||||||
| `bootstrap.servers` | string | Connection address, e.g. `localhost:6030` |
|
| `bootstrap.servers` | string | Connection address, e.g. `localhost:6030` |
|
||||||
| `value.deserializer` | string | Value deserialization method; the class used should implement the `com.taosdata.jdbc.tmq.Deserializer` interface or extend the `com.taosdata.jdbc.tmq.ReferenceDeserializer` class |
|
| `value.deserializer` | string | Value deserialization method; the class used should implement the `com.taosdata.jdbc.tmq.Deserializer` interface or extend the `com.taosdata.jdbc.tmq.ReferenceDeserializer` class |
|
||||||
| `value.deserializer.encoding` | string | Character set used for string deserialization | |
|
| `value.deserializer.encoding` | string | Character set used for string deserialization | |
|
||||||
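A minimal sketch assembling the options from both tables, assuming a hypothetical deserializer class `com.example.MetersDeserializer` that extends `ReferenceDeserializer` and a hypothetical topic `topic_meters`:

```java
import java.util.Collections;
import java.util.Properties;

import com.taosdata.jdbc.tmq.TaosConsumer;

public class ConsumerConfigDemo {
    public static void main(String[] args) throws Exception {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "jni");      // or "ws" for WebSocket
        config.setProperty("bootstrap.servers", "localhost:6030");
        config.setProperty("group.id", "group1");          // required
        config.setProperty("client.id", "client1");
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("msg.with.table.name", "true");
        // com.example.MetersDeserializer is a hypothetical user class that
        // extends com.taosdata.jdbc.tmq.ReferenceDeserializer.
        config.setProperty("value.deserializer", "com.example.MetersDeserializer");

        try (TaosConsumer<Object> consumer = new TaosConsumer<>(config)) {
            consumer.subscribe(Collections.singletonList("topic_meters"));
            // Poll in a loop as shown in the subscription examples.
        }
    }
}
```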
|
@ -367,7 +367,6 @@ conf := &tmq.ConfigMap{
|
||||||
"td.connect.port": "6030",
|
"td.connect.port": "6030",
|
||||||
"client.id": "test_tmq_c",
|
"client.id": "test_tmq_c",
|
||||||
"enable.auto.commit": "false",
|
"enable.auto.commit": "false",
|
||||||
"experimental.snapshot.enable": "true",
|
|
||||||
"msg.with.table.name": "true",
|
"msg.with.table.name": "true",
|
||||||
}
|
}
|
||||||
consumer, err := NewConsumer(conf)
|
consumer, err := NewConsumer(conf)
|
||||||
|
@ -403,22 +402,6 @@ from taos.tmq import Consumer
|
||||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||||
```
|
```
|
||||||
|
|
||||||
Here, `configs` is of dict type and passes the parameters for creating the Consumer. The configurable parameters are:
|
|
||||||
|
|
||||||
| Parameter | Type | Description | Notes |
|
|
||||||
|:------:|:----:|:-------:|:---:|
|
|
||||||
| `td.connect.ip` | string | Used to create the connection||
|
|
||||||
| `td.connect.user` | string | Used to create the connection||
|
|
||||||
| `td.connect.pass` | string | Used to create the connection||
|
|
||||||
| `td.connect.port` | string | Used to create the connection||
|
|
||||||
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | **Required**. Maximum length: 192 |
|
|
||||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
|
||||||
| `msg.with.table.name` | string | Whether to allow parsing the table name from the message; not applicable to column subscriptions | Valid values: `true`, `false` |
|
|
||||||
| `enable.auto.commit` | string | Enable automatic commit | Valid values: `true`, `false` |
|
|
||||||
| `auto.commit.interval.ms` | string | Automatic commit interval in milliseconds | Default: 5000 ms |
|
|
||||||
| `auto.offset.reset` | string | Initial position of the consumer group subscription | Options: `earliest` (default), `latest`, `none` |
|
|
||||||
| `experimental.snapshot.enable` | string | Whether to allow consuming data from the TSDB | Valid values: `true`, `false` |
|
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem label="Node.JS" value="Node.JS">
|
<TabItem label="Node.JS" value="Node.JS">
|
||||||
|
|
|
@ -36,6 +36,93 @@ REST 连接支持所有能运行 Java 的平台。
|
||||||
|
|
||||||
Please refer to the [version support list](../#版本支持)
|
Please refer to the [version support list](../#版本支持)
|
||||||
|
|
||||||
|
## Recent Update History
|
||||||
|
|
||||||
|
| taos-jdbcdriver version | Major changes |
|
||||||
|
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||||
|
| 3.2.1 | New features: WebSocket connections support schemaless writing and prepareStatement writing. Change: consumer poll now returns the result set as ConsumerRecord, and the data can be obtained through value(). |
|
||||||
|
| 3.2.0 | Has connection issues; not recommended |
|
||||||
|
| 3.1.0 | WebSocket connections support subscription |
|
||||||
|
| 3.0.1 - 3.0.4 | Fixed result-set data parsing errors in some cases. 3.0.1 was compiled in a JDK 11 environment; other versions are recommended for JDK 8 environments |
|
||||||
|
| 3.0.0 | Supports TDengine 3.0 |
|
||||||
|
| 2.0.42 | Fixed the wasNull return value in WebSocket connections |
|
||||||
|
| 2.0.41 | Fixed user name and password encoding in REST connections |
|
||||||
|
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings |
|
||||||
|
| 2.0.38 | Added batch fetching to JDBC REST connections |
|
||||||
|
| 2.0.37 | Added support for json tags |
|
||||||
|
| 2.0.36 | Added support for schemaless writing |
|
||||||
|
|
||||||
|
**Note**: Setting the `batchfetch` parameter to true in a REST connection enables the WebSocket connection (see the sketch below).
|
||||||
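A minimal sketch of this, assuming taosAdapter is listening on localhost:6041:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class BatchFetchDemo {
    public static void main(String[] args) throws Exception {
        // With batchfetch=true, the REST connection is upgraded to a
        // WebSocket connection for batch fetching.
        String url = "jdbc:TAOS-RS://localhost:6041/?batchfetch=true";
        try (Connection conn = DriverManager.getConnection(url, "root", "taosdata")) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```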
|
|
||||||
|
## Exception Handling
|
||||||
|
|
||||||
|
When an error occurs, the error message and error code can be obtained through SQLException:
|
||||||
|
|
||||||
|
```java
|
||||||
|
try (Statement statement = connection.createStatement()) {
|
||||||
|
// executeQuery
|
||||||
|
ResultSet resultSet = statement.executeQuery(sql);
|
||||||
|
// print result
|
||||||
|
printResult(resultSet);
|
||||||
|
} catch (SQLException e) {
|
||||||
|
System.out.println("ERROR Message: " + e.getMessage());
|
||||||
|
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The error codes that the JDBC connector may report fall into four categories:
|
||||||
|
|
||||||
|
- Errors from the JDBC driver itself (error codes between 0x2301 and 0x2350)
|
||||||
|
- Errors from native connection methods (error codes between 0x2351 and 0x2360)
|
||||||
|
- Errors from data subscription (error codes between 0x2371 and 0x2380)
|
||||||
|
- Errors from other TDengine modules.
|
||||||
|
|
||||||
|
For the specific error codes, please refer to:
|
||||||
|
|
||||||
|
| Error Code | Description | Suggested Actions |
|
||||||
|
| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
||||||
|
| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or recreate the connection before executing the command. |
|
||||||
|
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported; consider switching to another connection type. |
|
||||||
|
| 0x2303 | invalid variables | Invalid parameters. Check the interface specification and adjust the parameter types and sizes. |
|
||||||
|
| 0x2304 | statement is closed | The statement has been closed. Check whether it was closed before being used again, or whether the connection is healthy. |
|
||||||
|
| 0x2305 | resultSet is closed | The resultSet has been released. Check whether it was released before being used again. |
|
||||||
|
| 0x2306 | Batch is empty! | Add parameters to the prepareStatement before calling executeBatch. |
|
||||||
|
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
|
||||||
|
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
|
||||||
|
| 0x230d | parameter index out of range | Parameter index out of range. Check the valid range of the parameter. |
|
||||||
|
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was closed before being used again, or whether the connection is healthy. |
|
||||||
|
| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. |
|
||||||
|
| 0x2310 | can't register JDBC-JNI driver | Cannot register the JNI driver. Check whether the url is correct. |
|
||||||
|
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
||||||
|
| 0x2314 | numeric value out of range | Check whether the correct interface was used for the numeric type in the result set. |
|
||||||
|
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine and JDBC data types. |
|
||||||
|
| 0x2317 | | An incorrect request type was used in the REST connection. |
|
||||||
|
| 0x2318 | | A data transmission error occurred in the REST connection. Check the network and retry. |
|
||||||
|
| 0x2319 | user is required | The user name is missing when creating the connection |
|
||||||
|
| 0x231a | password is required | The password is missing when creating the connection |
|
||||||
|
| 0x231c | httpEntity is null, sql: | An execution error occurred in the REST connection |
|
||||||
|
| 0x2350 | unknown error | Unknown error. Please report it to the developers on GitHub. |
|
||||||
|
| 0x2352 | Unsupported encoding | An unsupported character encoding was specified under the native connection |
|
||||||
|
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement over the native connection. Check the taos log to locate the problem. |
|
||||||
|
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing the command over the native connection. Check the connection to TDengine. |
|
||||||
|
| 0x2355 | JNI result set is NULL | The result set fetched over the native connection is abnormal. Check the connection and retry. |
|
||||||
|
| 0x2356 | invalid num of fields | The meta information of the result set fetched over the native connection does not match. |
|
||||||
|
| 0x2357 | empty sql string | Provide a valid SQL statement to execute. |
|
||||||
|
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed over the native connection. Check the taos log to locate the problem. |
|
||||||
|
| 0x2371 | consumer properties must not be null! | The parameters were null when creating the subscription. Provide valid parameters. |
|
||||||
|
| 0x2372 | configs contain empty key, failed to set consumer property | A parameter key contains an empty value. Provide valid parameters. |
|
||||||
|
| 0x2373 | failed to set consumer property, | A parameter value contains an empty value. Provide valid parameters. |
|
||||||
|
| 0x2375 | topic reference has been destroyed | The topic reference was released while creating the data subscription. Check the connection to TDengine. |
|
||||||
|
| 0x2376 | failed to set consumer topic, topic name is empty | The topic name was empty while creating the data subscription. Check whether the specified topic name is correct. |
|
||||||
|
| 0x2377 | consumer reference has been destroyed | The subscription data transmission channel has been closed. Check the connection to TDengine. |
|
||||||
|
| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log based on the error message to locate the problem. |
|
||||||
|
| - | can't create connection with server within | Increase the connection time via the httpConnectTimeout parameter, or check the connection to taosAdapter. |
|
||||||
|
| - | failed to complete the task within the specified time | Increase the execution time via the messageWaitTimeout parameter, or check the connection to taosAdapter. |
|
||||||
|
|
||||||
|
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||||
|
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||||
|
|
||||||
## TDengine DataType and Java DataType
|
## TDengine DataType and Java DataType
|
||||||
|
|
||||||
TDengine currently supports timestamp, numeric, character, and boolean types; they map to the corresponding Java types as follows:
|
TDengine currently supports timestamp, numeric, character, and boolean types; they map to the corresponding Java types as follows:
|
||||||
|
@ -82,7 +169,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.1.0</version>
|
<version>3.2.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -97,7 +184,7 @@ cd taos-connector-jdbc
|
||||||
mvn clean install -Dmaven.test.skip=true
|
mvn clean install -Dmaven.test.skip=true
|
||||||
```
|
```
|
||||||
|
|
||||||
After compilation, a jar package named taos-jdbcdriver-3.0.\*-dist.jar is generated in the target directory, and the compiled jar file is automatically placed into the local Maven repository.
|
After compilation, a jar package named taos-jdbcdriver-3.2.\*-dist.jar is generated in the target directory, and the compiled jar file is automatically placed into the local Maven repository.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
@ -336,35 +423,6 @@ while(resultSet.next()){
|
||||||
|
|
||||||
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
||||||
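A minimal sketch of both access styles, assuming the hypothetical table `power.meters` with columns `ts` and `current`:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;

public class ReadByName {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/power", "root", "taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT ts, current FROM meters LIMIT 1")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp(1);      // column indexes start at 1
                float current = rs.getFloat("current"); // by name (recommended)
                System.out.println(ts + " " + current);
            }
        }
    }
}
```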
|
|
||||||
### Exception Handling
|
|
||||||
|
|
||||||
When an error occurs, the error message and error code can be obtained through SQLException:
|
|
||||||
|
|
||||||
```java
|
|
||||||
try (Statement statement = connection.createStatement()) {
|
|
||||||
// executeQuery
|
|
||||||
ResultSet resultSet = statement.executeQuery(sql);
|
|
||||||
// print result
|
|
||||||
printResult(resultSet);
|
|
||||||
} catch (SQLException e) {
|
|
||||||
System.out.println("ERROR Message: " + e.getMessage());
|
|
||||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The error codes that the JDBC connector may report fall into four categories:
|
|
||||||
|
|
||||||
- Errors from the JDBC driver itself (error codes between 0x2301 and 0x2350)
|
|
||||||
- Errors from native connection methods (error codes between 0x2351 and 0x2360)
|
|
||||||
- Errors from data subscription (error codes between 0x2371 and 0x2380)
|
|
||||||
- Errors from other TDengine modules.
|
|
||||||
|
|
||||||
For the specific error codes, please refer to:
|
|
||||||
|
|
||||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
|
||||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
|
||||||
|
|
||||||
### Writing Data via Parameter Binding
|
### Writing Data via Parameter Binding
|
||||||
|
|
||||||
TDengine's JDBC native connection implementation greatly improves support for writing data (INSERT) via parameter binding. Writing data this way avoids the resource cost of SQL parsing and can significantly improve write performance in many cases.
|
TDengine's JDBC native connection implementation greatly improves support for writing data (INSERT) via parameter binding. Writing data this way avoids the resource cost of SQL parsing and can significantly improve write performance in many cases.
|
||||||
|
@ -372,9 +430,12 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
|
||||||
**Note**:
|
**Note**:
|
||||||
|
|
||||||
- JDBC REST connections do not currently support parameter binding
|
- JDBC REST connections do not currently support parameter binding
|
||||||
- The following sample code is based on taos-jdbcdriver-3.1.0
|
- The following sample code is based on taos-jdbcdriver-3.2.1
|
||||||
- Call the setString method for binary data and the setNString method for nchar data
|
- Call the setString method for binary data and the setNString method for nchar data
|
||||||
- Both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
|
- When specifying the database and subtable name in a prepared statement, do not use `db.?`; use `?` directly and specify the database in setTableName, e.g. `prepareStatement.setTableName("db.t1")`.
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="原生连接">
|
||||||
|
|
||||||
```java
|
```java
|
||||||
public class ParameterBindingDemo {
|
public class ParameterBindingDemo {
|
||||||
|
@ -602,21 +663,7 @@ public class ParameterBindingDemo {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The methods for setting TAG values are:
|
**Note**: Both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
|
||||||
|
|
||||||
```java
|
|
||||||
public void setTagNull(int index, int type)
|
|
||||||
public void setTagBoolean(int index, boolean value)
|
|
||||||
public void setTagInt(int index, int value)
|
|
||||||
public void setTagByte(int index, byte value)
|
|
||||||
public void setTagShort(int index, short value)
|
|
||||||
public void setTagLong(int index, long value)
|
|
||||||
public void setTagTimestamp(int index, long value)
|
|
||||||
public void setTagFloat(int index, float value)
|
|
||||||
public void setTagDouble(int index, double value)
|
|
||||||
public void setTagString(int index, String value)
|
|
||||||
public void setTagNString(int index, String value)
|
|
||||||
```
|
|
||||||
|
|
||||||
The methods for setting the values of the VALUES data columns are:
|
The methods for setting the values of the VALUES data columns are:
|
||||||
|
|
||||||
|
@ -633,17 +680,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
|
||||||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="rest" label="WebSocket 连接">
|
||||||
|
|
||||||
|
```java
|
||||||
|
public class ParameterBindingDemo {
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
private static final Random random = new Random(System.currentTimeMillis());
|
||||||
|
private static final int BINARY_COLUMN_SIZE = 30;
|
||||||
|
private static final String[] schemaList = {
|
||||||
|
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||||
|
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||||
|
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||||
|
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||||
|
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||||
|
};
|
||||||
|
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||||
|
|
||||||
|
public static void main(String[] args) throws SQLException {
|
||||||
|
|
||||||
|
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||||
|
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
|
||||||
|
|
||||||
|
init(conn);
|
||||||
|
|
||||||
|
bindInteger(conn);
|
||||||
|
|
||||||
|
bindFloat(conn);
|
||||||
|
|
||||||
|
bindBoolean(conn);
|
||||||
|
|
||||||
|
bindBytes(conn);
|
||||||
|
|
||||||
|
bindString(conn);
|
||||||
|
|
||||||
|
conn.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void init(Connection conn) throws SQLException {
|
||||||
|
try (Statement stmt = conn.createStatement()) {
|
||||||
|
stmt.execute("drop database if exists test_ws_parabind");
|
||||||
|
stmt.execute("create database if not exists test_ws_parabind");
|
||||||
|
stmt.execute("use test_ws_parabind");
|
||||||
|
for (int i = 0; i < schemaList.length; i++) {
|
||||||
|
stmt.execute(schemaList[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindInteger(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t1_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||||
|
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||||
|
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||||
|
pstmt.setTagLong(4, random.nextLong());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||||
|
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||||
|
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||||
|
pstmt.setLong(5, random.nextLong());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindFloat(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||||
|
|
||||||
|
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t2_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagFloat(1, random.nextFloat());
|
||||||
|
pstmt.setTagDouble(2, random.nextDouble());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setFloat(2, random.nextFloat());
|
||||||
|
pstmt.setDouble(3, random.nextDouble());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindBoolean(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t3_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setBoolean(2, random.nextBoolean());
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindBytes(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t4_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagString(1, "abc");
|
||||||
|
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setString(2, "abc");
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void bindString(Connection conn) throws SQLException {
|
||||||
|
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||||
|
|
||||||
|
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||||
|
|
||||||
|
for (int i = 1; i <= numOfSubTable; i++) {
|
||||||
|
// set table name
|
||||||
|
pstmt.setTableName("t5_" + i);
|
||||||
|
// set tags
|
||||||
|
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||||
|
|
||||||
|
// set columns
|
||||||
|
long current = System.currentTimeMillis();
|
||||||
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
|
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||||
|
pstmt.setNString(2, "California.SanFrancisco");
|
||||||
|
pstmt.addBatch();
|
||||||
|
}
|
||||||
|
pstmt.executeBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
用于设定 TAGS 取值的方法总共有:
|
||||||
|
|
||||||
|
```java
|
||||||
|
public void setTagNull(int index, int type)
|
||||||
|
public void setTagBoolean(int index, boolean value)
|
||||||
|
public void setTagInt(int index, int value)
|
||||||
|
public void setTagByte(int index, byte value)
|
||||||
|
public void setTagShort(int index, short value)
|
||||||
|
public void setTagLong(int index, long value)
|
||||||
|
public void setTagTimestamp(int index, long value)
|
||||||
|
public void setTagFloat(int index, float value)
|
||||||
|
public void setTagDouble(int index, double value)
|
||||||
|
public void setTagString(int index, String value)
|
||||||
|
public void setTagNString(int index, String value)
|
||||||
|
```
|
||||||
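其中 setTagNull 需要额外传入标签的数据类型。下面是一个示意片段(假设第 1 个标签为 INT 类型;类型常量 TSDB_DATA_TYPE_INT 取自 com.taosdata.jdbc.TSDBConstants,请以实际驱动版本为准):

```java
// 将第 1 个标签绑定为 NULL,需要显式给出其数据类型(示意)
pstmt.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_INT);
// 非 NULL 标签按类型调用对应的 setTagXxx 方法
pstmt.setTagInt(2, 10);
```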
|
|
||||||
### 无模式写入
|
### 无模式写入
|
||||||
|
|
||||||
TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。
|
TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。
|
||||||
|
|
||||||
**注意**:
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="原生连接">
|
||||||
- JDBC REST 连接目前不支持无模式写入
|
|
||||||
- 以下示例代码基于 taos-jdbcdriver-3.1.0
|
|
||||||
|
|
||||||
```java
|
```java
|
||||||
public class SchemalessInsertTest {
|
public class SchemalessJniTest {
|
||||||
private static final String host = "127.0.0.1";
|
private static final String host = "127.0.0.1";
|
||||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||||
|
@ -671,6 +904,41 @@ public class SchemalessInsertTest {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="ws" label="WebSocket 连接">
|
||||||
|
|
||||||
|
```java
|
||||||
|
public class SchemalessWsTest {
|
||||||
|
private static final String host = "127.0.0.1";
|
||||||
|
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||||
|
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||||
|
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||||
|
|
||||||
|
public static void main(String[] args) throws SQLException {
|
||||||
|
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||||
|
Connection connection = DriverManager.getConnection(url);
|
||||||
|
init(connection);
|
||||||
|
|
||||||
|
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||||
|
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||||
|
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||||
|
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||||
|
System.exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void init(Connection connection) throws SQLException {
|
||||||
|
try (Statement stmt = connection.createStatement()) {
|
||||||
|
stmt.executeUpdate("drop database if exists test_ws_schemaless");
|
||||||
|
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
|
||||||
|
stmt.executeUpdate("use test_ws_schemaless");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
### 数据订阅
|
### 数据订阅
|
||||||
|
|
||||||
TDengine Java 连接器支持订阅功能,应用 API 如下:
|
TDengine Java 连接器支持订阅功能,应用 API 如下:
|
||||||
|
@ -714,8 +982,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
```java
|
```java
|
||||||
while(true) {
|
while(true) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
@ -766,8 +1035,9 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
consumer.unsubscribe();
|
consumer.unsubscribe();
|
||||||
|
@ -844,8 +1114,9 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ResultBean record : records) {
|
for (ConsumerRecord<ResultBean> record : records) {
|
||||||
process(record);
|
ResultBean bean = record.value();
|
||||||
|
process(bean);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
consumer.unsubscribe();
|
consumer.unsubscribe();
|
||||||
|
@ -971,20 +1242,6 @@ public static void main(String[] args) throws Exception {
|
||||||
|
|
||||||
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||||
|
|
||||||
## 最近更新记录
|
|
||||||
|
|
||||||
| taos-jdbcdriver 版本 | 主要变化 |
|
|
||||||
| :------------------: | :--------------------------------------------------------------------------------------------: |
|
|
||||||
| 3.1.0 | WebSocket 连接支持订阅功能 |
|
|
||||||
| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
|
|
||||||
| 3.0.0 | 支持 TDengine 3.0 |
|
|
||||||
| 2.0.42 | 修复 WebSocket 连接中 wasNull 接口返回值 |
|
|
||||||
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
|
|
||||||
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
|
|
||||||
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
|
|
||||||
| 2.0.37 | 增加对 json tag 支持 |
|
|
||||||
| 2.0.36 | 增加对 schemaless 写入支持 |
|
|
||||||
|
|
||||||
## 常见问题
|
## 常见问题
|
||||||
|
|
||||||
1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升?
|
1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升?
|
||||||
|
|
|
@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
|
||||||
import Preparation from "./_preparation.mdx"
|
import Preparation from "./_preparation.mdx"
|
||||||
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
|
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
|
||||||
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
|
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||||
|
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
|
||||||
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
|
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
|
||||||
|
|
||||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||||
|
@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
||||||
|
|
||||||
<RustBind />
|
<RustBind />
|
||||||
|
|
||||||
|
#### Schemaless 写入
|
||||||
|
|
||||||
|
<RustSml />
|
||||||
|
|
||||||
### 查询数据
|
### 查询数据
|
||||||
|
|
||||||
<RustQuery />
|
<RustQuery />
|
||||||
|
|
|
@ -33,7 +33,7 @@ column_definition:
|
||||||
SHOW STABLES [LIKE tb_name_wildcard];
|
SHOW STABLES [LIKE tb_name_wildcard];
|
||||||
```
|
```
|
||||||
|
|
||||||
查看数据库内全部 STable,及其相关信息,包括 STable 的名称、创建时间、列数量、标签(TAG)数量、通过该 STable 建表的数量。
|
查看数据库内全部超级表。
|
||||||
|
|
||||||
### 显示一个超级表的创建语句
|
### 显示一个超级表的创建语句
|
||||||
|
|
||||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
interp_clause:
|
interp_clause:
|
||||||
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
|
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||||
|
|
||||||
partition_by_clause:
|
partition_by_clause:
|
||||||
PARTITION BY expr [, expr] ...
|
PARTITION BY expr [, expr] ...
|
||||||
|
|
|
@ -869,10 +869,15 @@ FIRST(expr)
|
||||||
### INTERP
|
### INTERP
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INTERP(expr)
|
INTERP(expr [, ignore_null_values])
|
||||||
|
|
||||||
|
ignore_null_values: {
|
||||||
|
0
|
||||||
|
| 1
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**功能说明**:返回指定时间截面指定列的记录值或插值。
|
**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值,缺省值为 0。
|
||||||
|
|
||||||
**返回数据类型**:同字段类型。
|
**返回数据类型**:同字段类型。
|
||||||
|
|
||||||
|
@ -888,7 +893,7 @@ INTERP(expr)
|
||||||
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
|
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
|
||||||
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对指定数据每 500 毫秒进行一次插值。
|
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对指定数据每 500 毫秒进行一次插值。
|
||||||
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
|
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
|
||||||
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
|
- INTERP 作用于超级表时,会将该超级表下的所有子表数据按照主键列排序后进行插值计算;也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
|
||||||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
|
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
|
||||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
|
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。上述各项用法可参考下方的综合示例。
|
||||||
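结合上述各项规则,下面给出一个示意查询(假设存在包含 current 列的超级表 meters,时间范围与间隔仅作演示):

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
EVERY(5s)
FILL(LINEAR);
-- 如需忽略 NULL 值,可写作 INTERP(current, 1)
```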
|
|
||||||
|
|
|
@ -129,6 +129,14 @@ SHOW QNODES;
|
||||||
|
|
||||||
显示当前系统中 QNODE (查询节点)的信息。
|
显示当前系统中 QNODE (查询节点)的信息。
|
||||||
|
|
||||||
|
## SHOW QUERIES
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW QUERIES;
|
||||||
|
```
|
||||||
|
|
||||||
|
显示当前系统中正在进行的查询。
|
||||||
|
|
||||||
## SHOW SCORES
|
## SHOW SCORES
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -189,7 +197,7 @@ SHOW TABLE DISTRIBUTED table_name;
|
||||||
|
|
||||||
*************************** 1.row ***************************
|
*************************** 1.row ***************************
|
||||||
|
|
||||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
|
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||||
|
|
||||||
Total_Blocks: 表 d0 占用的 block 个数为 5 个
|
Total_Blocks: 表 d0 占用的 block 个数为 5 个
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
|
||||||
```
|
```
|
||||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||||
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
||||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言。
|
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言(Python 要求 v3.7+)。
|
||||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||||
- output_type:此函数计算结果的数据类型名称;
|
- output_type:此函数计算结果的数据类型名称;
|
||||||
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
||||||
|
|
|
@ -177,7 +177,7 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
||||||
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||||
```
|
```
|
||||||
|
|
||||||
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
||||||
|
@ -189,7 +189,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
使用上面获取的 `uid` 值作为 `-E` 输入。
|
使用上面获取的 `uid` 值作为 `-E` 输入。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
||||||
|
|
|
@ -80,7 +80,7 @@ taos --dump-config
|
||||||
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
|
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
|
||||||
:::
|
:::
|
||||||
| 协议 | 默认端口 | 用途说明 | 修改方法 |
|
| 协议 | 默认端口 | 用途说明 | 修改方法 |
|
||||||
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
|
| :--- | :------- | :----------------------------------------------- | :------------------------------------------------------------------------------------------------- |
|
||||||
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
|
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
|
||||||
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
||||||
| TCP | 6043 | taosKeeper 监控服务端口。 | 随 taosKeeper 启动参数设置变化。 |
|
| TCP | 6043 | taosKeeper 监控服务端口。 | 随 taosKeeper 启动参数设置变化。 |
|
||||||
|
@ -97,6 +97,24 @@ taos --dump-config
|
||||||
| 取值范围 | 10-50000000 |
|
| 取值范围 | 10-50000000 |
|
||||||
| 缺省值 | 5000 |
|
| 缺省值 | 5000 |
|
||||||
|
|
||||||
|
### numOfRpcSessions
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | ---------------------------- |
|
||||||
|
| 适用范围 | 客户端和服务端都适用 |
|
||||||
|
| 含义 | 一个客户端能创建的最大连接数 |
|
||||||
|
| 取值范围 | 100-100000 |
|
||||||
|
| 缺省值 | 10000 |
|
||||||
|
|
||||||
|
### timeToGetAvailableConn
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | -------------------------- |
|
||||||
|
| 适用范围 | 客户端和服务端都适用 |
|
||||||
|
| 含义 | 获得可用连接的最长等待时间 |
|
||||||
|
| 取值范围 | 10-50000000(单位为毫秒) |
|
||||||
|
| 缺省值 | 500000 |
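下面给出一个同时设置上述两个参数的 taos.cfg 片段(示意,取值需结合实际连接规模调整):

```
# 一个客户端能创建的最大连接数(示意值)
numOfRpcSessions        30000
# 获得可用连接的最长等待时间,单位为毫秒(示意值)
timeToGetAvailableConn  500000
```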
|
||||||
|
|
||||||
## 监控相关
|
## 监控相关
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -140,7 +158,7 @@ taos --dump-config
|
||||||
### telemetryReporting
|
### telemetryReporting
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 是否上传 telemetry |
|
| 含义 | 是否上传 telemetry |
|
||||||
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
||||||
|
@ -149,7 +167,7 @@ taos --dump-config
|
||||||
### crashReporting
|
### crashReporting
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 是否上传 crash 信息 |
|
| 含义 | 是否上传 crash 信息 |
|
||||||
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
||||||
|
@ -160,7 +178,7 @@ taos --dump-config
|
||||||
### queryPolicy
|
### queryPolicy
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | 查询语句的执行策略 |
|
| 含义 | 查询语句的执行策略 |
|
||||||
| 单位 | 无 |
|
| 单位 | 无 |
|
||||||
|
@ -170,7 +188,7 @@ taos --dump-config
|
||||||
### querySmaOptimize
|
### querySmaOptimize
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------- |
|
| -------- | ---------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | sma index 的优化策略 |
|
| 含义 | sma index 的优化策略 |
|
||||||
| 单位 | 无 |
|
| 单位 | 无 |
|
||||||
|
@ -189,7 +207,7 @@ taos --dump-config
|
||||||
### keepColumnName
|
### keepColumnName
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------- |
|
| -------- | ----------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
|
| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
|
||||||
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
|
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
|
||||||
|
@ -198,7 +216,7 @@ taos --dump-config
|
||||||
### countAlwaysReturnValue
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------- |
|
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值 |
|
| 含义 | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值 |
|
||||||
| 取值范围 | 0:返回空行,1:返回 0 |
|
| 取值范围 | 0:返回空行,1:返回 0 |
|
||||||
|
@ -315,7 +333,7 @@ charset 的有效值是 UTF-8。
|
||||||
### dataDir
|
### dataDir
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------ |
|
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
||||||
| 缺省值 | /var/lib/taos |
|
| 缺省值 | /var/lib/taos |
|
||||||
|
@ -324,7 +342,7 @@ charset 的有效值是 UTF-8。
|
||||||
### tempDir
|
### tempDir
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------ |
|
| -------- | ------------------------------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 该参数指定所有系统运行过程中的临时文件生成的目录 |
|
| 含义 | 该参数指定所有系统运行过程中的临时文件生成的目录 |
|
||||||
| 缺省值 | /tmp |
|
| 缺省值 | /tmp |
|
||||||
|
@ -341,7 +359,7 @@ charset 的有效值是 UTF-8。
|
||||||
### minimalDataDirGB
|
### minimalDataDirGB
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------------ |
|
| -------- | ---------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小 |
|
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小 |
|
||||||
| 单位 | GB |
|
| 单位 | GB |
|
||||||
|
@ -382,7 +400,7 @@ charset 的有效值是 UTF-8。
|
||||||
### minimalLogDirGB
|
### minimalLogDirGB
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------------------------------------ |
|
||||||
| 适用范围 | 服务端和客户端均适用 |
|
| 适用范围 | 服务端和客户端均适用 |
|
||||||
| 含义 | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志 |
|
| 含义 | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志 |
|
||||||
| 单位 | GB |
|
| 单位 | GB |
|
||||||
|
@ -591,7 +609,7 @@ charset 的有效值是 UTF-8。
|
||||||
### smlChildTableName
|
### smlChildTableName
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------- |
|
| -------- | ------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless 自定义的子表名的 key |
|
| 含义 | schemaless 自定义的子表名的 key |
|
||||||
| 类型 | 字符串 |
|
| 类型 | 字符串 |
|
||||||
|
@ -609,7 +627,7 @@ charset 的有效值是 UTF-8。
|
||||||
### smlDataFormat
|
### smlDataFormat
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | -------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
||||||
| 值域 | 0:不一致;1: 一致 |
|
| 值域 | 0:不一致;1: 一致 |
|
||||||
|
@ -630,7 +648,7 @@ charset 的有效值是 UTF-8。
|
||||||
### enableScience
|
### enableScience
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| -------- | ---------------------------- |
|
||||||
| 适用范围 | 仅客户端 TAOS-CLI 适用 |
|
| 适用范围 | 仅客户端 TAOS-CLI 适用 |
|
||||||
| 含义 | 是否开启科学计数法显示浮点数 |
|
| 含义 | 是否开启科学计数法显示浮点数 |
|
||||||
| 取值范围 | 0:否,1:是 |
|
| 取值范围 | 0:否,1:是 |
|
||||||
|
@ -650,7 +668,7 @@ charset 的有效值是 UTF-8。
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | ------------------------------------------------------------------------------- |
|
||||||
| 适用于 | 服务端和客户端均适用 |
|
| 适用于 | 服务端和客户端均适用 |
|
||||||
| 含义 | 是否对 RPC 消息进行压缩 |
|
| 含义 | 是否对 RPC 消息进行压缩 |
|
||||||
| 取值范围 | -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩 |
|
| 取值范围 | -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩 |
|
||||||
|
@ -659,7 +677,7 @@ charset 的有效值是 UTF-8。
|
||||||
## 3.0 中有效的配置参数列表
|
## 3.0 中有效的配置参数列表
|
||||||
|
|
||||||
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :--------------------: | --------------- | ------------------------------- | ------------------ |
|
||||||
| 1 | firstEp | 是 | 是 | |
|
| 1 | firstEp | 是 | 是 | |
|
||||||
| 2 | secondEp | 是 | 是 | |
|
| 2 | secondEp | 是 | 是 | |
|
||||||
| 3 | fqdn | 是 | 是 | |
|
| 3 | fqdn | 是 | 是 | |
|
||||||
|
@ -716,7 +734,7 @@ charset 的有效值是 UTF-8。
|
||||||
## 2.x->3.0 的废弃参数
|
## 2.x->3.0 的废弃参数
|
||||||
|
|
||||||
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :---------------------: | --------------- | --------------- | ---------------------------------------------------- |
|
||||||
| 1 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
|
| 1 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
|
||||||
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||||
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||||
|
@ -735,7 +753,6 @@ charset 的有效值是 UTF-8。
|
||||||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||||
| 19 | keepColumnName | 是 | 否 | 3.0 行为未知 |
|
|
||||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
|
|
|
@ -32,7 +32,7 @@ chmod +x TDinsight.sh
|
||||||
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
||||||
|
@ -270,7 +270,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -284,7 +284,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -298,7 +298,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
||||||
|
@ -326,7 +326,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent 表
|
### taosadapter\_system\_cpu\_percent 表
|
||||||
|
@ -336,5 +336,5 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
|
@ -200,6 +200,12 @@ docker run -d \
|
||||||
- Group by column name(s): **半角**逗号分隔的 `group by` 或 `partition by` 列名。如果是 `group by` or `partition by` 查询语句,设置 `Group by` 列,可以展示多维数据。例如:INPUT SQL 为 `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`,设置 Group by 列名为 `dnode_ep`,可以按 `dnode_ep` 展示数据。
|
- Group by column name(s): **半角**逗号分隔的 `group by` 或 `partition by` 列名。如果是 `group by` or `partition by` 查询语句,设置 `Group by` 列,可以展示多维数据。例如:INPUT SQL 为 `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`,设置 Group by 列名为 `dnode_ep`,可以按 `dnode_ep` 展示数据。
|
||||||
- Format to: Group by 或 Partition by 场景下多维数据 legend 格式化格式。例如上述 INPUT SQL,将 Format to 设置为 `mem_system_{{dnode_ep}}`,展示的 legend 名字为格式化的列名。
|
- Format to: Group by 或 Partition by 场景下多维数据 legend 格式化格式。例如上述 INPUT SQL,将 Format to 设置为 `mem_system_{{dnode_ep}}`,展示的 legend 名字为格式化的列名。
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
由于 REST 接口无状态,不能使用 `use db` 语句来切换数据库。Grafana 插件的 SQL 语句中可以使用 `<db_name>.<table_name>` 来指定数据库,例如下面的写法。
|
||||||
|
|
||||||
|
:::
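例如(示意,假设监控数据写入默认的 log 库):

```sql
select _wstart as ts, avg(mem_system) from log.dnodes_info where ts >= $from and ts <= $to interval($interval)
```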
|
||||||
|
|
||||||
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
|
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
|
||||||
|
|
||||||

|

|
||||||
|
|
|
@ -48,15 +48,14 @@ Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍
|
||||||
|
|
||||||
```
|
```
|
||||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||||
tar xzf confluent-7.1.1.tar.gz -C /opt/test
|
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||||
```
|
```
|
||||||
|
|
||||||
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
|
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
|
||||||
|
|
||||||
```title=".profile"
|
```title=".profile"
|
||||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||||
PATH=$CONFLUENT_HOME/bin
|
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||||
export PATH
|
|
||||||
```
|
```
|
||||||
|
|
||||||
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
|
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
|
||||||
|
@ -319,7 +318,6 @@ connection.backoff.ms=5000
|
||||||
topic.prefix=tdengine-source-
|
topic.prefix=tdengine-source-
|
||||||
poll.interval.ms=1000
|
poll.interval.ms=1000
|
||||||
fetch.max.rows=100
|
fetch.max.rows=100
|
||||||
out.format=line
|
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
```
|
```
|
||||||
|
@ -333,7 +331,15 @@ DROP DATABASE IF EXISTS test;
|
||||||
CREATE DATABASE test;
|
CREATE DATABASE test;
|
||||||
USE test;
|
USE test;
|
||||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
|
||||||
|
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
|
||||||
|
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||||
```
|
```
|
||||||
|
|
||||||
使用 TDengine CLI, 执行 SQL 文件。
|
使用 TDengine CLI, 执行 SQL 文件。
|
||||||
|
@ -350,7 +356,7 @@ confluent local services connect connector load TDengineSourceConnector --config
|
||||||
|
|
||||||
### 查看 topic 数据
|
### 查看 topic 数据
|
||||||
|
|
||||||
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。
|
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据,往 TDengine 插入两条新的数据之后,kafka-console-consumer 也会立即输出新增的两条数据。输出数据为 InfluxDB 行协议(Line Protocol)格式。
|
||||||
|
|
||||||
```
|
```
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||||
|
@ -388,7 +394,7 @@ confluent local services connect connector status
|
||||||
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
|
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
|
||||||
|
|
||||||
```
|
```
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSinkConnector
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSourceConnector
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -427,11 +433,12 @@ confluent local services connect connector unload TDengineSourceConnector
|
||||||
### TDengine Source Connector 特有的配置
|
### TDengine Source Connector 特有的配置
|
||||||
|
|
||||||
1. `connection.database`: 源数据库名称,无缺省值。
|
1. `connection.database`: 源数据库名称,无缺省值。
|
||||||
2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。
|
2. `topic.prefix`: 数据导入 kafka 时使用的 topic 名称的前缀。默认为空字符串 ""。
|
||||||
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。
|
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
|
||||||
4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。
|
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
|
||||||
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
||||||
6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。
|
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000。
|
||||||
|
7. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定 DB 中的所有数据进入同一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`。各参数的组合用法可参考下方的配置片段。
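下面给出一个包含上述参数的 source 连接器配置片段(示意,取值仅作演示,connector.class、connection.url 等通用配置项从略):

```
connection.database=test
topic.prefix=tdengine-source-
timestamp.initial=2018-10-03 00:00:00
poll.interval.ms=1000
fetch.max.rows=100
query.interval.ms=1000
topic.per.stable=true
```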
|
||||||
|
|
||||||
## 其他说明
|
## 其他说明
|
||||||
|
|
||||||
|
|
|
@ -77,7 +77,7 @@ description: 一些常见问题的解决方法汇总
|
||||||
|
|
||||||
- Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务端端口是否可访问
|
- Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务端端口是否可访问
|
||||||
|
|
||||||
11. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。
|
11. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](../../operation/diagnose/)。
|
||||||
|
|
||||||
### 5. 遇到错误 “Unable to resolve FQDN” 怎么办?
|
### 5. 遇到错误 “Unable to resolve FQDN” 怎么办?
|
||||||
|
|
||||||
|
@ -247,4 +247,10 @@ launchctl limit maxfiles
|
||||||
该提示表示创建 db 所需的 vnode 数量不足:需要的 vnode 数不能超过 dnode 中 vnode 的上限。系统默认一个 dnode 中的 vnode 上限为 CPU 核数的两倍,也可以通过配置文件中的参数 supportVnodes 控制。
|
该提示表示创建 db 所需的 vnode 数量不足:需要的 vnode 数不能超过 dnode 中 vnode 的上限。系统默认一个 dnode 中的 vnode 上限为 CPU 核数的两倍,也可以通过配置文件中的参数 supportVnodes 控制。
|
||||||
通常调大 taos.cfg 中的 supportVnodes 参数即可,例如下面的配置片段。
|
通常调大 taos.cfg 中的 supportVnodes 参数即可,例如下面的配置片段。
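下面是一个 taos.cfg 配置片段(示意,具体取值需结合 CPU 核数与库的数量调整):

```
# 每个 dnode 支持的最大 vnode 数(示意值)
supportVnodes 128
```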
|
||||||
|
|
||||||
|
### 21 【查询】在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
|
||||||
|
这种情况是因为客户端与服务器设置的时区不一致导致的,将客户端与服务器的时区调整为一致即可解决。
|
||||||
|
|
||||||
|
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
|
||||||
|
TDengine 中的所有名称(包括数据库名、表名等)都是区分大小写的。如果这些名称在程序或 taos-CLI 中没有用反引号(`)括起来,即使输入的是大写,引擎也会转化成小写来使用;如果名称前后加上了反引号,引擎则不会再转化成小写,而是保持原样使用。可参考下面的示例。
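下面用一个小示例说明(示意,假设已在某个数据库中执行):

```sql
CREATE TABLE `TableA` (ts TIMESTAMP, v INT);  -- 反引号保留大小写,实际表名为 TableA
SELECT * FROM `TableA`;  -- 正确:名称保持原样
SELECT * FROM TableA;    -- 会被转成小写 tablea,从而报“表不存在”
```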
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -10,7 +10,7 @@
|
||||||
<description>Demo project for TDengine</description>
|
<description>Demo project for TDengine</description>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<spring.version>5.3.26</spring.version>
|
<spring.version>5.3.27</spring.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
|
|
@ -73,7 +73,7 @@ static int32_t init_env() {
|
||||||
taos_free_result(pRes);
|
taos_free_result(pRes);
|
||||||
|
|
||||||
// create database
|
// create database
|
||||||
pRes = taos_query(pConn, "create database tmqdb precision 'ns'");
|
pRes = taos_query(pConn, "create database tmqdb precision 'ns' WAL_RETENTION_PERIOD 3600");
|
||||||
if (taos_errno(pRes) != 0) {
|
if (taos_errno(pRes) != 0) {
|
||||||
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||||
goto END;
|
goto END;
|
||||||
|
@ -289,7 +289,7 @@ void consume_repeatly(tmq_t* tmq) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
free(pAssign);
|
tmq_free_assignment(pAssign);
|
||||||
|
|
||||||
// let's do it again
|
// let's do it again
|
||||||
basic_consume_loop(tmq);
|
basic_consume_loop(tmq);
|
||||||
|
|
|
@ -230,6 +230,9 @@ DLL_EXPORT int taos_get_tables_vgId(TAOS *taos, const char *db, const char *tabl
|
||||||
|
|
||||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||||
|
|
||||||
|
// set heartbeat thread quit mode: if quitByKill is 1, kill the thread; otherwise quit from inside the thread
|
||||||
|
DLL_EXPORT void taos_set_hb_quit(int8_t quitByKill);
|
||||||
|
|
||||||
DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type);
|
DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type);
|
||||||
|
|
||||||
/* --------------------------schemaless INTERFACE------------------------------- */
|
/* --------------------------schemaless INTERFACE------------------------------- */
|
||||||
|
@ -283,7 +286,9 @@ DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char* pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment);
|
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
|
||||||
|
int32_t *numOfAssignment);
|
||||||
|
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||||
|
|
||||||
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
||||||
|
@ -306,6 +311,7 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
|
||||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||||
|
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||||
|
|
||||||
/* ------------------------------ TAOSX -----------------------------------*/
|
/* ------------------------------ TAOSX -----------------------------------*/
|
||||||
// note: following apis are unstable
|
// note: following apis are unstable
|
||||||
|
|
|
@ -82,7 +82,7 @@ typedef struct STuplePos {
|
||||||
int32_t pageId;
|
int32_t pageId;
|
||||||
int32_t offset;
|
int32_t offset;
|
||||||
};
|
};
|
||||||
STupleKey streamTupleKey;
|
SWinKey streamTupleKey;
|
||||||
};
|
};
|
||||||
} STuplePos;
|
} STuplePos;
|
||||||
|
|
||||||
|
@ -208,19 +208,6 @@ typedef struct SSDataBlock {
|
||||||
SDataBlockInfo info;
|
SDataBlockInfo info;
|
||||||
} SSDataBlock;
|
} SSDataBlock;
|
||||||
|
|
||||||
enum {
|
|
||||||
FETCH_TYPE__DATA = 0,
|
|
||||||
FETCH_TYPE__NONE,
|
|
||||||
};
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
int8_t fetchType;
|
|
||||||
union {
|
|
||||||
SSDataBlock data;
|
|
||||||
void* meta;
|
|
||||||
};
|
|
||||||
} SFetchRet;
|
|
||||||
|
|
||||||
typedef struct SVarColAttr {
|
typedef struct SVarColAttr {
|
||||||
int32_t* offset; // start position for each entry in the list
|
int32_t* offset; // start position for each entry in the list
|
||||||
uint32_t length; // used buffer size that contain the valid data
|
uint32_t length; // used buffer size that contain the valid data
|
||||||
|
@ -342,6 +329,8 @@ typedef struct {
|
||||||
float f;
|
float f;
|
||||||
};
|
};
|
||||||
size_t length;
|
size_t length;
|
||||||
|
bool keyEscaped;
|
||||||
|
bool valueEscaped;
|
||||||
} SSmlKv;
|
} SSmlKv;
|
||||||
|
|
||||||
#define QUERY_ASC_FORWARD_STEP 1
|
#define QUERY_ASC_FORWARD_STEP 1
|
||||||
|
@ -380,6 +369,8 @@ typedef struct STUidTagInfo {
|
||||||
#define UD_GROUPID_COLUMN_INDEX 1
|
#define UD_GROUPID_COLUMN_INDEX 1
|
||||||
#define UD_TAG_COLUMN_INDEX 2
|
#define UD_TAG_COLUMN_INDEX 2
|
||||||
|
|
||||||
|
int32_t taosGenCrashJsonMsg(int signum, char **pMsg, int64_t clusterId, int64_t startTime);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -215,7 +215,7 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
|
||||||
void blockDataCleanup(SSDataBlock* pDataBlock);
|
void blockDataCleanup(SSDataBlock* pDataBlock);
|
||||||
void blockDataEmpty(SSDataBlock* pDataBlock);
|
void blockDataEmpty(SSDataBlock* pDataBlock);
|
||||||
|
|
||||||
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
|
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize);
|
||||||
|
|
||||||
int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n);
|
||||||
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||||
|
|
|
@ -124,6 +124,7 @@ extern int32_t tsRedirectFactor;
|
||||||
extern int32_t tsRedirectMaxPeriod;
|
extern int32_t tsRedirectMaxPeriod;
|
||||||
extern int32_t tsMaxRetryWaitTime;
|
extern int32_t tsMaxRetryWaitTime;
|
||||||
extern bool tsUseAdapter;
|
extern bool tsUseAdapter;
|
||||||
|
extern int32_t tsMetaCacheMaxSize;
|
||||||
extern int32_t tsSlowLogThreshold;
|
extern int32_t tsSlowLogThreshold;
|
||||||
extern int32_t tsSlowLogScope;
|
extern int32_t tsSlowLogScope;
|
||||||
|
|
||||||
|
@ -177,6 +178,8 @@ extern int32_t tsRpcRetryLimit;
|
||||||
extern int32_t tsRpcRetryInterval;
|
extern int32_t tsRpcRetryInterval;
|
||||||
|
|
||||||
extern bool tsDisableStream;
|
extern bool tsDisableStream;
|
||||||
|
extern int64_t tsStreamBufferSize;
|
||||||
|
extern int64_t tsCheckpointInterval;
|
||||||
|
|
||||||
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||||
|
|
||||||
|
@ -191,7 +194,7 @@ struct SConfig *taosGetCfg();
|
||||||
|
|
||||||
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
|
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
|
||||||
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
|
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
|
||||||
int32_t taosSetCfg(SConfig *pCfg, char *name);
|
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
|
||||||
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -26,6 +26,10 @@ extern "C" {
|
||||||
#include "tgrantCfg.h"
|
#include "tgrantCfg.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifndef GRANTS_COL_MAX_LEN
|
||||||
|
#define GRANTS_COL_MAX_LEN 196
|
||||||
|
#endif
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TSDB_GRANT_ALL,
|
TSDB_GRANT_ALL,
|
||||||
TSDB_GRANT_TIME,
|
TSDB_GRANT_TIME,
|
||||||
|
@ -47,6 +51,31 @@ typedef enum {
|
||||||
int32_t grantCheck(EGrantType grant);
|
int32_t grantCheck(EGrantType grant);
|
||||||
|
|
||||||
#ifndef GRANTS_CFG
|
#ifndef GRANTS_CFG
|
||||||
|
#ifdef TD_ENTERPRISE
|
||||||
|
#define GRANTS_SCHEMA \
|
||||||
|
static const SSysDbTableSchema grantsSchema[] = { \
|
||||||
|
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "opc_da", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "opc_ua", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "pi", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "kafka", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "influxdb", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "mqtt", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
}
|
||||||
|
#else
|
||||||
#define GRANTS_SCHEMA \
|
#define GRANTS_SCHEMA \
|
||||||
static const SSysDbTableSchema grantsSchema[] = { \
|
static const SSysDbTableSchema grantsSchema[] = { \
|
||||||
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
@ -64,6 +93,7 @@ int32_t grantCheck(EGrantType grant);
|
||||||
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
#define GRANT_CFG_ADD
|
#define GRANT_CFG_ADD
|
||||||
#define GRANT_CFG_SET
|
#define GRANT_CFG_SET
|
||||||
#define GRANT_CFG_GET
|
#define GRANT_CFG_GET
|
||||||
|
|
|
@ -416,7 +416,7 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
|
||||||
return pSW;
|
return pSW;
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
static FORCE_INLINE void tDeleteSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
||||||
if (pSchemaWrapper) {
|
if (pSchemaWrapper) {
|
||||||
taosMemoryFree(pSchemaWrapper->pSchema);
|
taosMemoryFree(pSchemaWrapper->pSchema);
|
||||||
taosMemoryFree(pSchemaWrapper);
|
taosMemoryFree(pSchemaWrapper);
|
||||||
|
@ -691,6 +691,7 @@ typedef struct {
|
||||||
|
|
||||||
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
|
void tFreeSAlterUserReq(SAlterUserReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
|
@@ -951,6 +952,9 @@ int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq
 int32_t tDeserializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);

 typedef struct {
+  char    db[TSDB_DB_FNAME_LEN];
+  int64_t dbId;
+  int32_t cfgVersion;
   int32_t numOfVgroups;
   int32_t numOfStables;
   int32_t buffer;
@@ -983,8 +987,13 @@ typedef struct {
   int16_t sstTrigger;
 } SDbCfgRsp;

+typedef SDbCfgRsp SDbCfgInfo;
+
+int32_t tSerializeSDbCfgRspImpl(SEncoder *encoder, const SDbCfgRsp *pRsp);
 int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
 int32_t tDeserializeSDbCfgRsp(void* buf, int32_t bufLen, SDbCfgRsp* pRsp);
+int32_t tDeserializeSDbCfgRspImpl(SDecoder* decoder, SDbCfgRsp *pRsp);
+void    tFreeSDbCfgRsp(SDbCfgRsp *pRsp);

 typedef struct {
   int32_t rowNum;
@@ -1041,12 +1050,17 @@ int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp
 void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);

 typedef struct {
-  SArray* pArray;  // Array of SUseDbRsp
-} SUseDbBatchRsp;
+  SUseDbRsp *useDbRsp;
+  SDbCfgRsp *cfgRsp;
+} SDbHbRsp;

-int32_t tSerializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
-int32_t tDeserializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
-void    tFreeSUseDbBatchRsp(SUseDbBatchRsp* pRsp);
+typedef struct {
+  SArray* pArray;  // Array of SDbHbRsp
+} SDbHbBatchRsp;
+
+int32_t tSerializeSDbHbBatchRsp(void* buf, int32_t bufLen, SDbHbBatchRsp* pRsp);
+int32_t tDeserializeSDbHbBatchRsp(void* buf, int32_t bufLen, SDbHbBatchRsp* pRsp);
+void    tFreeSDbHbBatchRsp(SDbHbBatchRsp* pRsp);

 typedef struct {
   SArray* pArray;  // Array of SGetUserAuthRsp
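The per-database heartbeat reply now bundles the vgroup info (`useDbRsp`) and the new config snapshot (`cfgRsp`) instead of the old `SUseDbBatchRsp`. A hedged sketch of how a client might walk a decoded batch, assuming `taosArrayGetSize`/`taosArrayGet` from the array utilities; the handler name is hypothetical:

```c
#include "tmsg.h"  // assumed include; declares SDbHbRsp / SDbHbBatchRsp

// Sketch: each element may carry fresh vgroup info and/or a db-config snapshot.
static void handleDbHbBatch(SDbHbBatchRsp* pBatch) {
  int32_t num = (int32_t)taosArrayGetSize(pBatch->pArray);
  for (int32_t i = 0; i < num; ++i) {
    SDbHbRsp* pRsp = taosArrayGet(pBatch->pArray, i);
    if (pRsp->useDbRsp) { /* refresh the cached vgroup list */ }
    if (pRsp->cfgRsp)   { /* refresh the cached db options  */ }
  }
  tFreeSDbHbBatchRsp(pBatch);
}
```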
@@ -1232,6 +1246,14 @@ typedef struct {
   SEp     ep;
 } SDnodeEp;

+typedef struct {
+  int32_t id;
+  int8_t  isMnode;
+  SEp     ep;
+  char    active[TSDB_ACTIVE_KEY_LEN];
+  char    connActive[TSDB_CONN_ACTIVE_KEY_LEN];
+} SDnodeInfo;
+
 typedef struct {
   int64_t   dnodeVer;
   SDnodeCfg dnodeCfg;
@@ -1620,11 +1642,27 @@ typedef struct {
   char    fqdn[TSDB_FQDN_LEN];
   int32_t port;
   int8_t  force;
+  int8_t  unsafe;
 } SDropDnodeReq;

 int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
 int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);

+enum {
+  RESTORE_TYPE__ALL = 1,
+  RESTORE_TYPE__MNODE,
+  RESTORE_TYPE__VNODE,
+  RESTORE_TYPE__QNODE,
+};
+
+typedef struct {
+  int32_t dnodeId;
+  int8_t  restoreType;
+} SRestoreDnodeReq;
+
+int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
+int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
+
 typedef struct {
   int32_t dnodeId;
   char    config[TSDB_DNODE_CONFIG_LEN];
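`SRestoreDnodeReq` backs the new RESTORE DNODE command, with `restoreType` selecting which role to rebuild. A sketch of the two-pass serialize pattern used by the other `tSerialize*` helpers in this header (length probe with a NULL buffer), assuming that convention applies here as well; the builder function itself is hypothetical:

```c
#include "tmsg.h"  // assumed include; declares SRestoreDnodeReq and its serializer

// Sketch: size the message first (NULL buffer returns the required length,
// per the pattern used throughout tmsg.h), then serialize for real.
static int32_t buildRestoreDnodeReq(int32_t dnodeId, void** ppBuf, int32_t* pLen) {
  SRestoreDnodeReq req = {.dnodeId = dnodeId, .restoreType = RESTORE_TYPE__ALL};
  int32_t len = tSerializeSRestoreDnodeReq(NULL, 0, &req);
  if (len < 0) return -1;
  void* buf = taosMemoryMalloc(len);
  if (buf == NULL) return -1;
  tSerializeSRestoreDnodeReq(buf, len, &req);
  *ppBuf = buf;
  *pLen = len;
  return 0;
}
```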
@@ -1905,7 +1943,7 @@ typedef struct {
 #define STREAM_FILL_HISTORY_ON          1
 #define STREAM_FILL_HISTORY_OFF         0
 #define STREAM_DEFAULT_FILL_HISTORY     STREAM_FILL_HISTORY_OFF
-#define STREAM_DEFAULT_IGNORE_UPDATE    0
+#define STREAM_DEFAULT_IGNORE_UPDATE    1
 #define STREAM_CREATE_STABLE_TRUE       1
 #define STREAM_CREATE_STABLE_FALSE      0

@@ -2098,7 +2136,6 @@ static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq

 typedef struct {
   char    key[TSDB_SUBSCRIBE_KEY_LEN];
-  SArray* lostConsumers;     // SArray<int64_t>
   SArray* removedConsumers;  // SArray<int64_t>
   SArray* newConsumers;      // SArray<int64_t>
 } SMqRebInfo;
@@ -2109,10 +2146,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
     return NULL;
   }
   tstrncpy(pRebInfo->key, key, TSDB_SUBSCRIBE_KEY_LEN);
-  pRebInfo->lostConsumers = taosArrayInit(0, sizeof(int64_t));
-  if (pRebInfo->lostConsumers == NULL) {
-    goto _err;
-  }
   pRebInfo->removedConsumers = taosArrayInit(0, sizeof(int64_t));
   if (pRebInfo->removedConsumers == NULL) {
     goto _err;
@@ -2123,7 +2156,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
   }
   return pRebInfo;
 _err:
-  taosArrayDestroy(pRebInfo->lostConsumers);
   taosArrayDestroy(pRebInfo->removedConsumers);
   taosArrayDestroy(pRebInfo->newConsumers);
   taosMemoryFreeClear(pRebInfo);
@@ -2908,6 +2940,42 @@ typedef struct SMqVgOffset {
 int32_t tEncodeMqVgOffset(SEncoder* pEncoder, const SMqVgOffset* pOffset);
 int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset);

+typedef struct {
+  SMsgHead head;
+  int32_t  taskId;
+} SVPauseStreamTaskReq;
+
+typedef struct {
+  int8_t reserved;
+} SVPauseStreamTaskRsp;
+
+typedef struct {
+  char   name[TSDB_STREAM_FNAME_LEN];
+  int8_t igNotExists;
+} SMPauseStreamReq;
+
+int32_t tSerializeSMPauseStreamReq(void* buf, int32_t bufLen, const SMPauseStreamReq* pReq);
+int32_t tDeserializeSMPauseStreamReq(void* buf, int32_t bufLen, SMPauseStreamReq* pReq);
+
+typedef struct {
+  SMsgHead head;
+  int32_t  taskId;
+  int8_t   igUntreated;
+} SVResumeStreamTaskReq;
+
+typedef struct {
+  int8_t reserved;
+} SVResumeStreamTaskRsp;
+
+typedef struct {
+  char   name[TSDB_STREAM_FNAME_LEN];
+  int8_t igNotExists;
+  int8_t igUntreated;
+} SMResumeStreamReq;
+
+int32_t tSerializeSMResumeStreamReq(void* buf, int32_t bufLen, const SMResumeStreamReq* pReq);
+int32_t tDeserializeSMResumeStreamReq(void* buf, int32_t bufLen, SMResumeStreamReq* pReq);
+
 typedef struct {
   char name[TSDB_TABLE_FNAME_LEN];
   char stb[TSDB_TABLE_FNAME_LEN];
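Pause/resume come in mnode-level (`SM*`) and vnode-task-level (`SV*`) flavors; `igNotExists` mirrors an IF EXISTS clause, and `igUntreated` on the resume side controls whether data accumulated during the pause is skipped. A hypothetical builder for the mnode pause request, assuming `tstrncpy` from the utility headers (it is already used elsewhere in this file):

```c
#include "tmsg.h"  // assumed include; declares SMPauseStreamReq

// Sketch: fill and serialize a PAUSE STREAM request bound for the mnode.
static int32_t buildPauseStreamReq(const char* streamName, void* buf, int32_t bufLen) {
  SMPauseStreamReq req = {0};
  tstrncpy(req.name, streamName, TSDB_STREAM_FNAME_LEN);
  req.igNotExists = 1;  // do not fail if the stream is already gone
  return tSerializeSMPauseStreamReq(buf, bufLen, &req);
}
```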
@@ -3123,7 +3191,8 @@ typedef struct {
   char     dbFName[TSDB_DB_FNAME_LEN];
   uint64_t suid;
   int32_t  version;
-  SArray*  pIndex;
+  int32_t  indexSize;
+  SArray*  pIndex;  // STableIndexInfo
 } STableIndexRsp;

 int32_t tSerializeSTableIndexRsp(void* buf, int32_t bufLen, const STableIndexRsp* pRsp);
@@ -3429,10 +3498,10 @@ typedef struct {
   char data[];  // SSubmitReq2
 } SSubmitReq2Msg;

-int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
-int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
-void    tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
-void    tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
+int32_t tEncodeSubmitReq(SEncoder* pCoder, const SSubmitReq2* pReq);
+int32_t tDecodeSubmitReq(SDecoder* pCoder, SSubmitReq2* pReq);
+void    tDestroySubmitTbData(SSubmitTbData* pTbData, int32_t flag);
+void    tDestroySubmitReq(SSubmitReq2* pReq, int32_t flag);

 typedef struct {
   int32_t affectedRows;
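The submit codec drops the doubled `S`/`2` from its function names; the payload type `SSubmitReq2` itself is unchanged. A sketch pairing the renamed encoder with `tEncoderInit`/`tEncoderClear` from the encode utilities, which this commit does not touch; the wrapper is illustrative:

```c
#include "tmsg.h"  // assumed include; declares SEncoder, SSubmitReq2, tEncodeSubmitReq

// Sketch: encode a submit request into a caller-provided buffer.
static int32_t encodeSubmit(uint8_t* buf, uint32_t cap, const SSubmitReq2* pReq) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, cap);
  int32_t code = tEncodeSubmitReq(&encoder, pReq);
  tEncoderClear(&encoder);
  return code;
}
```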
@@ -178,6 +178,9 @@ enum {
   // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)

 TD_NEW_MSG_SEG(TDMT_VND_MSG)
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
@@ -255,6 +258,8 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)

 TD_NEW_MSG_SEG(TDMT_MON_MSG)
   TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
@@ -67,284 +67,292 @@
 #define TK_DNODE 49
 #define TK_PORT 50
 #define TK_DNODES 51
-#define TK_NK_IPTOKEN 52
-#define TK_FORCE 53
-#define TK_LOCAL 54
-#define TK_QNODE 55
-#define TK_BNODE 56
-#define TK_SNODE 57
-#define TK_MNODE 58
-#define TK_DATABASE 59
-#define TK_USE 60
-#define TK_FLUSH 61
-#define TK_TRIM 62
-#define TK_COMPACT 63
-#define TK_IF 64
-#define TK_NOT 65
-#define TK_EXISTS 66
-#define TK_BUFFER 67
-#define TK_CACHEMODEL 68
-#define TK_CACHESIZE 69
-#define TK_COMP 70
-#define TK_DURATION 71
-#define TK_NK_VARIABLE 72
-#define TK_MAXROWS 73
-#define TK_MINROWS 74
-#define TK_KEEP 75
-#define TK_PAGES 76
-#define TK_PAGESIZE 77
-#define TK_TSDB_PAGESIZE 78
-#define TK_PRECISION 79
-#define TK_REPLICA 80
-#define TK_VGROUPS 81
-#define TK_SINGLE_STABLE 82
-#define TK_RETENTIONS 83
-#define TK_SCHEMALESS 84
-#define TK_WAL_LEVEL 85
-#define TK_WAL_FSYNC_PERIOD 86
-#define TK_WAL_RETENTION_PERIOD 87
-#define TK_WAL_RETENTION_SIZE 88
-#define TK_WAL_ROLL_PERIOD 89
-#define TK_WAL_SEGMENT_SIZE 90
-#define TK_STT_TRIGGER 91
-#define TK_TABLE_PREFIX 92
-#define TK_TABLE_SUFFIX 93
-#define TK_NK_COLON 94
-#define TK_MAX_SPEED 95
-#define TK_START 96
-#define TK_TIMESTAMP 97
-#define TK_END 98
-#define TK_TABLE 99
-#define TK_NK_LP 100
-#define TK_NK_RP 101
-#define TK_STABLE 102
-#define TK_ADD 103
-#define TK_COLUMN 104
-#define TK_MODIFY 105
-#define TK_RENAME 106
-#define TK_TAG 107
-#define TK_SET 108
-#define TK_NK_EQ 109
-#define TK_USING 110
-#define TK_TAGS 111
-#define TK_BOOL 112
-#define TK_TINYINT 113
-#define TK_SMALLINT 114
-#define TK_INT 115
-#define TK_INTEGER 116
-#define TK_BIGINT 117
-#define TK_FLOAT 118
-#define TK_DOUBLE 119
-#define TK_BINARY 120
-#define TK_NCHAR 121
-#define TK_UNSIGNED 122
-#define TK_JSON 123
-#define TK_VARCHAR 124
-#define TK_MEDIUMBLOB 125
-#define TK_BLOB 126
-#define TK_VARBINARY 127
-#define TK_DECIMAL 128
-#define TK_COMMENT 129
-#define TK_MAX_DELAY 130
-#define TK_WATERMARK 131
-#define TK_ROLLUP 132
-#define TK_TTL 133
-#define TK_SMA 134
-#define TK_DELETE_MARK 135
-#define TK_FIRST 136
-#define TK_LAST 137
-#define TK_SHOW 138
-#define TK_PRIVILEGES 139
-#define TK_DATABASES 140
-#define TK_TABLES 141
-#define TK_STABLES 142
-#define TK_MNODES 143
-#define TK_QNODES 144
-#define TK_FUNCTIONS 145
-#define TK_INDEXES 146
-#define TK_ACCOUNTS 147
-#define TK_APPS 148
-#define TK_CONNECTIONS 149
-#define TK_LICENCES 150
-#define TK_GRANTS 151
-#define TK_QUERIES 152
-#define TK_SCORES 153
-#define TK_TOPICS 154
-#define TK_VARIABLES 155
-#define TK_CLUSTER 156
-#define TK_BNODES 157
-#define TK_SNODES 158
-#define TK_TRANSACTIONS 159
-#define TK_DISTRIBUTED 160
-#define TK_CONSUMERS 161
-#define TK_SUBSCRIPTIONS 162
-#define TK_VNODES 163
-#define TK_ALIVE 164
-#define TK_LIKE 165
-#define TK_TBNAME 166
-#define TK_QTAGS 167
-#define TK_AS 168
-#define TK_INDEX 169
-#define TK_FUNCTION 170
-#define TK_INTERVAL 171
-#define TK_COUNT 172
-#define TK_LAST_ROW 173
-#define TK_TOPIC 174
-#define TK_META 175
-#define TK_CONSUMER 176
-#define TK_GROUP 177
-#define TK_DESC 178
-#define TK_DESCRIBE 179
-#define TK_RESET 180
-#define TK_QUERY 181
-#define TK_CACHE 182
-#define TK_EXPLAIN 183
-#define TK_ANALYZE 184
-#define TK_VERBOSE 185
-#define TK_NK_BOOL 186
-#define TK_RATIO 187
-#define TK_NK_FLOAT 188
-#define TK_OUTPUTTYPE 189
-#define TK_AGGREGATE 190
-#define TK_BUFSIZE 191
-#define TK_LANGUAGE 192
-#define TK_REPLACE 193
-#define TK_STREAM 194
-#define TK_INTO 195
-#define TK_TRIGGER 196
-#define TK_AT_ONCE 197
-#define TK_WINDOW_CLOSE 198
-#define TK_IGNORE 199
-#define TK_EXPIRED 200
-#define TK_FILL_HISTORY 201
-#define TK_UPDATE 202
-#define TK_SUBTABLE 203
-#define TK_KILL 204
-#define TK_CONNECTION 205
-#define TK_TRANSACTION 206
-#define TK_BALANCE 207
-#define TK_VGROUP 208
-#define TK_LEADER 209
-#define TK_MERGE 210
-#define TK_REDISTRIBUTE 211
-#define TK_SPLIT 212
-#define TK_DELETE 213
-#define TK_INSERT 214
-#define TK_NULL 215
-#define TK_NK_QUESTION 216
-#define TK_NK_ARROW 217
-#define TK_ROWTS 218
-#define TK_QSTART 219
-#define TK_QEND 220
-#define TK_QDURATION 221
-#define TK_WSTART 222
-#define TK_WEND 223
-#define TK_WDURATION 224
-#define TK_IROWTS 225
-#define TK_ISFILLED 226
-#define TK_CAST 227
-#define TK_NOW 228
-#define TK_TODAY 229
-#define TK_TIMEZONE 230
-#define TK_CLIENT_VERSION 231
-#define TK_SERVER_VERSION 232
-#define TK_SERVER_STATUS 233
-#define TK_CURRENT_USER 234
-#define TK_CASE 235
-#define TK_WHEN 236
-#define TK_THEN 237
-#define TK_ELSE 238
-#define TK_BETWEEN 239
-#define TK_IS 240
-#define TK_NK_LT 241
-#define TK_NK_GT 242
-#define TK_NK_LE 243
-#define TK_NK_GE 244
-#define TK_NK_NE 245
-#define TK_MATCH 246
-#define TK_NMATCH 247
-#define TK_CONTAINS 248
-#define TK_IN 249
-#define TK_JOIN 250
-#define TK_INNER 251
-#define TK_SELECT 252
-#define TK_DISTINCT 253
-#define TK_WHERE 254
-#define TK_PARTITION 255
-#define TK_BY 256
-#define TK_SESSION 257
-#define TK_STATE_WINDOW 258
-#define TK_EVENT_WINDOW 259
-#define TK_SLIDING 260
-#define TK_FILL 261
-#define TK_VALUE 262
-#define TK_VALUE_F 263
-#define TK_NONE 264
-#define TK_PREV 265
-#define TK_NULL_F 266
-#define TK_LINEAR 267
-#define TK_NEXT 268
-#define TK_HAVING 269
-#define TK_RANGE 270
-#define TK_EVERY 271
-#define TK_ORDER 272
-#define TK_SLIMIT 273
-#define TK_SOFFSET 274
-#define TK_LIMIT 275
-#define TK_OFFSET 276
-#define TK_ASC 277
-#define TK_NULLS 278
-#define TK_ABORT 279
-#define TK_AFTER 280
-#define TK_ATTACH 281
-#define TK_BEFORE 282
-#define TK_BEGIN 283
-#define TK_BITAND 284
-#define TK_BITNOT 285
-#define TK_BITOR 286
-#define TK_BLOCKS 287
-#define TK_CHANGE 288
-#define TK_COMMA 289
-#define TK_CONCAT 290
-#define TK_CONFLICT 291
-#define TK_COPY 292
-#define TK_DEFERRED 293
-#define TK_DELIMITERS 294
-#define TK_DETACH 295
-#define TK_DIVIDE 296
-#define TK_DOT 297
-#define TK_EACH 298
-#define TK_FAIL 299
-#define TK_FILE 300
-#define TK_FOR 301
-#define TK_GLOB 302
-#define TK_ID 303
-#define TK_IMMEDIATE 304
-#define TK_IMPORT 305
-#define TK_INITIALLY 306
-#define TK_INSTEAD 307
-#define TK_ISNULL 308
-#define TK_KEY 309
-#define TK_MODULES 310
-#define TK_NK_BITNOT 311
-#define TK_NK_SEMI 312
-#define TK_NOTNULL 313
-#define TK_OF 314
-#define TK_PLUS 315
-#define TK_PRIVILEGE 316
-#define TK_RAISE 317
-#define TK_RESTRICT 318
-#define TK_ROW 319
-#define TK_SEMI 320
-#define TK_STAR 321
-#define TK_STATEMENT 322
-#define TK_STRICT 323
-#define TK_STRING 324
-#define TK_TIMES 325
-#define TK_VALUES 326
-#define TK_VARIABLE 327
-#define TK_VIEW 328
-#define TK_WAL 329
+#define TK_RESTORE 52
+#define TK_NK_IPTOKEN 53
+#define TK_FORCE 54
+#define TK_UNSAFE 55
+#define TK_LOCAL 56
+#define TK_QNODE 57
+#define TK_BNODE 58
+#define TK_SNODE 59
+#define TK_MNODE 60
+#define TK_VNODE 61
+#define TK_DATABASE 62
+#define TK_USE 63
+#define TK_FLUSH 64
+#define TK_TRIM 65
+#define TK_COMPACT 66
+#define TK_IF 67
+#define TK_NOT 68
+#define TK_EXISTS 69
+#define TK_BUFFER 70
+#define TK_CACHEMODEL 71
+#define TK_CACHESIZE 72
+#define TK_COMP 73
+#define TK_DURATION 74
+#define TK_NK_VARIABLE 75
+#define TK_MAXROWS 76
+#define TK_MINROWS 77
+#define TK_KEEP 78
+#define TK_PAGES 79
+#define TK_PAGESIZE 80
+#define TK_TSDB_PAGESIZE 81
+#define TK_PRECISION 82
+#define TK_REPLICA 83
+#define TK_VGROUPS 84
+#define TK_SINGLE_STABLE 85
+#define TK_RETENTIONS 86
+#define TK_SCHEMALESS 87
+#define TK_WAL_LEVEL 88
+#define TK_WAL_FSYNC_PERIOD 89
+#define TK_WAL_RETENTION_PERIOD 90
+#define TK_WAL_RETENTION_SIZE 91
+#define TK_WAL_ROLL_PERIOD 92
+#define TK_WAL_SEGMENT_SIZE 93
+#define TK_STT_TRIGGER 94
+#define TK_TABLE_PREFIX 95
+#define TK_TABLE_SUFFIX 96
+#define TK_NK_COLON 97
+#define TK_MAX_SPEED 98
+#define TK_START 99
+#define TK_TIMESTAMP 100
+#define TK_END 101
+#define TK_TABLE 102
+#define TK_NK_LP 103
+#define TK_NK_RP 104
+#define TK_STABLE 105
+#define TK_ADD 106
+#define TK_COLUMN 107
+#define TK_MODIFY 108
+#define TK_RENAME 109
+#define TK_TAG 110
+#define TK_SET 111
+#define TK_NK_EQ 112
+#define TK_USING 113
+#define TK_TAGS 114
+#define TK_BOOL 115
+#define TK_TINYINT 116
+#define TK_SMALLINT 117
+#define TK_INT 118
+#define TK_INTEGER 119
+#define TK_BIGINT 120
+#define TK_FLOAT 121
+#define TK_DOUBLE 122
+#define TK_BINARY 123
+#define TK_NCHAR 124
+#define TK_UNSIGNED 125
+#define TK_JSON 126
+#define TK_VARCHAR 127
+#define TK_MEDIUMBLOB 128
+#define TK_BLOB 129
+#define TK_VARBINARY 130
+#define TK_DECIMAL 131
+#define TK_COMMENT 132
+#define TK_MAX_DELAY 133
+#define TK_WATERMARK 134
+#define TK_ROLLUP 135
+#define TK_TTL 136
+#define TK_SMA 137
+#define TK_DELETE_MARK 138
+#define TK_FIRST 139
+#define TK_LAST 140
+#define TK_SHOW 141
+#define TK_PRIVILEGES 142
+#define TK_DATABASES 143
+#define TK_TABLES 144
+#define TK_STABLES 145
+#define TK_MNODES 146
+#define TK_QNODES 147
+#define TK_FUNCTIONS 148
+#define TK_INDEXES 149
+#define TK_ACCOUNTS 150
+#define TK_APPS 151
+#define TK_CONNECTIONS 152
+#define TK_LICENCES 153
+#define TK_GRANTS 154
+#define TK_QUERIES 155
+#define TK_SCORES 156
+#define TK_TOPICS 157
+#define TK_VARIABLES 158
+#define TK_CLUSTER 159
+#define TK_BNODES 160
+#define TK_SNODES 161
+#define TK_TRANSACTIONS 162
+#define TK_DISTRIBUTED 163
+#define TK_CONSUMERS 164
+#define TK_SUBSCRIPTIONS 165
+#define TK_VNODES 166
+#define TK_ALIVE 167
+#define TK_LIKE 168
+#define TK_TBNAME 169
+#define TK_QTAGS 170
+#define TK_AS 171
+#define TK_INDEX 172
+#define TK_FUNCTION 173
+#define TK_INTERVAL 174
+#define TK_COUNT 175
+#define TK_LAST_ROW 176
+#define TK_TOPIC 177
+#define TK_META 178
+#define TK_CONSUMER 179
+#define TK_GROUP 180
+#define TK_DESC 181
+#define TK_DESCRIBE 182
+#define TK_RESET 183
+#define TK_QUERY 184
+#define TK_CACHE 185
+#define TK_EXPLAIN 186
+#define TK_ANALYZE 187
+#define TK_VERBOSE 188
+#define TK_NK_BOOL 189
+#define TK_RATIO 190
+#define TK_NK_FLOAT 191
+#define TK_OUTPUTTYPE 192
+#define TK_AGGREGATE 193
+#define TK_BUFSIZE 194
+#define TK_LANGUAGE 195
+#define TK_REPLACE 196
+#define TK_STREAM 197
+#define TK_INTO 198
+#define TK_PAUSE 199
+#define TK_RESUME 200
+#define TK_TRIGGER 201
+#define TK_AT_ONCE 202
+#define TK_WINDOW_CLOSE 203
+#define TK_IGNORE 204
+#define TK_EXPIRED 205
+#define TK_FILL_HISTORY 206
+#define TK_UPDATE 207
+#define TK_SUBTABLE 208
+#define TK_UNTREATED 209
+#define TK_KILL 210
+#define TK_CONNECTION 211
+#define TK_TRANSACTION 212
+#define TK_BALANCE 213
+#define TK_VGROUP 214
+#define TK_LEADER 215
+#define TK_MERGE 216
+#define TK_REDISTRIBUTE 217
+#define TK_SPLIT 218
+#define TK_DELETE 219
+#define TK_INSERT 220
+#define TK_NULL 221
+#define TK_NK_QUESTION 222
+#define TK_NK_ARROW 223
+#define TK_ROWTS 224
+#define TK_QSTART 225
+#define TK_QEND 226
+#define TK_QDURATION 227
+#define TK_WSTART 228
+#define TK_WEND 229
+#define TK_WDURATION 230
+#define TK_IROWTS 231
+#define TK_ISFILLED 232
+#define TK_CAST 233
+#define TK_NOW 234
+#define TK_TODAY 235
+#define TK_TIMEZONE 236
+#define TK_CLIENT_VERSION 237
+#define TK_SERVER_VERSION 238
+#define TK_SERVER_STATUS 239
+#define TK_CURRENT_USER 240
+#define TK_CASE 241
+#define TK_WHEN 242
+#define TK_THEN 243
+#define TK_ELSE 244
+#define TK_BETWEEN 245
+#define TK_IS 246
+#define TK_NK_LT 247
+#define TK_NK_GT 248
+#define TK_NK_LE 249
+#define TK_NK_GE 250
+#define TK_NK_NE 251
+#define TK_MATCH 252
+#define TK_NMATCH 253
+#define TK_CONTAINS 254
+#define TK_IN 255
+#define TK_JOIN 256
+#define TK_INNER 257
+#define TK_SELECT 258
+#define TK_DISTINCT 259
+#define TK_WHERE 260
+#define TK_PARTITION 261
+#define TK_BY 262
+#define TK_SESSION 263
+#define TK_STATE_WINDOW 264
+#define TK_EVENT_WINDOW 265
+#define TK_SLIDING 266
+#define TK_FILL 267
+#define TK_VALUE 268
+#define TK_VALUE_F 269
+#define TK_NONE 270
+#define TK_PREV 271
+#define TK_NULL_F 272
+#define TK_LINEAR 273
+#define TK_NEXT 274
+#define TK_HAVING 275
+#define TK_RANGE 276
+#define TK_EVERY 277
+#define TK_ORDER 278
+#define TK_SLIMIT 279
+#define TK_SOFFSET 280
+#define TK_LIMIT 281
+#define TK_OFFSET 282
+#define TK_ASC 283
+#define TK_NULLS 284
+#define TK_ABORT 285
+#define TK_AFTER 286
+#define TK_ATTACH 287
+#define TK_BEFORE 288
+#define TK_BEGIN 289
+#define TK_BITAND 290
+#define TK_BITNOT 291
+#define TK_BITOR 292
+#define TK_BLOCKS 293
+#define TK_CHANGE 294
+#define TK_COMMA 295
+#define TK_CONCAT 296
+#define TK_CONFLICT 297
+#define TK_COPY 298
+#define TK_DEFERRED 299
+#define TK_DELIMITERS 300
+#define TK_DETACH 301
+#define TK_DIVIDE 302
+#define TK_DOT 303
+#define TK_EACH 304
+#define TK_FAIL 305
+#define TK_FILE 306
+#define TK_FOR 307
+#define TK_GLOB 308
+#define TK_ID 309
+#define TK_IMMEDIATE 310
+#define TK_IMPORT 311
+#define TK_INITIALLY 312
+#define TK_INSTEAD 313
+#define TK_ISNULL 314
+#define TK_KEY 315
+#define TK_MODULES 316
+#define TK_NK_BITNOT 317
+#define TK_NK_SEMI 318
+#define TK_NOTNULL 319
+#define TK_OF 320
+#define TK_PLUS 321
+#define TK_PRIVILEGE 322
+#define TK_RAISE 323
+#define TK_RESTRICT 324
+#define TK_ROW 325
+#define TK_SEMI 326
+#define TK_STAR 327
+#define TK_STATEMENT 328
+#define TK_STRICT 329
+#define TK_STRING 330
+#define TK_TIMES 331
+#define TK_VALUES 332
+#define TK_VARIABLE 333
+#define TK_VIEW 334
+#define TK_WAL 335
+
+

 #define TK_NK_SPACE 600
 #define TK_NK_COMMENT 601
@@ -82,6 +82,7 @@ typedef struct SCatalogReq {
   SArray* pUser;        // element is SUserAuthInfo
   SArray* pTableIndex;  // element is SNAME
   SArray* pTableCfg;    // element is SNAME
+  SArray* pTableTag;    // element is SNAME
   bool    qNodeRequired;  // valid qnode
   bool    dNodeRequired;  // valid dnode
   bool    svrVerRequired;
|
||||||
SArray* pUser; // pRes = SUserAuthRes*
|
SArray* pUser; // pRes = SUserAuthRes*
|
||||||
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
|
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
|
||||||
SArray* pTableCfg; // pRes = STableCfg*
|
SArray* pTableCfg; // pRes = STableCfg*
|
||||||
|
SArray* pTableTag; // pRes = SArray<STagVal>*
|
||||||
SArray* pDnodeList; // pRes = SArray<SEpSet>*
|
SArray* pDnodeList; // pRes = SArray<SEpSet>*
|
||||||
SMetaRes* pSvrVer; // pRes = char*
|
SMetaRes* pSvrVer; // pRes = char*
|
||||||
} SMetaData;
|
} SMetaData;
|
||||||
|
@@ -122,18 +124,19 @@ typedef struct SSTableVersion {
   char     stbName[TSDB_TABLE_NAME_LEN];
   uint64_t dbId;
   uint64_t suid;
-  int16_t  sversion;
-  int16_t  tversion;
+  int32_t  sversion;
+  int32_t  tversion;
   int32_t  smaVer;
 } SSTableVersion;

-typedef struct SDbVgVersion {
+typedef struct SDbCacheInfo {
   char    dbFName[TSDB_DB_FNAME_LEN];
   int64_t dbId;
   int32_t vgVersion;
+  int32_t cfgVersion;
   int32_t numOfTable;  // unit is TSDB_TABLE_NUM_UNIT
   int64_t stateTs;
-} SDbVgVersion;
+} SDbCacheInfo;

 typedef struct STbSVersion {
   char* tbFName;
@@ -146,7 +149,6 @@ typedef struct SUserAuthVersion {
   int32_t version;
 } SUserAuthVersion;

-typedef SDbCfgRsp SDbCfgInfo;
 typedef SUserIndexRsp SIndexInfo;

 typedef void (*catalogCallback)(SMetaData* pResult, void* param, int32_t code);
@@ -178,6 +180,8 @@ int32_t catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char*

 int32_t catalogUpdateDBVgInfo(SCatalog* pCatalog, const char* dbName, uint64_t dbId, SDBVgInfo* dbInfo);

+int32_t catalogUpdateDbCfg(SCatalog* pCtg, const char* dbFName, uint64_t dbId, SDbCfgInfo* cfgInfo);
+
 int32_t catalogRemoveDB(SCatalog* pCatalog, const char* dbName, uint64_t dbId);

 int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName);
@@ -210,7 +214,7 @@ int32_t catalogGetSTableMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const

 int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);

-int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
+int32_t catalogAsyncUpdateTableMeta(SCatalog* pCtg, STableMetaRsp* pMsg);

 int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);

@@ -302,7 +306,7 @@ int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray*

 int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableVersion** stables, uint32_t* num);

-int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbVgVersion** dbs, uint32_t* num);
+int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbCacheInfo** dbs, uint32_t* num);

 int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion** users, uint32_t* num);

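With `SDbVgVersion` renamed to `SDbCacheInfo`, expiry polling can report config staleness (`cfgVersion`) alongside vgroup staleness. A hypothetical polling loop under that reading; releasing the out-array with `taosMemoryFree` mirrors existing callers but is an assumption here:

```c
// Sketch: collect expired db-cache entries and ship their versions in a heartbeat.
static void reportExpiredDbs(SCatalog* pCtg) {
  SDbCacheInfo* dbs = NULL;
  uint32_t      num = 0;
  if (catalogGetExpiredDBs(pCtg, &dbs, &num) == 0) {
    for (uint32_t i = 0; i < num; ++i) {
      // send dbs[i].dbFName / dbs[i].vgVersion / dbs[i].cfgVersion upstream
    }
    taosMemoryFree(dbs);  // assumed ownership convention
  }
}
```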
@@ -312,6 +316,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*

 int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);

+int32_t catalogGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
+
 int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableCfg** pCfg);

 int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp* pRsp);
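A hedged sketch of the new tag-fetch entry point; per the `SMetaData` comment above, the result array is assumed to hold `STagVal` entries, and the dump function itself is illustrative:

```c
// Sketch: fetch tag values for one table, then release the result array.
static int32_t dumpTableTags(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pName) {
  SArray* pRes = NULL;
  int32_t code = catalogGetTableTag(pCtg, pConn, pName, &pRes);
  if (code == 0) {
    // ... read STagVal entries via taosArrayGet(pRes, i) ...
    taosArrayDestroy(pRes);
  }
  return code;
}
```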
@@ -192,8 +192,6 @@ SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);

 int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);

-int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
-
 void qStreamSetOpen(qTaskInfo_t tinfo);

 void qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);

@@ -208,8 +206,6 @@ void* qExtractReaderFromStreamScanner(void* scanner);

 int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);

-int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
-
 int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo);
 int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver);
 int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
Some files were not shown because too many files have changed in this diff.