Merge remote-tracking branch 'origin/3.0' into feat/TD-22746
commit ee2521a6ab

@@ -131,3 +131,4 @@ tools/BUGS
 tools/taos-tools
 tools/taosws-rs
 tags
+.clangd
@@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench
 ### Ubuntu 18.04 及以上版本 & Debian:
 
 ```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
 ```
 
 #### 为 taos-tools 安装编译需要的软件
@@ -352,4 +352,4 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
 
 # 加入技术交流群
 
-TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小 T 为好友,即可入群。
+TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine1",加小 T 为好友,即可入群。
@@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
 ### Ubuntu 18.04 and above or Debian
 
 ```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
 ```
 
 #### Install build dependencies for taosTools
@@ -117,17 +117,11 @@ ELSE ()
 
 IF (${BUILD_SANITIZER})
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
 MESSAGE(STATUS "Compile with Address Sanitizer!")
 ELSE ()
-MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
-IF (${TD_DARWIN})
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
-ELSE ()
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-ENDIF ()
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ENDIF ()
 
 # disable all assert
@@ -162,6 +162,14 @@ ELSE ()
 ENDIF ()
 ENDIF ()
 
+IF(APPLE)
+set(CMAKE_THREAD_LIBS_INIT "-lpthread")
+set(CMAKE_HAVE_THREADS_LIBRARY 1)
+set(CMAKE_USE_WIN32_THREADS_INIT 0)
+set(CMAKE_USE_PTHREADS 1)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+ENDIF()
+
 MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
 
 MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
@@ -8,4 +8,4 @@ ExternalProject_Add(rocksdb
 BUILD_COMMAND ""
 INSTALL_COMMAND ""
 TEST_COMMAND ""
 )
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
 GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-GIT_TAG ae8d51c
+GIT_TAG 565ca21
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
 GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-GIT_TAG ffc2e6f
+GIT_TAG 4378702
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -223,31 +223,53 @@ endif(${BUILD_WITH_LEVELDB})
 # rocksdb
 # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
 if(${BUILD_WITH_ROCKSDB})
-#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+if(${TD_LINUX})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
+endif(${TD_LINUX})
+
+if(${TD_DARWIN})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+endif(${TD_DARWIN})
+
+if (${TD_WINDOWS})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+endif(${TD_WINDOWS})
+
+if(${TD_DARWIN})
+option(HAVE_THREAD_LOCAL "" OFF)
+option(WITH_IOSTATS_CONTEXT "" OFF)
+option(WITH_PERF_CONTEXT "" OFF)
+endif(${TD_DARWIN})
+
+if(${TD_WINDOWS})
+option(WITH_JNI "" ON)
+endif(${TD_WINDOWS})
+
+if(${TD_WINDOWS})
+option(WITH_MD_LIBRARY "build with MD" OFF)
+set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
+endif(${TD_WINDOWS})
+
+option(WITH_FALLOCATE "" OFF)
+option(WITH_JEMALLOC "" OFF)
+option(WITH_GFLAGS "" OFF)
+option(PORTABLE "" ON)
+option(WITH_LIBURING "" OFF)
+option(FAIL_ON_WARNINGS OFF)
+
 option(WITH_TESTS "" OFF)
 option(WITH_BENCHMARK_TOOLS "" OFF)
 option(WITH_TOOLS "" OFF)
 option(WITH_LIBURING "" OFF)
-option(WITH_IOSTATS_CONTEXT "" OFF)
-option(WITH_PERF_CONTEXT "" OFF)
-option(FAIL_ON_WARNINGS "" OFF)
-#option(WITH_JEMALLOC "" ON)
 option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
-IF (${TD_WINDOWS})
-option(WITH_MD_LIBRARY "build with MD" OFF)
-set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
-endif(${TD_WINDOWS})
 add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
 target_include_directories(
 rocksdb
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
 )
-IF (${TD_DARWIN})
-target_compile_options(
-rocksdb
-PRIVATE -Wno-unused-private-field
-)
-endif(${TD_DARWIN})
 endif(${BUILD_WITH_ROCKSDB})
 
 # lucene
@@ -1,4 +1,5 @@
 #include <assert.h>
+#include <bits/stdint-uintn.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -9,40 +10,307 @@
 const char DBPath[] = "rocksdb_c_simple_example";
 const char DBBackupPath[] = "/tmp/rocksdb_c_simple_example_backup";
 
-int main(int argc, char const *argv[]) {
-  rocksdb_t * db;
-  rocksdb_backup_engine_t *be;
-  rocksdb_options_t * options = rocksdb_options_create();
-  rocksdb_options_set_create_if_missing(options, 1);
-
-  // open DB
-  char *err = NULL;
-  db = rocksdb_open(options, DBPath, &err);
-
-  // Write
-  rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
-  rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
-
-  // Read
-  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
-  //rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
-  char buf[256] = {0};
-  size_t vallen = 0;
-  char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
-  snprintf(buf, vallen+5, "val:%s", val);
-  printf("%ld %ld %s\n", strlen(val), vallen, buf);
-
-  // Update
-  // rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
-
-  // Delete
-  rocksdb_delete(db, writeoptions, "key", 3, &err);
-
-  // Read again
-  val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
-  printf("val:%s\n", val);
-
-  rocksdb_close(db);
-
+static const int32_t endian_test_var = 1;
+#define IS_LITTLE_ENDIAN() (*(uint8_t *)(&endian_test_var) != 0)
+#define TD_RT_ENDIAN() (IS_LITTLE_ENDIAN() ? TD_LITTLE_ENDIAN : TD_BIG_ENDIAN)
+
+#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
+static void *taosDecodeFixedU64(const void *buf, uint64_t *value) {
+  if (IS_LITTLE_ENDIAN()) {
+    memcpy(value, buf, sizeof(*value));
+  } else {
+    ((uint8_t *)value)[7] = ((uint8_t *)buf)[0];
+    ((uint8_t *)value)[6] = ((uint8_t *)buf)[1];
+    ((uint8_t *)value)[5] = ((uint8_t *)buf)[2];
+    ((uint8_t *)value)[4] = ((uint8_t *)buf)[3];
+    ((uint8_t *)value)[3] = ((uint8_t *)buf)[4];
+    ((uint8_t *)value)[2] = ((uint8_t *)buf)[5];
+    ((uint8_t *)value)[1] = ((uint8_t *)buf)[6];
+    ((uint8_t *)value)[0] = ((uint8_t *)buf)[7];
+  }
+
+  return POINTER_SHIFT(buf, sizeof(*value));
+}
+
+// ---- Fixed U64
+static int32_t taosEncodeFixedU64(void **buf, uint64_t value) {
+  if (buf != NULL) {
+    if (IS_LITTLE_ENDIAN()) {
+      memcpy(*buf, &value, sizeof(value));
+    } else {
+      ((uint8_t *)(*buf))[0] = value & 0xff;
+      ((uint8_t *)(*buf))[1] = (value >> 8) & 0xff;
+      ((uint8_t *)(*buf))[2] = (value >> 16) & 0xff;
+      ((uint8_t *)(*buf))[3] = (value >> 24) & 0xff;
+      ((uint8_t *)(*buf))[4] = (value >> 32) & 0xff;
+      ((uint8_t *)(*buf))[5] = (value >> 40) & 0xff;
+      ((uint8_t *)(*buf))[6] = (value >> 48) & 0xff;
+      ((uint8_t *)(*buf))[7] = (value >> 56) & 0xff;
+    }
+
+    *buf = POINTER_SHIFT(*buf, sizeof(value));
+  }
+
+  return (int32_t)sizeof(value);
+}
+
+typedef struct KV {
+  uint64_t k1;
+  uint64_t k2;
+} KV;
+
+int kvSerial(KV *kv, char *buf) {
+  int len = 0;
+  len += taosEncodeFixedU64((void **)&buf, kv->k1);
+  len += taosEncodeFixedU64((void **)&buf, kv->k2);
+  return len;
+}
+const char *kvDBName(void *name) { return "kvDBname"; }
+int kvDBComp(void *state, const char *aBuf, size_t aLen, const char *bBuf, size_t bLen) {
+  KV w1, w2;
+
+  memset(&w1, 0, sizeof(w1));
+  memset(&w2, 0, sizeof(w2));
+
+  char *p1 = (char *)aBuf;
+  char *p2 = (char *)bBuf;
+  // p1 += 1;
+  // p2 += 1;
+
+  p1 = taosDecodeFixedU64(p1, &w1.k1);
+  p2 = taosDecodeFixedU64(p2, &w2.k1);
+
+  p1 = taosDecodeFixedU64(p1, &w1.k2);
+  p2 = taosDecodeFixedU64(p2, &w2.k2);
+
+  if (w1.k1 < w2.k1) {
+    return -1;
+  } else if (w1.k1 > w2.k1) {
+    return 1;
+  }
+
+  if (w1.k2 < w2.k2) {
+    return -1;
+  } else if (w1.k2 > w2.k2) {
+    return 1;
+  }
+  return 0;
+}
+int kvDeserial(KV *kv, char *buf) {
+  char *p1 = (char *)buf;
+  // p1 += 1;
+  p1 = taosDecodeFixedU64(p1, &kv->k1);
+  p1 = taosDecodeFixedU64(p1, &kv->k2);
+
+  return 0;
+}
+
+int main(int argc, char const *argv[]) {
+  rocksdb_t *db;
+  rocksdb_backup_engine_t *be;
+
+  char *err = NULL;
+  const char *path = "/tmp/db";
+
+  rocksdb_options_t *opt = rocksdb_options_create();
+  rocksdb_options_set_create_if_missing(opt, 1);
+  rocksdb_options_set_create_missing_column_families(opt, 1);
+
+  // Read
+  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
+  // rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
+  int len = 1;
+  char buf[256] = {0};
+  size_t vallen = 0;
+  char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
+  snprintf(buf, vallen + 5, "val:%s", val);
+  printf("%ld %ld %s\n", strlen(val), vallen, buf);
+
+  char **cfName = calloc(len, sizeof(char *));
+  for (int i = 0; i < len; i++) {
+    cfName[i] = "test";
+  }
+  const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
+  for (int i = 0; i < len; i++) {
+    cfOpt[i] = rocksdb_options_create_copy(opt);
+    if (i != 0) {
+      rocksdb_comparator_t *comp = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+      rocksdb_options_set_comparator((rocksdb_options_t *)cfOpt[i], comp);
+    }
+  }
+
+  rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
+  db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);
+
+  {
+    rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
+    size_t vlen = 0;
+
+    char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
+    printf("Get value %s, and len = %d\n", v, (int)vlen);
+  }
+
+  rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
+  rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
+  rocksdb_writebatch_put_cf(wBatch, cfHandle[0], "key", strlen("key"), "value", strlen("value"));
+  rocksdb_write(db, wOpt, wBatch, &err);
+
+  rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
+  size_t vlen = 0;
+
+  {
+    rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
+    rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
+    for (int i = 0; i < 100; i++) {
+      char buf[128] = {0};
+      KV kv = {.k1 = (100 - i) % 26, .k2 = i % 26};
+      kvSerial(&kv, buf);
+      rocksdb_writebatch_put_cf(wBatch, cfHandle[1], buf, sizeof(kv), "value", strlen("value"));
+    }
+    rocksdb_write(db, wOpt, wBatch, &err);
+  }
+  {
+    {
+      char buf[128] = {0};
+      KV kv = {.k1 = 0, .k2 = 0};
+      kvSerial(&kv, buf);
+      char *v = rocksdb_get_cf(db, rOpt, cfHandle[1], buf, sizeof(kv), &vlen, &err);
+      printf("Get value %s, and len = %d, xxxx\n", v, (int)vlen);
+      rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+      rocksdb_iter_seek_to_first(iter);
+      int i = 0;
+      while (rocksdb_iter_valid(iter)) {
+        size_t klen, vlen;
+        const char *key = rocksdb_iter_key(iter, &klen);
+        const char *value = rocksdb_iter_value(iter, &vlen);
+        KV kv;
+        kvDeserial(&kv, (char *)key);
+        printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
+        i++;
+        rocksdb_iter_next(iter);
+      }
+      rocksdb_iter_destroy(iter);
+    }
+    {
+      char buf[128] = {0};
+      KV kv = {.k1 = 0, .k2 = 0};
+      int len = kvSerial(&kv, buf);
+      rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+      rocksdb_iter_seek(iter, buf, len);
+      if (!rocksdb_iter_valid(iter)) {
+        printf("invalid iter");
+      }
+      {
+        char buf[128] = {0};
+        KV kv = {.k1 = 100, .k2 = 0};
+        int len = kvSerial(&kv, buf);
+
+        rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
+        rocksdb_iter_seek(iter, buf, len);
+        if (!rocksdb_iter_valid(iter)) {
+          printf("invalid iter\n");
+          rocksdb_iter_seek_for_prev(iter, buf, len);
+          if (!rocksdb_iter_valid(iter)) {
+            printf("stay invalid iter\n");
+          } else {
+            size_t klen = 0, vlen = 0;
+            const char *key = rocksdb_iter_key(iter, &klen);
+            const char *value = rocksdb_iter_value(iter, &vlen);
+            KV kv;
+            kvDeserial(&kv, (char *)key);
+            printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
+          }
+        }
+      }
+    }
+  }
+
+  // char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
+  // printf("Get value %s, and len = %d\n", v, (int)vlen);
+
+  rocksdb_column_family_handle_destroy(cfHandle[0]);
+  rocksdb_column_family_handle_destroy(cfHandle[1]);
+  rocksdb_close(db);
+
+  // {
+  // // rocksdb_options_t *Options = rocksdb_options_create();
+  // db = rocksdb_open(comm, path, &err);
+  // if (db != NULL) {
+  // rocksdb_options_t *cfo = rocksdb_options_create_copy(comm);
+  // rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+  // rocksdb_options_set_comparator(cfo, cmp1);
+
+  // rocksdb_column_family_handle_t *handle = rocksdb_create_column_family(db, cfo, "cf1", &err);
+
+  // rocksdb_column_family_handle_destroy(handle);
+  // rocksdb_close(db);
+  // db = NULL;
+  // }
+  // }
+
+  //  int ncf = 2;
+
+  // rocksdb_column_family_handle_t **pHandle = malloc(ncf * sizeof(rocksdb_column_family_handle_t *));
+
+  // {
+  // rocksdb_options_t *options = rocksdb_options_create_copy(comm);
+
+  // rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
+  // rocksdb_options_t *dbOpts1 = rocksdb_options_create_copy(comm);
+  // rocksdb_options_t *dbOpts2 = rocksdb_options_create_copy(comm);
+  // rocksdb_options_set_comparator(dbOpts2, cmp1);
+  // // rocksdb_column_family_handle_t *cf = rocksdb_create_column_family(db, dbOpts1, "cmp1", &err);
+
+  // const char *pName[] = {"default", "cf1"};
+
+  // const rocksdb_options_t **pOpts = malloc(ncf * sizeof(rocksdb_options_t *));
+  // pOpts[0] = dbOpts1;
+  // pOpts[1] = dbOpts2;
+
+  // rocksdb_options_t *allOptions = rocksdb_options_create_copy(comm);
+  // db = rocksdb_open_column_families(allOptions, "test", ncf, pName, pOpts, pHandle, &err);
+  // }
+
+  // // rocksdb_options_t *options = rocksdb_options_create();
+  // // rocksdb_options_set_create_if_missing(options, 1);
+
+  // //  //rocksdb_open_column_families(const rocksdb_options_t *options, const char *name, int num_column_families,
+  // //  const char *const *column_family_names,
+  // //  const rocksdb_options_t *const *column_family_options,
+  // //  rocksdb_column_family_handle_t **column_family_handles, char **errptr);
+
+  // for (int i = 0; i < 100; i++) {
+  //   char buf[128] = {0};
+
+  //   rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
+  //   KV kv = {.k1 = i, .k2 = i};
+  //   kvSerial(&kv, buf);
+  //   rocksdb_put_cf(db, wopt, pHandle[0], buf, strlen(buf), (const char *)&i, sizeof(i), &err);
+  // }
+
+  // rocksdb_close(db);
+  // Write
+  // rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
+  // rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
+
+  //// Read
+  // rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
+  // rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
+  // size_t vallen = 0;
+  // char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
+  // printf("val:%s\n", val);
+
+  //// Update
+  //// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
+
+  //// Delete
+  // rocksdb_delete(db, writeoptions, "key", 3, &err);
+
+  //// Read again
+  // val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
+  // printf("val:%s\n", val);
+
+  // rocksdb_close(db);
+
 return 0;
 }
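For reference, here is a minimal, self-contained sketch (not part of the commit) of the fixed-width key encoding that the new `taosEncodeFixedU64`/`taosDecodeFixedU64` helpers above implement: the value is always laid out least-significant byte first, so a key written on a little-endian host decodes to the same number on a big-endian host. Because that wire layout does not sort numerically under a plain byte-wise compare, the example column family in the diff installs the `kvDBComp` comparator, which decodes both keys before comparing. The helper names below (`encode_u64`, `decode_u64`) are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

/* Encode least-significant byte first, mirroring the layout the helpers in the
 * diff produce regardless of host byte order (assumption: 8-byte keys). */
static void encode_u64(uint8_t out[8], uint64_t v) {
  for (int i = 0; i < 8; i++) out[i] = (uint8_t)(v >> (8 * i));
}

/* Decode the same layout back into a host-order integer. */
static uint64_t decode_u64(const uint8_t in[8]) {
  uint64_t v = 0;
  for (int i = 0; i < 8; i++) v |= (uint64_t)in[i] << (8 * i);
  return v;
}

int main(void) {
  uint8_t buf[8];
  encode_u64(buf, 0x0102030405060708ULL);
  printf("round trip %s\n", decode_u64(buf) == 0x0102030405060708ULL ? "ok" : "broken");
  return 0;
}
```
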
@@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
 slug: /
 ---
 
-TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
 
 To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
@@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
 - **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
 
-- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
 
 With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
@@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 
 | **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
 | Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
 
 ### System Maintenance Requirements
@@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
 If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
 
-TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
+TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
 
 Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
@@ -102,7 +102,7 @@ sudo apt-get install tdengine
 :::tip
 This installation method is supported only for Debian and Ubuntu.
-::::
+:::
 </TabItem>
 <TabItem label="Windows" value="windows">
@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
 {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
 ```
 
-More configuration about connection,please refer to [Java Connector](/reference/connector/java)
+More configuration about connection, please refer to [Java Connector](/reference/connector/java)
@@ -1,3 +1,3 @@
-```php title="原生连接"
+```php title="native"
 {{#include docs/examples/php/connect.php}}
 ```
@@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
 For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
 
 Key differences:
 
 3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
 1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
@@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
 <TabItem label="R" value="r">
 
 1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
 2. Install the dependency package `RJDBC`:
 
 ```R
 install.packages("RJDBC")
@@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
 </TabItem>
 <TabItem label="PHP" value="php">
 
-**Download Source Code Package and Unzip:**
+**Download Source Code Package and Unzip: **
 
 ```shell
 curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
@@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
 
 > Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
 
-**Non-Swoole Environment:**
+**Non-Swoole Environment: **
 
 ```shell
 phpize && ./configure && make -j && make install
 ```
 
-**Specify TDengine Location:**
+**Specify TDengine Location: **
 
 ```shell
 phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
@@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
 > `--with-tdengine-dir=` is followed by the TDengine installation location.
 > This way is useful in case TDengine location can't be found automatically or macOS.
 
-**Swoole Environment:**
+**Swoole Environment: **
 
 ```shell
 phpize && ./configure --enable-swoole && make -j && make install
@@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
 ## Query Examples
 
-If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query SQL:
+If you want query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
 
 ```sql
 SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
@@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
 
 ## Query Examples
 
-If you want query the data of `location=California.LosAngeles groupid=3`,here is the query SQL:
+If you want query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
 
 ```sql
 SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
@@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
 
 ## Query Examples
 
-If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query SQL:
+If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
 
 ```sql
 SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
@@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
 On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned.
 
-For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
 
 ## Sample Programs
 
@@ -98,7 +98,7 @@ The main Program is responsible for:
 3. Start reading threads
 4. Output writing speed every 10 seconds
 
 The main program provides 4 parameters for tuning:
 
 1. The number of reading threads, default value is 1
 2. The number of writing threads, default value is 2
@@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
 
 If you want to launch the sample program on a remote server, please follow below steps:
 
-1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
+1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
 ```
 mvn package
 ```
@@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
 pip3 install faster-fifo
 ```
 
-3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
 
 4. Execute the program
@@ -1,4 +1,4 @@
-### python Kafka 客户端
+### python Kafka client
 
 For python kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
 
@@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
 <details>
 <summary>kafka_example_consumer</summary>
 
-`kafka_example_consumer` is `consumer`,which is responsible for consuming data from kafka and writing it to TDengine.
+`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
 
 ```py
 {{#include docs/examples/python/kafka_example_consumer.py}}
@@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
 ## Introduction
 
 SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
 
 - Query on single column or multiple columns
-- Filter on tags or data columns:>, <, =, <\>, like
+- Filter on tags or data columns: >, <, =, <\>, like
 - Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset`
 - Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
 - Arithmetic on columns of numeric types or aggregate results
@@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
 :::note
 
 1. With either REST connection or native connection, the above sample code works well.
-2. Please note that `use db` can't be used in case of REST connection because it's stateless.
+2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
 
 :::
@@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
 To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
 
-Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
+Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
 
 ## Data Schema and API
 
@@ -294,7 +294,6 @@ You configure the following parameters when creating a consumer:
 | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
 | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
 | `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
-| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
 | `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false
 
 The method of specifying these parameters depends on the language used:
@@ -312,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
 tmq_conf_set(conf, "td.connect.user", "root");
 tmq_conf_set(conf, "td.connect.pass", "taosdata");
 tmq_conf_set(conf, "auto.offset.reset", "earliest");
-tmq_conf_set(conf, "experimental.snapshot.enable", "true");
 tmq_conf_set(conf, "msg.with.table.name", "true");
 tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
 
@@ -368,7 +366,6 @@ conf := &tmq.ConfigMap{
 "td.connect.port": "6030",
 "client.id": "test_tmq_c",
 "enable.auto.commit": "false",
-"experimental.snapshot.enable": "true",
 "msg.with.table.name": "true",
 }
 consumer, err := NewConsumer(conf)
@@ -416,7 +413,6 @@ Python programs use the following parameters:
 | `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
 | `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
 | `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
-| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
 | `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
 
 </TabItem>
@@ -252,9 +252,9 @@ create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId
 ```
 Create the UDF:
 ```bash
 create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
 ```
 Use the UDF in the query:
 ```bash
 select max_vol(vol1,vol2,vol3,deviceid) from battery;
 ```
@@ -271,9 +271,9 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
 ## Implement a UDF in Python
 
 Implement the specified interface functions when implementing a UDF in Python.
-- implement `process` function for the scalar UDF。
-- implement `start`, `reduce`, `finish` for the aggregate UDF。
-- implement `init` for initialization and `destroy` for termination。
+- implement `process` function for the scalar UDF.
+- implement `start`, `reduce`, `finish` for the aggregate UDF.
+- implement `init` for initialization and `destroy` for termination.
 
 ### Implement a Scalar UDF in Python
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
|
||||||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
||||||
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
||||||
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
||||||
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
||||||
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||||
|
|
||||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
|
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
|
||||||
|
@ -24,24 +24,24 @@ CREATE DATABASE db_name PRECISION 'ns';
|
||||||
|
|
||||||
In TDengine, the data types below can be used when specifying a column or tag.
|
In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
|
|
||||||
| # | **type** | **Bytes** | **Description** |
|
| # | **type** | **Bytes** | **Description** |
|
||||||
| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| --- | :---------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond; microsecond and nanosecond are also supported. |
|
| 1 | TIMESTAMP | 8 | Default precision is millisecond; microsecond and nanosecond are also supported. |
|
||||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||||
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
||||||
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
|
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
|
||||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
||||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||||
| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
|
| 10 | SMALLINT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
|
||||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||||
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
|
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
|
||||||
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||||
| 15 | JSON | | JSON type can only be used on tags. A tag of JSON type cannot be used together with any other tag of any type. |
|
| 15 | JSON | | JSON type can only be used on tags. A tag of JSON type cannot be used together with any other tag of any type. |
|
||||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||||
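
A short sketch showing several of the types above in a supertable definition; the table, column, and tag names are illustrative only:

```sql
-- Hypothetical supertable using TIMESTAMP, FLOAT, unsigned INT, NCHAR, BINARY and TINYINT
CREATE STABLE sensor_data (
  ts     TIMESTAMP,
  temp   FLOAT,
  status INT UNSIGNED,
  remark NCHAR(16)
) TAGS (
  location BINARY(64),
  groupid  TINYINT
);
```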
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
|
|
|
@ -72,8 +72,8 @@ database_option: {
|
||||||
- 0: The database can contain multiple supertables.
|
- 0: The database can contain multiple supertables.
|
||||||
- 1: The database can contain only one supertable.
|
- 1: The database can contain only one supertable.
|
||||||
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
||||||
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup when it's a negative number; the default value is 0. For example, if the table name is v30001, then "0001" is used if TABLE_PREFIX is set to 2 but "v3" is used if TABLE_PREFIX is set to -2. It can help you to control the distribution of tables.
|
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup when it's a negative number; the default value is 0. For example, if the table name is v30001, then "0001" is used if TABLE_PREFIX is set to 2 but "v3" is used if TABLE_PREFIX is set to -2. It can help you to control the distribution of tables.
|
||||||
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup when it's a negative number; the default value is 0. For example, if the table name is v30001, then "v300" is used if TABLE_SUFFIX is set to 2 but "01" is used if TABLE_SUFFIX is set to -2. It can help you to control the distribution of tables.
|
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup when it's a negative number; the default value is 0. For example, if the table name is v30001, then "v300" is used if TABLE_SUFFIX is set to 2 but "01" is used if TABLE_SUFFIX is set to -2. It can help you to control the distribution of tables.
|
||||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||||
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
|
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
|
||||||
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
|
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
|
||||||
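
As an illustration of how a few of the options above can be combined, here is a hedged sketch; the database name and the chosen values are arbitrary:

```sql
-- Hypothetical database: default merge trigger, 2-character table prefix rule,
-- 4 KB storage pages, and WAL kept one day for data subscription
CREATE DATABASE power_db
  STT_TRIGGER 8
  TABLE_PREFIX 2
  TSDB_PAGESIZE 4
  WAL_RETENTION_PERIOD 86400;
```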
|
|
|
@ -33,7 +33,7 @@ column_definition:
|
||||||
SHOW STABLES [LIKE tb_name_wildcard];
|
SHOW STABLES [LIKE tb_name_wildcard];
|
||||||
```
|
```
|
||||||
|
|
||||||
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
|
The preceding SQL statement shows all supertables in the current TDengine database.
|
||||||
|
|
||||||
### View the CREATE Statement for a Supertable
|
### View the CREATE Statement for a Supertable
|
||||||
|
|
||||||
|
|
|
@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
```
|
```
|
||||||
|
|
||||||
## Automatically Create Table When Inserting
|
## Automatically Create Table When Inserting
|
||||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
interp_clause:
|
interp_clause:
|
||||||
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
|
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||||
|
|
||||||
partition_by_clause:
|
partition_by_clause:
|
||||||
PARTITION BY expr [, expr] ...
|
PARTITION BY expr [, expr] ...
|
||||||
|
@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
|
||||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||||
```
|
```
|
||||||
|
|
||||||
For sub-table and super table:
|
For sub-table and super table:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT *
|
SELECT *
|
||||||
|
|
|
@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
Prior to TDengine 3.0.3.0 (excluded),only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
|
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super table, and it's not allowed to dynamically create an index on any other tag. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
|
||||||
|
|
||||||
## Syntax
|
## Syntax
|
||||||
|
|
||||||
1. The syntax of creating an index
|
1. The syntax of creating an index
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||||
```
|
```
|
||||||
|
|
||||||
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
|
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
|
||||||
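
For example (assuming the `meters` supertable with a `groupId` tag used elsewhere in these docs), a tag index might be created and later dropped as follows; the index name is arbitrary:

```sql
-- Create an index on the groupId tag of supertable meters
CREATE INDEX idx_groupid ON meters (groupId);

-- Drop it when it is no longer needed
DROP INDEX idx_groupid;
```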
|
|
|
@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00").
|
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use.
|
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use.
|
||||||
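
A small sketch of the behaviors above; the timestamp value and table name are illustrative:

```sql
-- Convert a UNIX timestamp (milliseconds) to ISO8601 with an explicit time zone
SELECT TO_ISO8601(1626919594630, '+08:00');

-- Convert the primary timestamp column; precision follows the database precision
SELECT TO_ISO8601(ts) FROM meters LIMIT 1;
```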
|
|
||||||
|
@ -626,7 +626,7 @@ algo_type: {
|
||||||
|
|
||||||
**Applicable table types**: standard tables and supertables
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
||||||
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||||
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
||||||
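
For instance, a hedged sketch against the `meters` table used throughout this document:

```sql
-- 90th percentile of current using the default histogram-based algorithm
SELECT APERCENTILE(current, 90) FROM meters;

-- Same quantile approximated with the t-digest algorithm
SELECT APERCENTILE(current, 90, 't-digest') FROM meters;
```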
|
@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
|
||||||
ELAPSED(ts_primary_key [, time_unit])
|
ELAPSED(ts_primary_key [, time_unit])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||||
|
|
||||||
**Return value type**: Double if the input value is not NULL;
|
**Return value type**: Double if the input value is not NULL;
|
||||||
|
|
||||||
|
@ -680,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
||||||
|
|
||||||
**Applicable tables**: table, STable, outer in nested query
|
**Applicable tables**: table, STable, outer in nested query
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||||
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
||||||
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
||||||
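
A sketch of how `elapsed` is typically combined with a time range and `INTERVAL`; the table name and time range are illustrative:

```sql
-- Continuous time length, in seconds, covered by valid data within each 1-hour window
SELECT ELAPSED(ts, 1s) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
  INTERVAL(1h);
```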
|
@ -758,7 +758,7 @@ SUM(expr)
|
||||||
HYPERLOGLOG(expr)
|
HYPERLOGLOG(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:
|
**Description**:
|
||||||
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
|
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
|
||||||
However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
|
However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
|
||||||
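
For example, the approximate and the exact distinct count might be compared as follows; the column and table names are illustrative:

```sql
-- Approximate number of distinct voltage values (memory-friendly on large data sets)
SELECT HYPERLOGLOG(voltage) FROM meters;

-- Exact alternative recommended above for small data volumes
SELECT COUNT(data) FROM (SELECT UNIQUE(voltage) AS data FROM meters);
```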
|
|
||||||
|
@ -772,10 +772,10 @@ HYPERLOGLOG(expr)
|
||||||
### HISTOGRAM
|
### HISTOGRAM
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
HISTOGRAM(expr,bin_type, bin_description, normalized)
|
HISTOGRAM(expr, bin_type, bin_description, normalized)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**:Returns count of data points in user-specified ranges.
|
**Description**: Returns count of data points in user-specified ranges.
|
||||||
|
|
||||||
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned.
|
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned.
|
||||||
|
|
||||||
|
@ -783,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
||||||
|
|
||||||
**Applicable table types**: table, STable
|
**Applicable table types**: table, STable
|
||||||
|
|
||||||
**Explanations**:
|
**Explanations**:
|
||||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
|
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||||
- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
|
- bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type respectively:
|
||||||
- "user_input": "[1, 3, 5, 7]":
|
- "user_input": "[1, 3, 5, 7]":
|
||||||
User specified bin values.
|
User specified bin values.
|
||||||
|
|
||||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
|
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
||||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||||
|
|
||||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
|
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
||||||
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
||||||
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
|
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
|
||||||
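
A hedged sketch combining the parameters above; the bucket boundaries and table name are arbitrary:

```sql
-- Count voltage readings in user-specified buckets, without normalization
SELECT HISTOGRAM(voltage, 'user_input', '[200, 210, 220, 230, 240]', 0) FROM meters;

-- Five linear buckets of width 50 starting at 0, with (-inf, inf) added, normalized result
SELECT HISTOGRAM(voltage, 'linear_bin',
    '{"start": 0.0, "width": 50.0, "count": 5, "infinity": true}', 1) FROM meters;
```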
|
|
||||||
|
@ -886,7 +886,7 @@ INTERP(expr)
|
||||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
||||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
|
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
|
||||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0).
|
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0).
|
||||||
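
Putting the clauses above together, a hedged sketch; the time range and table name are illustrative:

```sql
-- Interpolate current every 10 minutes over one hour, one timeline per child table
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(10m)
FILL(LINEAR);
```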
|
|
||||||
|
@ -1107,7 +1107,7 @@ ignore_negative: {
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。
|
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
||||||
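
As a hedged illustration of the usage described above, assuming the sub-table `d1001` from the meters example:

```sql
-- Rate of change of current per 10 minutes, ignoring negative results
SELECT _rowts, DERIVATIVE(current, 10m, 1) FROM d1001;
```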
|
|
||||||
### DIFF
|
### DIFF
|
||||||
|
|
||||||
|
@ -1131,7 +1131,7 @@ ignore_negative: {
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- The number of result rows is the total number of rows minus one; there is no output for the first row
|
- The number of result rows is the total number of rows minus one; there is no output for the first row
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from。
|
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
|
||||||
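
Correspondingly, a sketch for `DIFF` on the same assumed sub-table:

```sql
-- Difference between each value and the previous one; the first row produces no output
SELECT _rowts, DIFF(current) FROM d1001;
```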
|
|
||||||
|
|
||||||
### IRATE
|
### IRATE
|
||||||
|
@ -1183,7 +1183,7 @@ STATECOUNT(expr, oper, val)
|
||||||
**Applicable parameter values**:
|
**Applicable parameter values**:
|
||||||
|
|
||||||
- oper: Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
- oper: Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
||||||
- val : Numeric types
|
- val: Numeric types
|
||||||
|
|
||||||
**Return value type**: Integer
|
**Return value type**: Integer
|
||||||
|
|
||||||
|
@ -1210,7 +1210,7 @@ STATEDURATION(expr, oper, val, unit)
|
||||||
**Applicable parameter values**:
|
**Applicable parameter values**:
|
||||||
|
|
||||||
- oper: Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
- oper: Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to); the value is case insensitive and must be in quotes.
|
||||||
- val : Numeric types
|
- val: Numeric types
|
||||||
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
|
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
|
||||||
|
|
||||||
**Return value type**: Integer
|
**Return value type**: Integer
|
||||||
|
|
|
@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
|
||||||
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
||||||
|
|
||||||
1. NONE: No fill (the default fill mode)
|
1. NONE: No fill (the default fill mode)
|
||||||
2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
|
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled.
|
||||||
3. PREV:Fill with the previous non-NULL value, `FILL(PREV)`
|
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||||
4. NULL:Fill with NULL, `FILL(NULL)`
|
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||||
5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)`
|
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||||
6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)`
|
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
|
||||||
|
|
||||||
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, `LINEAR`, because filling can't be performed if there is not any data. For filling modes `NULL` and `VALUE`, however, filling can be performed even though there is not any data, filling or not depends on the choice of user's application. To accomplish the need of this force filling behavior and not break the behavior of existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
|
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, `LINEAR`, because filling can't be performed if there is not any data. For filling modes `NULL` and `VALUE`, however, filling can be performed even though there is not any data, filling or not depends on the choice of user's application. To accomplish the need of this force filling behavior and not break the behavior of existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
|
||||||
|
|
||||||
1. NULL_F: Fill `NULL` by force
|
1. NULL_F: Fill `NULL` by force
|
||||||
2. VALUE_F: Fill `VALUE` by force
|
2. VALUE_F: Fill `VALUE` by force
|
||||||
|
|
||||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
|
||||||
|
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||||
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. don't fill by force. In other words, there is no filling by force in stream processing.
|
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. don't fill by force. In other words, there is no filling by force in stream processing.
|
||||||
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. fill by force; `VALUE` and `VALUE_F` are the same, i.e. fill by force. In other words, there is always filling by force when used with `INTERP`.
|
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. fill by force; `VALUE` and `VALUE_F` are the same, i.e. fill by force. In other words, there is always filling by force when used with `INTERP`.
|
||||||
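
A hedged sketch contrasting a normal and a forced fill; the time range and table name are illustrative:

```sql
-- Without any data in the range, FILL(VALUE, 0) returns an empty result
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
  INTERVAL(1h) FILL(VALUE, 0);

-- FILL(VALUE_F, 0) fills every window by force even when the range has no data
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
  INTERVAL(1h) FILL(VALUE_F, 0);
```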
|
|
||||||
|
@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
|
||||||
|
|
||||||
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
||||||
|
|
||||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||||
|
|
||||||

|

|
||||||
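
For instance, a sliding window versus a tumbling window might look like this (sketch only, using the `meters` table assumed in this document):

```sql
-- 10-minute windows advancing every 5 minutes (sliding window)
SELECT _wstart, MAX(current) FROM meters INTERVAL(10m) SLIDING(5m);

-- SLIDING equal to INTERVAL gives non-overlapping (tumbling) windows
SELECT _wstart, MAX(current) FROM meters INTERVAL(10m) SLIDING(10m);
```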
|
|
||||||
|
@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
|
||||||
|
|
||||||
### State Window
|
### State Window
|
||||||
|
|
||||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
|
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
|
||||||
|
|
||||||
### Session Window
|
### Session Window
|
||||||
|
|
||||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
||||||
|
|
||||||

|

|
||||||
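
A hedged sketch of a session-window query matching the 12-second gap described above, assuming the sub-table `d1001`:

```sql
-- Rows whose timestamps are within 12 seconds of each other fall into the same session
SELECT _wstart, COUNT(*) FROM d1001 SESSION(ts, 12s);
```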
|
|
||||||
|
@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
||||||
|
|
||||||
### Examples
|
### Examples
|
||||||
|
|
||||||
A table of intelligent meters can be created by the SQL statement below:
|
A table of intelligent meters can be created by the SQL statement below:
|
||||||
|
|
||||||
```
|
```
|
||||||
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
|
|
|
@ -112,7 +112,7 @@ SHOW STREAMS;
|
||||||
|
|
||||||
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
||||||
|
|
||||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering,the default value is AT_ONCE:
|
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, the default value is AT_ONCE:
|
||||||
|
|
||||||
1. AT_ONCE: triggers on write
|
1. AT_ONCE: triggers on write
|
||||||
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
|
||||||
|
|
||||||
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
|
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
|
||||||
|
|
||||||
- JSON format:
|
- JSON format:
|
||||||
|
|
||||||
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
|
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
|
||||||
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
|
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
|
||||||
|
|
|
@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
|
||||||
|
|
||||||
1. If there are escape characters in identifiers (database name, table name, column name)
|
1. If there are escape characters in identifiers (database name, table name, column name)
|
||||||
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII letters, or underscores and can't start with a digit
|
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII letters, or underscores and can't start with a digit
|
||||||
- Identifier quoted with ``: Original content is kept, no escaping
|
- Identifier quoted with ``: Original content is kept, no escaping
|
||||||
2. If there are escape characters in values
|
2. If there are escape characters in values
|
||||||
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
||||||
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`" and "`\_`", instead of "%" and "\_".
|
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`" and "`\_`", instead of "%" and "\_".
|
||||||
|
|
|
@ -184,7 +184,7 @@ Provides information about standard tables and subtables.
|
||||||
|
|
||||||
## INS_COLUMNS
|
## INS_COLUMNS
|
||||||
|
|
||||||
| # | **列名** | **数据类型** | **说明** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------- | ---------------------- |
|
| --- | :---------: | ------------- | ---------------------- |
|
||||||
| 1 | table_name | BINARY(192) | Table name |
|
| 1 | table_name | BINARY(192) | Table name |
|
||||||
| 2 | db_name | BINARY(64) | Database name |
|
| 2 | db_name | BINARY(64) | Database name |
|
||||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
|
||||||
description: This document describes how to use the SHOW statement in TDengine.
|
description: This document describes how to use the SHOW statement in TDengine.
|
||||||
---
|
---
|
||||||
|
|
||||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||||
|
|
||||||
## SHOW APPS
|
## SHOW APPS
|
||||||
|
|
||||||
|
@ -129,6 +129,14 @@ SHOW QNODES;
|
||||||
|
|
||||||
Shows information about qnodes in the system.
|
Shows information about qnodes in the system.
|
||||||
|
|
||||||
|
## SHOW QUERIES
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW QUERIES;
|
||||||
|
```
|
||||||
|
|
||||||
|
Shows the queries in progress in the system.
|
||||||
|
|
||||||
## SHOW SCORES
|
## SHOW SCORES
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -179,45 +187,45 @@ SHOW TABLE DISTRIBUTED table_name;
|
||||||
|
|
||||||
Shows how table data is distributed.
|
Shows how table data is distributed.
|
||||||
|
|
||||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
show table distributed d0\G;
|
show table distributed d0\G;
|
||||||
```
|
```
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary> Show Example </summary>
|
<summary> Show Example </summary>
|
||||||
<pre><code>
|
<pre><code>
|
||||||
*************************** 1.row ***************************
|
*************************** 1.row ***************************
|
||||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
|
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||||
|
|
||||||
Total_Blocks: Table `d0` contains 5 blocks in total
|
Total_Blocks: Table `d0` contains 5 blocks in total
|
||||||
|
|
||||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||||
|
|
||||||
Average_size: The average size of each block is 18.73 KB
|
Average_size: The average size of each block is 18.73 KB
|
||||||
|
|
||||||
Compression_Ratio: The data compression rate is 23.98%
|
Compression_Ratio: The data compression rate is 23.98%
|
||||||
|
|
||||||
*************************** 2.row ***************************
|
*************************** 2.row ***************************
|
||||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||||
|
|
||||||
Total_Rows: Table `d0` contains 20,000 rows
|
Total_Rows: Table `d0` contains 20,000 rows
|
||||||
|
|
||||||
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, i.e. there are no such rows
|
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, i.e. there are no such rows
|
||||||
|
|
||||||
MinRows: The minimum number of rows in a block is 3,616
|
MinRows: The minimum number of rows in a block is 3,616
|
||||||
|
|
||||||
MaxRows: The maximum number of rows in a block is 4,096
|
MaxRows: The maximum number of rows in a block is 4,096
|
||||||
|
|
||||||
Average_Rows: The average number of rows in a block is 4,000
|
Average_Rows: The average number of rows in a block is 4,000
|
||||||
|
|
||||||
*************************** 3.row ***************************
|
*************************** 3.row ***************************
|
||||||
_block_dist: Total_Tables=[1] Total_Files=[2]
|
_block_dist: Total_Tables=[1] Total_Files=[2]
|
||||||
|
|
||||||
Total_Tables: The number of child tables, 1 in this example
|
Total_Tables: The number of child tables, 1 in this example
|
||||||
|
|
||||||
Total_Files: The number of files storing the table's data, 2 in this example
|
Total_Files: The number of files storing the table's data, 2 in this example
|
||||||
|
|
||||||
*************************** 4.row ***************************
|
*************************** 4.row ***************************
|
||||||
|
|
||||||
|
@ -353,7 +361,7 @@ SHOW VARIABLES;
|
||||||
SHOW DNODE dnode_id VARIABLES;
|
SHOW DNODE dnode_id VARIABLES;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
||||||
|
|
||||||
## SHOW VGROUPS
|
## SHOW VGROUPS
|
||||||
|
|
||||||
|
@ -361,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
|
||||||
SHOW [db_name.]VGROUPS;
|
SHOW [db_name.]VGROUPS;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows information about all vgroups in the current database.
|
Shows information about all vgroups in the current database.
|
||||||
|
|
||||||
## SHOW VNODES
|
## SHOW VNODES
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
|
||||||
- Information that you input is given in lowercase.
|
- Information that you input is given in lowercase.
|
||||||
- \[ \] means optional input, excluding [] itself.
|
- \[ \] means optional input, excluding [] itself.
|
||||||
- | means one of a few options, excluding | itself.
|
- | means one of a few options, excluding | itself.
|
||||||
- … means the item prior to it can be repeated multiple times.
|
- ... means the item prior to it can be repeated multiple times.
|
||||||
|
|
||||||
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
||||||
|
|
||||||
|
|
|
@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
|
||||||
chmod +x TDinsight.sh
|
chmod +x TDinsight.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Prepare:
|
Prepare:
|
||||||
|
|
||||||
1. TDengine Server
|
1. TDengine Server
|
||||||
|
|
||||||
- The URL of REST service:for example `http://localhost:6041` if TDengine is deployed locally
|
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
|
||||||
- User name and password
|
- User name and password
|
||||||
|
|
||||||
2. Grafana Alert Notification
|
2. Grafana Alert Notification
|
||||||
|
|
|
@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
|
||||||
|
|
||||||
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
||||||
|
|
||||||
Diagnostic steps:
|
Diagnostic steps:
|
||||||
|
|
||||||
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
||||||
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
||||||
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
||||||
|
|
||||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||||
Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
|
Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
|
||||||
|
|
||||||
Output of the server side for the example is below:
|
Output of the server side for the example is below:
|
||||||
|
|
|
@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
|
||||||
|
|
||||||
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
||||||
|
|
||||||
- authentication information is shown below:
|
- authentication information is shown below:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
Authorization: Taosd <TOKEN>
|
Authorization: Taosd <TOKEN>
|
||||||
```
|
```
|
||||||
|
|
||||||
- Basic authentication information is shown below:
|
- Basic authentication information is shown below:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
Authorization: Basic <TOKEN>
|
Authorization: Basic <TOKEN>
|
||||||
|
|
|
@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
|
||||||
|
|
||||||
After TDengine server or client installation, `taos.h` is located at
|
After TDengine server or client installation, `taos.h` is located at
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux: `/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows: `C:\TDengine\include`
|
||||||
- macOS:`/usr/local/include`
|
- macOS: `/usr/local/include`
|
||||||
|
|
||||||
The dynamic libraries for the TDengine client driver are located in:
|
The dynamic libraries for the TDengine client driver are located in:
|
||||||
|
|
||||||
|
@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
||||||
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
||||||
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
||||||
|
|
||||||
schemaless 其他相关的接口
|
Other schemaless-related interfaces:
|
||||||
|
|
||||||
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
||||||
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
||||||
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
||||||
|
|
|
@ -90,46 +90,35 @@ For specific error codes, please refer to.
|
||||||
| 0x2306 | Batch is empty! | prepare statement Add parameters and then execute batch. |
|
| 0x2306 | Batch is empty! | prepare statement Add parameters and then execute batch. |
|
||||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use execute update(), not execute query(). |
|
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use execute update(), not execute query(). |
|
||||||
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use execute query(), not execute update(). |
|
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use execute query(), not execute update(). |
|
||||||
| 0x2309 | invalid sql for executeQuery: (?) | - |
|
|
||||||
| 0x230a | Database not specified or available | - |
|
|
||||||
| 0x230b | invalid sql for executeUpdate: (?) | - |
|
|
||||||
| 0x230c | invalid sql for execute: (?) | - |
|
|
||||||
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
|
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
|
||||||
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
|
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
|
||||||
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
|
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
|
||||||
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
|
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
|
||||||
| 0x2311 | can't register JDBC-RESTful driver | - |
|
|
||||||
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
||||||
| 0x2313 | invalid sql | - |
|
|
||||||
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
|
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
|
||||||
| 0x2315 | unknown taos type in tdengine | Whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
|
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
|
||||||
| 0x2316 | unknown timestamp precision | - |
|
|
||||||
| 0x2317 | | wrong request type was used in the REST connection. |
|
| 0x2317 | | wrong request type was used in the REST connection. |
|
||||||
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
|
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
|
||||||
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
||||||
| 0x231a | password is required | Password information is missing when creating a connection |
|
| 0x231a | password is required | Password information is missing when creating a connection |
|
||||||
| 0x231b | invalid json format | - |
|
|
||||||
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
||||||
| 0x2350 | unknown error | Unknown exception, please return to the developer on github. |
|
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
|
||||||
| 0x2351 | failed to create subscription | - |
|
|
||||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
||||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
||||||
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
|
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
|
||||||
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
|
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
|
||||||
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
|
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
|
||||||
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||||
| 0x2358 | fetch to the end of resultSet | - |
|
|
||||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
|
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
|
||||||
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
|
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
|
||||||
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
|
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
|
||||||
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
|
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
|
||||||
| 0x2374 | consumer config error | - |
|
|
||||||
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
|
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
|
||||||
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
||||||
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
||||||
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
||||||
| - | can't create connection with server within: | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
||||||
| - | failed to complete the task within the specified time : | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
||||||
|
|
||||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||||
|
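For context, a minimal JDBC sketch of how these codes are typically inspected follows; the connection URL is a placeholder, and the assumption that the connector surfaces the codes above through `SQLException#getErrorCode()` should be verified against your driver version.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class JdbcErrorCodeDemo {
    public static void main(String[] args) {
        // Placeholder REST (TAOS-RS) URL for illustration only.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // Deliberately wrong usage: issuing a SELECT via executeUpdate().
            stmt.executeUpdate("SELECT server_version()");
        } catch (SQLException e) {
            // Expected to map to an entry in the table above, e.g. 0x2308.
            System.out.printf("error code: 0x%x, message: %s%n", e.getErrorCode(), e.getMessage());
        }
    }
}
```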
@ -981,8 +970,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
- group.id: consumer: Specifies the group that the consumer is in.
|
- group.id: Specifies the consumer group that the consumer belongs to.
|
||||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
||||||
- td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
|
- td.connect.type: Specifies the connection type to TDengine, `jni` or `WebSocket`. The default is `jni`.
|
||||||
- httpConnectTimeout:WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||||
- messageWaitTimeout:socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
- For more information, see [Consumer Parameters](../../../develop/tmq). A minimal configuration sketch using these options follows this list.
|
||||||
|
|
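A minimal configuration sketch using the options above; the group name and the deserializer class are hypothetical, and additional connection properties may be required depending on your deployment.

```java
import java.sql.SQLException;
import java.util.Properties;

import com.taosdata.jdbc.tmq.TaosConsumer;

public class ConsumerConfigDemo {
    public static void main(String[] args) throws SQLException {
        Properties config = new Properties();
        config.setProperty("group.id", "group1");            // consumer group (illustrative)
        config.setProperty("td.connect.type", "WebSocket");   // or "jni" (the default)
        config.setProperty("httpConnectTimeout", "5000");     // WebSocket connect timeout, ms
        config.setProperty("messageWaitTimeout", "10000");    // socket timeout, ms
        // Hypothetical deserializer extending com.taosdata.jdbc.tmq.ReferenceDeserializer.
        config.setProperty("value.deserializer", "com.example.MeterDeserializer");

        TaosConsumer<Object> consumer = new TaosConsumer<>(config);
        // subscribe and poll as shown in the following section, then:
        consumer.close();
    }
}
```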
||||||
#### Subscribe to consume data
|
#### Subscribe to consume data
|
||||||
|
@ -1278,9 +1267,9 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
||||||
|
|
||||||
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
||||||
|
|
||||||
**Cause**:taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||||
|
|
||||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||||
|
|
||||||
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
||||||
|
|
||||||
|
|
|
@ -121,7 +121,7 @@ The parameters are described as follows:
|
||||||
- **username/password**: Username and password used to create connections.
|
- **username/password**: Username and password used to create connections.
|
||||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
||||||
- **database**: Specify the default database to connect to. It's optional.
|
- **database**: Specify the default database to connect to. It's optional.
|
||||||
- **params**:Optional parameters.
|
- **params**: Optional parameters.
|
||||||
|
|
||||||
A sample DSN description string is as follows:
|
A sample DSN description string is as follows:
|
||||||
|
|
||||||
|
|
|
@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
|
||||||
|
|
||||||
All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified.
|
All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified.
|
||||||
|
|
||||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||||
- `user`: TDengine user name. The default is `root`.
|
- `user`: TDengine user name. The default is `root`.
|
||||||
- `password`: TDengine user password. The default is `taosdata`.
|
- `password`: TDengine user password. The default is `taosdata`.
|
||||||
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
||||||
|
|
|
@ -321,18 +321,18 @@ let cursor = conn.cursor();
|
||||||
| package name | version | TDengine version | Description |
|
| package name | version | TDengine version | Description |
|
||||||
|------------------|---------|---------------------|------------------------------------------------------------------|
|
|------------------|---------|---------------------|------------------------------------------------------------------|
|
||||||
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||||
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
|
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
|
||||||
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||||
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||||
### REST Connector
|
### REST Connector
|
||||||
|
|
||||||
| package name | version | TDengine version | Description |
|
| package name | version | TDengine version | Description |
|
||||||
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
||||||
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||||
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
|
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
|
||||||
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||||
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
|
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
|
||||||
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||||
|
|
||||||
## API Reference
|
## API Reference
|
||||||
|
|
||||||
|
|
|
@ -165,7 +165,7 @@ The parameters are described as follows:
|
||||||
* **username/password**: Username and password used to create connections.
|
* **username/password**: Username and password used to create connections.
|
||||||
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
||||||
* **database**: Specify the default database to connect to. It's optional.
|
* **database**: Specify the default database to connect to. It's optional.
|
||||||
* **params**:Optional parameters.
|
* **params**: Optional parameters.
|
||||||
|
|
||||||
A sample DSN description string is as follows:
|
A sample DSN description string is as follows:
|
||||||
|
|
||||||
|
@ -279,7 +279,7 @@ ws://localhost:6041/test
|
||||||
| TDengine.Connector | Description |
|
| TDengine.Connector | Description |
|
||||||
|--------------------|--------------------------------|
|
|--------------------|--------------------------------|
|
||||||
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
|
||||||
| 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding|
|
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions |
|
||||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||||
|
|
|
@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
|
||||||
|
|
||||||
PHP Connector relies on TDengine client driver.
|
PHP Connector relies on TDengine client driver.
|
||||||
|
|
||||||
Project Repository:<https://github.com/Yurunsoft/php-tdengine>
|
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
|
||||||
|
|
||||||
After TDengine client or server is installed, `taos.h` is located at:
|
After TDengine client or server is installed, `taos.h` is located at:
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux: `/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows: `C:\TDengine\include`
|
||||||
- macOS:`/usr/local/include`
|
- macOS: `/usr/local/include`
|
||||||
|
|
||||||
TDengine client driver is located at:
|
TDengine client driver is located at:
|
||||||
|
|
||||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||||
- Windows: `C:\TDengine\taos.dll`
|
- Windows: `C:\TDengine\taos.dll`
|
||||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
- Windows、Linux、MacOS
|
- Windows, Linux, and macOS
|
||||||
|
|
||||||
- PHP >= 7.4
|
- PHP >= 7.4
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
|
||||||
|
|
||||||
### Install php-tdengine
|
### Install php-tdengine
|
||||||
|
|
||||||
**Download Source Code Package and Unzip:**
|
**Download Source Code Package and Unzip:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
||||||
|
@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
||||||
|
|
||||||
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||||
|
|
||||||
**Non-Swoole Environment:**
|
**Non-Swoole Environment:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure && make -j && make install
|
phpize && ./configure && make -j && make install
|
||||||
```
|
```
|
||||||
|
|
||||||
**Specify TDengine location:**
|
**Specify TDengine location:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
||||||
|
@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
|
||||||
> `--with-tdengine-dir=` is followed by TDengine location.
|
> `--with-tdengine-dir=` is followed by TDengine location.
|
||||||
> It's useful in case TDengine installatio location can't be found automatically or MacOS.
|
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
|
||||||
|
|
||||||
**Swoole Environment:**
|
**Swoole Environment:**
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
phpize && ./configure --enable-swoole && make -j && make install
|
phpize && ./configure --enable-swoole && make -j && make install
|
||||||
|
|
|
@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
|
||||||
- ** trying_interval ** : Specify interval between keep trying insert. Valid value is a positive number. Only valid when keep trying be enabled. Available with v3.0.9+.
|
- **trying_interval**: Specifies the interval between retry attempts when insertion fails. The value must be a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
||||||
|
|
||||||
- ** childtable_from and childtable_to ** : specify the child table range to create. The range is [childtable_from, childtable_to).
|
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
|
||||||
|
|
||||||
- ** continue_if_fail ** : allow the user to specify the reaction if the insertion failed.
|
- **continue_if_fail**: Allows the user to specify the reaction if an insertion fails.
|
||||||
|
|
||||||
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
||||||
|
|
|
@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
|
||||||
|
|
||||||
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
|
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, and you can see the monitoring results.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## TDinsight dashboard details
|
## TDinsight dashboard details
|
||||||
|
|
||||||
|
|
|
@ -151,7 +151,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | -------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning |Switch for allowing TDengine to collect and report crash related information |
|
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
|
||||||
| Value Range | 0,1 0: Not allowed;1:allowed |
|
| Value Range | 0,1 0: Not allowed; 1: allowed |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
|
||||||
|
@ -183,7 +183,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
| -------- | -------------------------------- |
|
| -------- | -------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
| Meaning | Whether count()/hyperloglog() returns a value if the input data is empty or NULL |
|
||||||
| Vlue Range | 0:Return empty line,1:Return 0 |
|
| Value Range | 0: Return empty row; 1: Return 0 |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
| Notes | When this parameter is setting to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clause, and input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
||||||
|
|
||||||
|
@ -661,7 +661,7 @@ The charset that takes effect is UTF-8.
|
||||||
|
|
||||||
## 3.0 Parameters
|
## 3.0 Parameters
|
||||||
|
|
||||||
| # | **参数** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
||||||
| 1 | firstEp | Yes | Yes | |
|
| 1 | firstEp | Yes | Yes | |
|
||||||
| 2 | secondEp | Yes | Yes | |
|
| 2 | secondEp | Yes | Yes | |
|
||||||
|
|
|
@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
|
||||||
- Group by column name(s): `group by` or `partition by` columns name split by comma. By setting `Group by column name(s)`, it can show multi-dimension data if Sql is `group by` or `partition by`. Such as, it can show data by `dnode_ep` if sql is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data when the SQL uses `group by` or `partition by`. For example, it can show data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
||||||
- Format to: format legend for `group by` or `partition by`. Such as it can display series data by `dnode_ep` if sql is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep` and `Format to` is `mem_system_{{dnode_ep}}`.
|
- Format to: legend format for `group by` or `partition by`. For example, it can display series data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
Since the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL command to specify the database name.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The following queries the average system memory usage for the specified interval on each TDengine server.
|
|
||||||
The example to query the average system memory usage for the specified interval on each server as follows.
|
The following example queries the average system memory usage for the specified interval on each server.
|
||||||
|
|
||||||

|

|
||||||
|
@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for details.
|
||||||
|
|
||||||
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
|
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a partial list:
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
|
||||||
|
|
||||||
### Edit SQL fields
|
### Edit SQL fields
|
||||||
|
|
||||||
Copy SQL bellow and paste it to the SQL edit area:
|
Copy the SQL below and paste it into the SQL edit area:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
|
||||||
|
|
||||||
### Edit "action"
|
### Edit "action"
|
||||||
|
|
||||||
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
|
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
|
||||||
|
|
||||||
```
|
```
|
||||||
Basic cm9vdDp0YW9zZGF0YQ==
|
Basic cm9vdDp0YW9zZGF0YQ==
|
||||||
```
|
```
|
||||||
|
|
|
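The token is simply the Base64 encoding of `<username>:<password>`; a minimal sketch for generating it with the standard library only (the credentials shown are the defaults mentioned above):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthToken {
    public static void main(String[] args) {
        // Encode "<username>:<password>" to build the Authorization header value.
        String token = Base64.getEncoder()
                .encodeToString("root:taosdata".getBytes(StandardCharsets.UTF_8));
        System.out.println("Basic " + token);  // prints: Basic cm9vdDp0YW9zZGF0YQ==
    }
}
```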
@ -46,15 +46,14 @@ Execute in any directory:
|
||||||
|
|
||||||
````
|
````
|
||||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||||
tar xzf confluent-7.1.1.tar.gz -C /opt/test
|
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||||
````
|
````
|
||||||
|
|
||||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
||||||
|
|
||||||
```title=".profile"
|
```title=".profile"
|
||||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||||
PATH=$CONFLUENT_HOME/bin
|
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||||
export PATH
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
||||||
|
@ -329,7 +328,15 @@ DROP DATABASE IF EXISTS test;
|
||||||
CREATE DATABASE test;
|
CREATE DATABASE test;
|
||||||
USE test;
|
USE test;
|
||||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
|
||||||
|
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
|
||||||
|
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
|
||||||
|
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
|
||||||
|
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
|
||||||
|
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||||
```
|
```
|
||||||
|
|
||||||
Use TDengine CLI to execute SQL script
|
Use TDengine CLI to execute SQL script
|
||||||
|
@ -384,7 +391,7 @@ confluent local services connect connector status
|
||||||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||||
|
|
||||||
````
|
````
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSinkConnector
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
confluent local services connect connector unload TDengineSourceConnector
|
||||||
````
|
````
|
||||||
|
|
||||||
|
|
|
@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
|
||||||
|
|
||||||
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
|
The TDengine team immediately saw the benefits of using TDengine to process time-series data and Data Studio to analyze it, and they got to work creating a connector for Data Studio.
|
||||||
|
|
||||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
|
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
|
||||||
|
|
|
@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
|
||||||
|
|
||||||
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). mnode adopts RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. Other two follower mnodes need to be created through SQL command in TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Each dnode can communicate with each other to automatically get the EP of all mnodes.
|
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The mnode group adopts the RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. The other two follower mnodes need to be created through SQL commands in the TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Dnodes communicate with each other to automatically get the EPs of all mnodes.
|
||||||
|
|
||||||
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
|
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, which means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. The TDengine client driver can get the list of qnodes through communication with the mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. A qnode can get data from a vnode and send the execution result to other qnodes for further processing. By introducing qnodes, TDengine achieves the separation of storage and computing.
|
||||||
|
|
||||||
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. snode is not coupled with a specific stream, that means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, it's identified by the EP of the dnode. mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
|
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. An snode is not coupled with a specific stream, which means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, and it's identified by the EP of the dnode. The mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
|
||||||
|
|
||||||
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using RAFT protocol. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID, to each vgroup. Virtual nodes with the same vnode group ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replication for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
|
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using the RAFT protocol. Write operations can only be performed on the leader vnode, and are then replicated to follower vnodes, thus ensuring that a single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system-unique ID, aka the VGroup ID, to each vgroup. Virtual nodes with the same vgroup ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replicas for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
|
||||||
|
|
||||||
|
@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
|
||||||
- Step : Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
|
- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode" (see the sketch after these steps)
|
||||||
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
||||||
|
|
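For illustration, a minimal sketch of step 1 issued programmatically instead of through the CLI; the connection URL and the new node's FQDN and port are placeholders, and the same statement can equally be typed into the TDengine CLI.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AddDnodeDemo {
    public static void main(String[] args) throws Exception {
        // Connect to any working data node of the existing cluster (placeholder URL).
        String url = "jdbc:TAOS://h1.taosdata.com:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // Register the new data node by its End Point (FQDN:port), as in step 1.
            stmt.executeUpdate("CREATE DNODE \"h2.taosdata.com:6030\"");
        }
    }
}
```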
||||||
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||||
|
|
||||||
### A Typical Data Writing Process
|
### A Typical Data Writing Process
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
|
||||||
|
|
||||||
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
||||||
|
|
||||||
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables' quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
||||||
|
|
||||||
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
|
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
|
||||||
|
|
||||||
|
@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
|
||||||
<center> Figure 3: TDengine Leader writing process </center>
|
<center> Figure 3: TDengine Leader writing process </center>
|
||||||
|
|
||||||
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
||||||
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `“wal_level”` is set to 1, vnode doesn't invoked fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
|
2. Leader vnode will write the original request packet into the database log file WAL. If the database configuration parameter `wal_level` is set to 1, the vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter, `wal_fsync_period` (see the sketch after this list).
|
||||||
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
|
3. If there are multiple replicas, the leader vnode will forward the data packet to follower vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data;
|
||||||
4. Leader vnode Writes the data into memory and add the record to “skip list”;
|
4. Leader vnode writes the data into memory and adds the record to the "skip list";
|
||||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||||
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
|
6. If any of Step 2, 3 or 4 fails, the error is returned directly to the application.
|
||||||
|
|
||||||
|
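As a sketch of where `wal_level` and `wal_fsync_period` are set, the example below creates a database with both options; the database name and connection URL are placeholders, and any connector or the TDengine CLI can run the same statement.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class WalOptionsDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder native-connection URL for illustration only.
        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // wal_level 2 with wal_fsync_period 3000: fsync is invoked at most every 3000 ms;
            // wal_level 1 would skip fsync entirely, as described in step 2 above.
            stmt.executeUpdate(
                "CREATE DATABASE IF NOT EXISTS demo WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000");
        }
    }
}
```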
@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
|
||||||
|
|
||||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||||
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is same as the leader vnode.
|
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is same as the leader vnode.
|
||||||
3. Write into memory and add the record to “skip list”.
|
3. Writes the data into memory and adds the record to the "skip list".
|
||||||
|
|
||||||
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
|
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
|
||||||
|
|
||||||
|
@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
|
||||||
|
|
||||||
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
||||||
|
|
||||||
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
||||||
|
|
||||||
### Synchronous Replication
|
### Synchronous Replication
|
||||||
|
|
||||||
|
@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba

### Tiered Storage

By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can use the system parameter "dataDir" to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, data older than a week is stored on local hard disk, and data older than four weeks is stored on a network storage device. This reduces storage costs and ensures efficient data access. The movement of data between storage media is done automatically by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".

dataDir format is as follows:

@ -202,7 +202,7 @@ dataDir data_path [tier_level]

Where data_path is the folder path of the mount point and tier_level is the media storage tier. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same storage tier are distributed across all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level specified, which is called the special mount disk (path). This mount path defaults to level 0 storage media and contains special file links; it cannot be removed, otherwise it will have a devastating impact on the written data.

Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
```
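The hunk above is cut off before the remaining dataDir lines. For the six-disk scenario described in the preceding paragraph, a complete configuration might look like the following sketch (the tier assignments are assumptions taken from that prose, not part of the hunk):

```
# sketch: tier levels inferred from the description above
dataDir /mnt/disk1/taos
dataDir /mnt/disk2/taos 0
dataDir /mnt/disk3/taos 1
dataDir /mnt/disk4/taos 1
dataDir /mnt/disk5/taos 2
dataDir /mnt/disk6/taos 2
```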
@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici

### 2. Manual data migration

Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then write the merged timelines into the database through SQL statements (see the sketch below).
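As a minimal sketch of what such a merged, multi-value write could look like (the table and column layout here is hypothetical, chosen only to illustrate two OpenTSDB timelines of the same host collapsing into one row):

```sql
-- Hypothetical: cpu.usage_user and cpu.usage_system for host1 merged into one multi-value table
INSERT INTO cpu_host1 VALUES ('2022-01-01 00:00:00.000', 12.3, 4.5);
```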

Manual migration of data requires attention to the following two issues:

@ -258,7 +258,7 @@ Equivalent function: apercentile

Example:

```sql
select apercentile(col1, 50, "t-digest") from table_name
```
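For comparison, the third argument can be omitted to fall back to the default percentile algorithm (a sketch, assuming the two-argument form of apercentile):

```sql
select apercentile(col1, 50) from table_name
```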

Remark:

@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)

:::note

1. The example code above works with both REST connections and native connections.
2. The only caveat: because the REST interface is stateless, the `use db` statement cannot be used to switch databases. Besides specifying the database in the REST parameters, you can also qualify the table in the SQL statement itself as <db_name>.<table_name> (see the example below).

:::

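A minimal sketch of such a fully qualified query (the database and table names here are hypothetical):

```sql
SELECT * FROM power.meters LIMIT 3;
```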
@ -25,7 +25,8 @@ import CDemo from "./_sub_c.mdx";

This document does not cover the basics of message queues themselves; please look them up separately if needed.

Note: data subscription consumes data from the WAL. If WAL files are deleted according to the WAL retention policy, the data they contained can no longer be consumed. Set `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` appropriately when creating the database, according to your consumption needs, and make sure the application consumes data in time, so that no data loss occurs. The behavior of data subscription is similar to widely used message-queue products such as Kafka (a sketch of such a database definition follows below).
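As a minimal sketch (the database name and retention value are hypothetical, chosen only to illustrate the option mentioned above):

```sql
-- Keep WAL files for at least 24 hours (86400 seconds) so subscribers can catch up.
CREATE DATABASE power WAL_RETENTION_PERIOD 86400;
```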

## Main Data Structures and APIs

The TMQ subscription-related APIs and data structures for each language are as follows:

@ -293,7 +294,6 @@ CREATE TOPIC topic_name AS DATABASE db_name;
|
||||||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||||
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true |
|
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true |
|
||||||
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
|
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
|
||||||
| `experimental.snapshot.enable` | boolean | 是否允许从 TSDB 消费数据。当其关闭时,只能消费依据 WAL 保留策略仍然在WAL中的数据;当其打开时,除WAL中的数据以外,也能够消费已经从WAL中删除但落盘到TSDB中的数据 | 实验功能,默认关闭 |
|
|
||||||
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 |
|
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 |
|
||||||
|
|
||||||
对于不同编程语言,其设置方式如下:
|
对于不同编程语言,其设置方式如下:
|
||||||
|
@ -311,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
|
||||||
tmq_conf_set(conf, "td.connect.user", "root");
|
tmq_conf_set(conf, "td.connect.user", "root");
|
||||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
|
||||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||||
|
|
||||||
|
@ -367,7 +366,6 @@ conf := &tmq.ConfigMap{
|
||||||
"td.connect.port": "6030",
|
"td.connect.port": "6030",
|
||||||
"client.id": "test_tmq_c",
|
"client.id": "test_tmq_c",
|
||||||
"enable.auto.commit": "false",
|
"enable.auto.commit": "false",
|
||||||
"experimental.snapshot.enable": "true",
|
|
||||||
"msg.with.table.name": "true",
|
"msg.with.table.name": "true",
|
||||||
}
|
}
|
||||||
consumer, err := NewConsumer(conf)
|
consumer, err := NewConsumer(conf)
|
||||||
|
@ -417,7 +415,6 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||||
| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` |
|
| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` |
|
||||||
| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms |
|
| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms |
|
||||||
| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` |
|
| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` |
|
||||||
| `experimental.snapshot.enable` | string | 是否允许从 TSDB 消费数据 | 合法值:`true`, `false` |
|
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
|
|
|
@ -90,46 +90,35 @@ JDBC 连接器可能报错的错误码包括 4 种:
|
||||||
| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
|
| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
|
||||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
|
| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
|
||||||
| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
|
| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
|
||||||
| 0x2309 | invalid sql for executeQuery: (?) | - |
|
|
||||||
| 0x230a | Database not specified or available | - |
|
|
||||||
| 0x230b | invalid sql for executeUpdate: (?) | - |
|
|
||||||
| 0x230c | invalid sql for execute: (?) | - |
|
|
||||||
| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
|
| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
|
||||||
| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
||||||
| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
||||||
| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
|
| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
|
||||||
| 0x2311 | can't register JDBC-RESTful driver | - |
|
|
||||||
| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
|
| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
|
||||||
| 0x2313 | invalid sql | - |
|
|
||||||
| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
|
| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
|
||||||
| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
|
| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
|
||||||
| 0x2316 | unknown timestamp precision | - |
|
|
||||||
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
||||||
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
||||||
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
||||||
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
||||||
| 0x231b | invalid json format | - |
|
|
||||||
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
||||||
| 0x2350 | unknown error | 未知异常,请在 github 返回给开发人员。 |
|
| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
|
||||||
| 0x2351 | failed to create subscription | - |
|
|
||||||
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
||||||
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
||||||
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
||||||
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
||||||
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
||||||
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
||||||
| 0x2358 | fetch to the end of resultSet | - |
|
|
||||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
||||||
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
||||||
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
||||||
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
||||||
| 0x2374 | consumer config error | - |
|
|
||||||
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
||||||
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
||||||
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
||||||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||||
| - | can't create connection with server within: | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
| - | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||||
| - | failed to complete the task within the specified time : | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
| - | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||||
|
|
||||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||||
|
|
|
@ -33,7 +33,7 @@ column_definition:

SHOW STABLES [LIKE tb_name_wildcard];
```

Shows all supertables in the current database (a usage sketch follows below).
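A minimal sketch of the LIKE form (the wildcard pattern is hypothetical):

```sql
SHOW STABLES LIKE 'meters%';
```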

### Show the CREATE statement of a supertable

@ -55,7 +55,7 @@ window_clause: {

  | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

interp_clause:
    RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)

partition_by_clause:
    PARTITION BY expr [, expr] ...
@ -888,7 +888,7 @@ INTERP(expr)

- The output time range of INTERP is specified by RANGE(timestamp1, timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 (required) is the start of the output time range, i.e. if the interpolation condition is met at timestamp1 it becomes the first output record; timestamp2 (required) is the end of the output time range, i.e. the timestamp of the last output record cannot be later than timestamp2.
- INTERP determines the number of results in the output time range according to EVERY(time_unit), i.e. it interpolates at a fixed interval (time_unit) starting from timestamp1. time_unit accepts the time units 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day) and 1w (week). For example, EVERY(500a) interpolates the specified data every 500 milliseconds.
- INTERP decides how to interpolate at each qualifying moment according to the FILL clause. For how to use FILL, see the [FILL clause](../distinguished/#fill-子句).
- When INTERP is applied to a supertable, it sorts the data of all subtables of that supertable by the primary key column and interpolates over the result; it can also be combined with PARTITION BY tbname to force the result down to a single timeline (see the sketch after this list).
- INTERP can be used together with the pseudo column _irowts to return the timestamp corresponding to each interpolated point (supported since version 3.0.2.0).
- INTERP can be used together with the pseudo column _isfilled to indicate whether a returned row is an original record or was generated by the interpolation algorithm (supported since version 3.0.3.0).

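A minimal sketch of an INTERP query following the rules above (the supertable, column, and timestamps are hypothetical):

```sql
SELECT _irowts, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
EVERY(1s)
FILL(LINEAR);
```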
@ -129,6 +129,14 @@ SHOW QNODES;

Shows information about the QNODEs (query nodes) in the current system.

## SHOW QUERIES

```sql
SHOW QUERIES;
```

Shows the queries currently running in the system.

## SHOW SCORES

```sql
@ -189,7 +197,7 @@ SHOW TABLE DISTRIBUTED table_name;

*************************** 1.row ***************************

_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]

Total_Blocks: table d0 occupies 5 blocks

@ -735,7 +735,6 @@ charset 的有效值是 UTF-8。
|
||||||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||||
| 19 | keepColumnName | 是 | 否 | 3.0 行为未知 |
|
|
||||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
|
|
|
@ -200,6 +200,12 @@ docker run -d \

- Group by column name(s): comma-separated (half-width commas) `group by` or `partition by` column names. For a `group by` or `partition by` query, setting the `Group by` columns lets you display multi-dimensional data. For example, with the INPUT SQL `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, set the Group by column name to `dnode_ep` to display the data per `dnode_ep`.
- Format to: the legend format for multi-dimensional data in Group by or Partition by scenarios. For the INPUT SQL above, setting Format to to `mem_system_{{dnode_ep}}` shows formatted column names as the legend.

:::note

Because the REST interface is stateless, the `use db` statement cannot be used to switch databases. In the SQL statements of the Grafana plugin you can use <db_name>.<table_name> to specify the database, as the INPUT SQL above already does with `log.dnodes_info`.

:::

Following the default prompts, querying the average system memory usage of the server hosting the current TDengine deployment over the specified interval looks like this:

![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)

@ -48,15 +48,14 @@ Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍

```
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
tar xzf confluent-7.1.1.tar.gz -C /opt/
```

Then add the `$CONFLUENT_HOME/bin` directory to the PATH.

```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1
export PATH=$CONFLUENT_HOME/bin:$PATH
```

The lines above can be appended to the current user's profile file (~/.profile or ~/.bash_profile).

@ -333,7 +332,15 @@ DROP DATABASE IF EXISTS test;

CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
    d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
    d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
    d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
    d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
    d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
    d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
    d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```

Use the TDengine CLI to execute the SQL file.

@ -388,7 +395,7 @@ confluent local services connect connector status

If you followed the preceding steps, there should now be two active connectors. Unload them with the commands below:

```
confluent local services connect connector unload TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector
```

@ -77,7 +77,7 @@ description: 一些常见问题的解决方法汇总

- On Windows, use the PowerShell command Test-NetConnection -ComputerName {fqdn} -Port {port} to check whether the server-side port is reachable.

11. You can also use the network connectivity check built into the taos program to verify whether the specified port between server and client is open: [Diagnostics and more](../../operation/diagnose/).

### 5. What to do when you encounter the error "Unable to resolve FQDN"?

@ -10,7 +10,7 @@
<description>Demo project for TDengine</description>

<properties>
    <spring.version>5.3.27</spring.version>
</properties>

<dependencies>

@ -167,7 +167,7 @@ DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name
|
||||||
DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
||||||
DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
||||||
// let stmt to reclaim TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
|
// let stmt to reclaim TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
|
||||||
DLL_EXPORT void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields);
|
DLL_EXPORT void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields);
|
||||||
|
|
||||||
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
|
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
|
||||||
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
|
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
|
||||||
|
@ -230,6 +230,9 @@ DLL_EXPORT int taos_get_tables_vgId(TAOS *taos, const char *db, const char *tabl
|
||||||
|
|
||||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||||
|
|
||||||
|
// set heart beat thread quit mode , if quicByKill 1 then kill thread else quit from inner
|
||||||
|
DLL_EXPORT void taos_set_hb_quit(int8_t quitByKill);
|
||||||
|
|
||||||
DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type);
|
DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type);
|
||||||
|
|
||||||
/* --------------------------schemaless INTERFACE------------------------------- */
|
/* --------------------------schemaless INTERFACE------------------------------- */
|
||||||
|
@ -270,10 +273,10 @@ DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||||
|
|
||||||
/* ------------------------TMQ CONSUMER INTERFACE------------------------ */
|
/* ------------------------TMQ CONSUMER INTERFACE------------------------ */
|
||||||
typedef struct tmq_topic_assignment {
|
typedef struct tmq_topic_assignment {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
int64_t currentOffset;
|
int64_t currentOffset;
|
||||||
int64_t begin;
|
int64_t begin;
|
||||||
int64_t end;
|
int64_t end;
|
||||||
} tmq_topic_assignment;
|
} tmq_topic_assignment;
|
||||||
|
|
||||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||||
|
@ -283,8 +286,9 @@ DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char* pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment);
|
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
|
||||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char* pTopicName, int32_t vgId, int64_t offset);
|
int32_t *numOfAssignment);
|
||||||
|
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||||
|
|
||||||
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
||||||
|
|
||||||
|
|
|
@ -12,7 +12,7 @@
|
||||||
* You should have received a copy of the GNU Affero General Public License
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef TDENGINE_SYSTABLE_H
|
#ifndef TDENGINE_SYSTABLE_H
|
||||||
#define TDENGINE_SYSTABLE_H
|
#define TDENGINE_SYSTABLE_H
|
||||||
|
|
||||||
|
|
|
@ -213,14 +213,6 @@ enum {
|
||||||
FETCH_TYPE__NONE,
|
FETCH_TYPE__NONE,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
int8_t fetchType;
|
|
||||||
union {
|
|
||||||
SSDataBlock data;
|
|
||||||
void* meta;
|
|
||||||
};
|
|
||||||
} SFetchRet;
|
|
||||||
|
|
||||||
typedef struct SVarColAttr {
|
typedef struct SVarColAttr {
|
||||||
int32_t* offset; // start position for each entry in the list
|
int32_t* offset; // start position for each entry in the list
|
||||||
uint32_t length; // used buffer size that contain the valid data
|
uint32_t length; // used buffer size that contain the valid data
|
||||||
|
@ -342,6 +334,8 @@ typedef struct {
|
||||||
float f;
|
float f;
|
||||||
};
|
};
|
||||||
size_t length;
|
size_t length;
|
||||||
|
bool keyEscaped;
|
||||||
|
bool valueEscaped;
|
||||||
} SSmlKv;
|
} SSmlKv;
|
||||||
|
|
||||||
#define QUERY_ASC_FORWARD_STEP 1
|
#define QUERY_ASC_FORWARD_STEP 1
|
||||||
|
@ -380,6 +374,8 @@ typedef struct STUidTagInfo {
|
||||||
#define UD_GROUPID_COLUMN_INDEX 1
|
#define UD_GROUPID_COLUMN_INDEX 1
|
||||||
#define UD_TAG_COLUMN_INDEX 2
|
#define UD_TAG_COLUMN_INDEX 2
|
||||||
|
|
||||||
|
int32_t taosGenCrashJsonMsg(int signum, char **pMsg, int64_t clusterId, int64_t startTime);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -177,7 +177,9 @@ extern int32_t tsUptimeInterval;
|
||||||
extern int32_t tsRpcRetryLimit;
|
extern int32_t tsRpcRetryLimit;
|
||||||
extern int32_t tsRpcRetryInterval;
|
extern int32_t tsRpcRetryInterval;
|
||||||
|
|
||||||
extern bool tsDisableStream;
|
extern bool tsDisableStream;
|
||||||
|
extern int64_t tsStreamBufferSize;
|
||||||
|
extern int64_t tsCheckpointInterval;
|
||||||
|
|
||||||
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||||
|
|
||||||
|
|
|
@ -26,6 +26,10 @@ extern "C" {
|
||||||
#include "tgrantCfg.h"
|
#include "tgrantCfg.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifndef GRANTS_COL_MAX_LEN
|
||||||
|
#define GRANTS_COL_MAX_LEN 196
|
||||||
|
#endif
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TSDB_GRANT_ALL,
|
TSDB_GRANT_ALL,
|
||||||
TSDB_GRANT_TIME,
|
TSDB_GRANT_TIME,
|
||||||
|
@ -47,23 +51,49 @@ typedef enum {
|
||||||
int32_t grantCheck(EGrantType grant);
|
int32_t grantCheck(EGrantType grant);
|
||||||
|
|
||||||
#ifndef GRANTS_CFG
|
#ifndef GRANTS_CFG
|
||||||
#define GRANTS_SCHEMA \
|
#ifdef TD_ENTERPRISE
|
||||||
static const SSysDbTableSchema grantsSchema[] = { \
|
#define GRANTS_SCHEMA \
|
||||||
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
static const SSysDbTableSchema grantsSchema[] = { \
|
||||||
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "opc_da", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "opc_ua", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "pi", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "kafka", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "influxdb", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "mqtt", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
}
|
}
|
||||||
|
#else
|
||||||
|
#define GRANTS_SCHEMA \
|
||||||
|
static const SSysDbTableSchema grantsSchema[] = { \
|
||||||
|
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
|
||||||
|
}
|
||||||
|
#endif
|
||||||
#define GRANT_CFG_ADD
|
#define GRANT_CFG_ADD
|
||||||
#define GRANT_CFG_SET
|
#define GRANT_CFG_SET
|
||||||
#define GRANT_CFG_GET
|
#define GRANT_CFG_GET
|
||||||
|
|
|
@ -416,7 +416,7 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
|
||||||
return pSW;
|
return pSW;
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
static FORCE_INLINE void tDeleteSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
||||||
if (pSchemaWrapper) {
|
if (pSchemaWrapper) {
|
||||||
taosMemoryFree(pSchemaWrapper->pSchema);
|
taosMemoryFree(pSchemaWrapper->pSchema);
|
||||||
taosMemoryFree(pSchemaWrapper);
|
taosMemoryFree(pSchemaWrapper);
|
||||||
|
@ -691,6 +691,7 @@ typedef struct {
|
||||||
|
|
||||||
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
|
void tFreeSAlterUserReq(SAlterUserReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
|
@ -1232,6 +1233,14 @@ typedef struct {
|
||||||
SEp ep;
|
SEp ep;
|
||||||
} SDnodeEp;
|
} SDnodeEp;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int32_t id;
|
||||||
|
int8_t isMnode;
|
||||||
|
SEp ep;
|
||||||
|
char active[TSDB_ACTIVE_KEY_LEN];
|
||||||
|
char connActive[TSDB_CONN_ACTIVE_KEY_LEN];
|
||||||
|
} SDnodeInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int64_t dnodeVer;
|
int64_t dnodeVer;
|
||||||
SDnodeCfg dnodeCfg;
|
SDnodeCfg dnodeCfg;
|
||||||
|
@ -1625,6 +1634,21 @@ typedef struct {
|
||||||
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
||||||
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
||||||
|
|
||||||
|
enum {
|
||||||
|
RESTORE_TYPE__ALL = 1,
|
||||||
|
RESTORE_TYPE__MNODE,
|
||||||
|
RESTORE_TYPE__VNODE,
|
||||||
|
RESTORE_TYPE__QNODE,
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int32_t dnodeId;
|
||||||
|
int8_t restoreType;
|
||||||
|
} SRestoreDnodeReq;
|
||||||
|
|
||||||
|
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
|
||||||
|
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
|
||||||
|
|
||||||
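// Added commentary (not part of this diff): RESTORE_TYPE__* and SRestoreDnodeReq
// presumably back a new "RESTORE DNODE"-style maintenance command (e.g. something like
// `RESTORE DNODE 1;`, or restoring only the mnode/vnode/qnode on a dnode); the exact SQL
// syntax is an assumption here, inferred from the TK_RESTORE token added elsewhere in
// this commit.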
typedef struct {
|
typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
char config[TSDB_DNODE_CONFIG_LEN];
|
char config[TSDB_DNODE_CONFIG_LEN];
|
||||||
|
@ -1905,7 +1929,7 @@ typedef struct {
|
||||||
#define STREAM_FILL_HISTORY_ON 1
|
#define STREAM_FILL_HISTORY_ON 1
|
||||||
#define STREAM_FILL_HISTORY_OFF 0
|
#define STREAM_FILL_HISTORY_OFF 0
|
||||||
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
|
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
|
||||||
#define STREAM_DEFAULT_IGNORE_UPDATE 0
|
#define STREAM_DEFAULT_IGNORE_UPDATE 1
|
||||||
#define STREAM_CREATE_STABLE_TRUE 1
|
#define STREAM_CREATE_STABLE_TRUE 1
|
||||||
#define STREAM_CREATE_STABLE_FALSE 0
|
#define STREAM_CREATE_STABLE_FALSE 0
|
||||||
|
|
||||||
|
@ -2098,7 +2122,6 @@ static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
||||||
SArray* lostConsumers; // SArray<int64_t>
|
|
||||||
SArray* removedConsumers; // SArray<int64_t>
|
SArray* removedConsumers; // SArray<int64_t>
|
||||||
SArray* newConsumers; // SArray<int64_t>
|
SArray* newConsumers; // SArray<int64_t>
|
||||||
} SMqRebInfo;
|
} SMqRebInfo;
|
||||||
|
@ -2109,10 +2132,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
tstrncpy(pRebInfo->key, key, TSDB_SUBSCRIBE_KEY_LEN);
|
tstrncpy(pRebInfo->key, key, TSDB_SUBSCRIBE_KEY_LEN);
|
||||||
pRebInfo->lostConsumers = taosArrayInit(0, sizeof(int64_t));
|
|
||||||
if (pRebInfo->lostConsumers == NULL) {
|
|
||||||
goto _err;
|
|
||||||
}
|
|
||||||
pRebInfo->removedConsumers = taosArrayInit(0, sizeof(int64_t));
|
pRebInfo->removedConsumers = taosArrayInit(0, sizeof(int64_t));
|
||||||
if (pRebInfo->removedConsumers == NULL) {
|
if (pRebInfo->removedConsumers == NULL) {
|
||||||
goto _err;
|
goto _err;
|
||||||
|
@ -2123,7 +2142,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
|
||||||
}
|
}
|
||||||
return pRebInfo;
|
return pRebInfo;
|
||||||
_err:
|
_err:
|
||||||
taosArrayDestroy(pRebInfo->lostConsumers);
|
|
||||||
taosArrayDestroy(pRebInfo->removedConsumers);
|
taosArrayDestroy(pRebInfo->removedConsumers);
|
||||||
taosArrayDestroy(pRebInfo->newConsumers);
|
taosArrayDestroy(pRebInfo->newConsumers);
|
||||||
taosMemoryFreeClear(pRebInfo);
|
taosMemoryFreeClear(pRebInfo);
|
||||||
|
@ -2908,6 +2926,42 @@ typedef struct SMqVgOffset {
|
||||||
int32_t tEncodeMqVgOffset(SEncoder* pEncoder, const SMqVgOffset* pOffset);
|
int32_t tEncodeMqVgOffset(SEncoder* pEncoder, const SMqVgOffset* pOffset);
|
||||||
int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset);
|
int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset);
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
SMsgHead head;
|
||||||
|
int32_t taskId;
|
||||||
|
} SVPauseStreamTaskReq;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int8_t reserved;
|
||||||
|
} SVPauseStreamTaskRsp;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
char name[TSDB_STREAM_FNAME_LEN];
|
||||||
|
int8_t igNotExists;
|
||||||
|
} SMPauseStreamReq;
|
||||||
|
|
||||||
|
int32_t tSerializeSMPauseStreamReq(void* buf, int32_t bufLen, const SMPauseStreamReq* pReq);
|
||||||
|
int32_t tDeserializeSMPauseStreamReq(void* buf, int32_t bufLen, SMPauseStreamReq* pReq);
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
SMsgHead head;
|
||||||
|
int32_t taskId;
|
||||||
|
int8_t igUntreated;
|
||||||
|
} SVResumeStreamTaskReq;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int8_t reserved;
|
||||||
|
} SVResumeStreamTaskRsp;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
char name[TSDB_STREAM_FNAME_LEN];
|
||||||
|
int8_t igNotExists;
|
||||||
|
int8_t igUntreated;
|
||||||
|
} SMResumeStreamReq;
|
||||||
|
|
||||||
|
int32_t tSerializeSMResumeStreamReq(void* buf, int32_t bufLen, const SMResumeStreamReq* pReq);
|
||||||
|
int32_t tDeserializeSMResumeStreamReq(void* buf, int32_t bufLen, SMResumeStreamReq* pReq);
|
||||||
|
|
||||||
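// Added commentary (not part of this diff): these pause/resume messages presumably back
// new `PAUSE STREAM [IF EXISTS] stream_name;` and `RESUME STREAM [IF EXISTS]
// [IGNORE UNTREATED] stream_name;` statements, with igNotExists mapping to IF EXISTS and
// igUntreated to IGNORE UNTREATED; the exact SQL syntax is an assumption inferred from
// the TK_PAUSE / TK_RESUME / TK_UNTREATED tokens added elsewhere in this commit.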
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_TABLE_FNAME_LEN];
|
char name[TSDB_TABLE_FNAME_LEN];
|
||||||
char stb[TSDB_TABLE_FNAME_LEN];
|
char stb[TSDB_TABLE_FNAME_LEN];
|
||||||
|
@ -3430,10 +3484,10 @@ typedef struct {
|
||||||
char data[]; // SSubmitReq2
|
char data[]; // SSubmitReq2
|
||||||
} SSubmitReq2Msg;
|
} SSubmitReq2Msg;
|
||||||
|
|
||||||
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
|
int32_t tEncodeSubmitReq(SEncoder* pCoder, const SSubmitReq2* pReq);
|
||||||
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
|
int32_t tDecodeSubmitReq(SDecoder* pCoder, SSubmitReq2* pReq);
|
||||||
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
|
void tDestroySubmitTbData(SSubmitTbData* pTbData, int32_t flag);
|
||||||
void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
|
void tDestroySubmitReq(SSubmitReq2* pReq, int32_t flag);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t affectedRows;
|
int32_t affectedRows;
|
||||||
|
|
|
@ -178,6 +178,9 @@ enum {
|
||||||
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
|
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_VND_MSG)
|
TD_NEW_MSG_SEG(TDMT_VND_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
|
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
|
||||||
|
@ -255,6 +258,8 @@ enum {
|
||||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_MON_MSG)
|
TD_NEW_MSG_SEG(TDMT_MON_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
|
||||||
|
|
|
@ -16,335 +16,341 @@
|
||||||
#ifndef _TD_COMMON_TOKEN_H_
|
#ifndef _TD_COMMON_TOKEN_H_
|
||||||
#define _TD_COMMON_TOKEN_H_
|
#define _TD_COMMON_TOKEN_H_
|
||||||
|
|
||||||
#define TK_OR 1
|
#define TK_OR 1
|
||||||
#define TK_AND 2
|
#define TK_AND 2
|
||||||
#define TK_UNION 3
|
#define TK_UNION 3
|
||||||
#define TK_ALL 4
|
#define TK_ALL 4
|
||||||
#define TK_MINUS 5
|
#define TK_MINUS 5
|
||||||
#define TK_EXCEPT 6
|
#define TK_EXCEPT 6
|
||||||
#define TK_INTERSECT 7
|
#define TK_INTERSECT 7
|
||||||
#define TK_NK_BITAND 8
|
#define TK_NK_BITAND 8
|
||||||
#define TK_NK_BITOR 9
|
#define TK_NK_BITOR 9
|
||||||
#define TK_NK_LSHIFT 10
|
#define TK_NK_LSHIFT 10
|
||||||
#define TK_NK_RSHIFT 11
|
#define TK_NK_RSHIFT 11
|
||||||
#define TK_NK_PLUS 12
|
#define TK_NK_PLUS 12
|
||||||
#define TK_NK_MINUS 13
|
#define TK_NK_MINUS 13
|
||||||
#define TK_NK_STAR 14
|
#define TK_NK_STAR 14
|
||||||
#define TK_NK_SLASH 15
|
#define TK_NK_SLASH 15
|
||||||
#define TK_NK_REM 16
|
#define TK_NK_REM 16
|
||||||
#define TK_NK_CONCAT 17
|
#define TK_NK_CONCAT 17
|
||||||
#define TK_CREATE 18
|
#define TK_CREATE 18
|
||||||
#define TK_ACCOUNT 19
|
#define TK_ACCOUNT 19
|
||||||
#define TK_NK_ID 20
|
#define TK_NK_ID 20
|
||||||
#define TK_PASS 21
|
#define TK_PASS 21
|
||||||
#define TK_NK_STRING 22
|
#define TK_NK_STRING 22
|
||||||
#define TK_ALTER 23
|
#define TK_ALTER 23
|
||||||
#define TK_PPS 24
|
#define TK_PPS 24
|
||||||
#define TK_TSERIES 25
|
#define TK_TSERIES 25
|
||||||
#define TK_STORAGE 26
|
#define TK_STORAGE 26
|
||||||
#define TK_STREAMS 27
|
#define TK_STREAMS 27
|
||||||
#define TK_QTIME 28
|
#define TK_QTIME 28
|
||||||
#define TK_DBS 29
|
#define TK_DBS 29
|
||||||
#define TK_USERS 30
|
#define TK_USERS 30
|
||||||
#define TK_CONNS 31
|
#define TK_CONNS 31
|
||||||
#define TK_STATE 32
|
#define TK_STATE 32
|
||||||
#define TK_USER 33
|
#define TK_USER 33
|
||||||
#define TK_ENABLE 34
|
#define TK_ENABLE 34
|
||||||
#define TK_NK_INTEGER 35
|
#define TK_NK_INTEGER 35
|
||||||
#define TK_SYSINFO 36
|
#define TK_SYSINFO 36
|
||||||
#define TK_DROP 37
|
#define TK_DROP 37
|
||||||
#define TK_GRANT 38
|
#define TK_GRANT 38
|
||||||
#define TK_ON 39
|
#define TK_ON 39
|
||||||
#define TK_TO 40
|
#define TK_TO 40
|
||||||
#define TK_REVOKE 41
|
#define TK_REVOKE 41
|
||||||
#define TK_FROM 42
|
#define TK_FROM 42
|
||||||
#define TK_SUBSCRIBE 43
|
#define TK_SUBSCRIBE 43
|
||||||
#define TK_NK_COMMA 44
|
#define TK_NK_COMMA 44
|
||||||
#define TK_READ 45
|
#define TK_READ 45
|
||||||
#define TK_WRITE 46
|
#define TK_WRITE 46
|
||||||
#define TK_NK_DOT 47
|
#define TK_NK_DOT 47
|
||||||
#define TK_WITH 48
|
#define TK_WITH 48
|
||||||
#define TK_DNODE 49
|
#define TK_DNODE 49
|
||||||
#define TK_PORT 50
|
#define TK_PORT 50
|
||||||
#define TK_DNODES 51
|
#define TK_DNODES 51
|
||||||
#define TK_NK_IPTOKEN 52
|
#define TK_RESTORE 52
|
||||||
#define TK_FORCE 53
|
#define TK_NK_IPTOKEN 53
|
||||||
#define TK_LOCAL 54
|
#define TK_FORCE 54
|
||||||
#define TK_QNODE 55
|
#define TK_LOCAL 55
|
||||||
#define TK_BNODE 56
|
#define TK_QNODE 56
|
||||||
#define TK_SNODE 57
|
#define TK_BNODE 57
|
||||||
#define TK_MNODE 58
|
#define TK_SNODE 58
|
||||||
#define TK_DATABASE 59
|
#define TK_MNODE 59
|
||||||
#define TK_USE 60
|
#define TK_VNODE 60
|
||||||
#define TK_FLUSH 61
|
#define TK_DATABASE 61
|
||||||
#define TK_TRIM 62
|
#define TK_USE 62
|
||||||
#define TK_COMPACT 63
|
#define TK_FLUSH 63
|
||||||
#define TK_IF 64
|
#define TK_TRIM 64
|
||||||
#define TK_NOT 65
|
#define TK_COMPACT 65
|
||||||
#define TK_EXISTS 66
|
#define TK_IF 66
|
||||||
#define TK_BUFFER 67
|
#define TK_NOT 67
|
||||||
#define TK_CACHEMODEL 68
|
#define TK_EXISTS 68
|
||||||
#define TK_CACHESIZE 69
|
#define TK_BUFFER 69
|
||||||
#define TK_COMP 70
|
#define TK_CACHEMODEL 70
|
||||||
#define TK_DURATION 71
|
#define TK_CACHESIZE 71
|
||||||
#define TK_NK_VARIABLE 72
|
#define TK_COMP 72
|
||||||
#define TK_MAXROWS 73
|
#define TK_DURATION 73
|
||||||
#define TK_MINROWS 74
|
#define TK_NK_VARIABLE 74
|
||||||
#define TK_KEEP 75
|
#define TK_MAXROWS 75
|
||||||
#define TK_PAGES 76
|
#define TK_MINROWS 76
|
||||||
#define TK_PAGESIZE 77
|
#define TK_KEEP 77
|
||||||
#define TK_TSDB_PAGESIZE 78
|
#define TK_PAGES 78
|
||||||
#define TK_PRECISION 79
|
#define TK_PAGESIZE 79
|
||||||
#define TK_REPLICA 80
|
#define TK_TSDB_PAGESIZE 80
|
||||||
#define TK_VGROUPS 81
|
#define TK_PRECISION 81
|
||||||
#define TK_SINGLE_STABLE 82
|
#define TK_REPLICA 82
|
||||||
#define TK_RETENTIONS 83
|
#define TK_VGROUPS 83
|
||||||
#define TK_SCHEMALESS 84
|
#define TK_SINGLE_STABLE 84
|
||||||
#define TK_WAL_LEVEL 85
|
#define TK_RETENTIONS 85
|
||||||
#define TK_WAL_FSYNC_PERIOD 86
|
#define TK_SCHEMALESS 86
|
||||||
#define TK_WAL_RETENTION_PERIOD 87
|
#define TK_WAL_LEVEL 87
|
||||||
#define TK_WAL_RETENTION_SIZE 88
|
#define TK_WAL_FSYNC_PERIOD 88
|
||||||
#define TK_WAL_ROLL_PERIOD 89
|
#define TK_WAL_RETENTION_PERIOD 89
|
||||||
#define TK_WAL_SEGMENT_SIZE 90
|
#define TK_WAL_RETENTION_SIZE 90
|
||||||
#define TK_STT_TRIGGER 91
|
#define TK_WAL_ROLL_PERIOD 91
|
||||||
#define TK_TABLE_PREFIX 92
|
#define TK_WAL_SEGMENT_SIZE 92
|
||||||
#define TK_TABLE_SUFFIX 93
|
#define TK_STT_TRIGGER 93
|
||||||
#define TK_NK_COLON 94
|
#define TK_TABLE_PREFIX 94
|
||||||
#define TK_MAX_SPEED 95
|
#define TK_TABLE_SUFFIX 95
|
||||||
#define TK_START 96
|
#define TK_NK_COLON 96
|
||||||
#define TK_TIMESTAMP 97
|
#define TK_MAX_SPEED 97
|
||||||
#define TK_END 98
|
 #define TK_START                 98
+#define TK_TIMESTAMP             99
+#define TK_END                  100
 (tokens below keep their order but are renumbered 99–195 -> 101–197, i.e. shifted by +2)
   TK_TABLE, TK_NK_LP, TK_NK_RP, TK_STABLE, TK_ADD, TK_COLUMN, TK_MODIFY, TK_RENAME, TK_TAG, TK_SET,
   TK_NK_EQ, TK_USING, TK_TAGS, TK_BOOL, TK_TINYINT, TK_SMALLINT, TK_INT, TK_INTEGER, TK_BIGINT, TK_FLOAT,
   TK_DOUBLE, TK_BINARY, TK_NCHAR, TK_UNSIGNED, TK_JSON, TK_VARCHAR, TK_MEDIUMBLOB, TK_BLOB, TK_VARBINARY, TK_DECIMAL,
   TK_COMMENT, TK_MAX_DELAY, TK_WATERMARK, TK_ROLLUP, TK_TTL, TK_SMA, TK_DELETE_MARK, TK_FIRST, TK_LAST, TK_SHOW,
   TK_PRIVILEGES, TK_DATABASES, TK_TABLES, TK_STABLES, TK_MNODES, TK_QNODES, TK_FUNCTIONS, TK_INDEXES, TK_ACCOUNTS, TK_APPS,
   TK_CONNECTIONS, TK_LICENCES, TK_GRANTS, TK_QUERIES, TK_SCORES, TK_TOPICS, TK_VARIABLES, TK_CLUSTER, TK_BNODES, TK_SNODES,
   TK_TRANSACTIONS, TK_DISTRIBUTED, TK_CONSUMERS, TK_SUBSCRIPTIONS, TK_VNODES, TK_ALIVE, TK_LIKE, TK_TBNAME, TK_QTAGS, TK_AS,
   TK_INDEX, TK_FUNCTION, TK_INTERVAL, TK_COUNT, TK_LAST_ROW, TK_TOPIC, TK_META, TK_CONSUMER, TK_GROUP, TK_DESC,
   TK_DESCRIBE, TK_RESET, TK_QUERY, TK_CACHE, TK_EXPLAIN, TK_ANALYZE, TK_VERBOSE, TK_NK_BOOL, TK_RATIO, TK_NK_FLOAT,
   TK_OUTPUTTYPE, TK_AGGREGATE, TK_BUFSIZE, TK_LANGUAGE, TK_REPLACE, TK_STREAM, TK_INTO
+#define TK_PAUSE                198
+#define TK_RESUME               199
 (next group renumbered 196–203 -> 200–207, i.e. shifted by +4)
   TK_TRIGGER, TK_AT_ONCE, TK_WINDOW_CLOSE, TK_IGNORE, TK_EXPIRED, TK_FILL_HISTORY, TK_UPDATE, TK_SUBTABLE
+#define TK_UNTREATED            208
 (remaining group renumbered 204–329 -> 209–334, i.e. shifted by +5)
   TK_KILL, TK_CONNECTION, TK_TRANSACTION, TK_BALANCE, TK_VGROUP, TK_LEADER, TK_MERGE, TK_REDISTRIBUTE, TK_SPLIT, TK_DELETE,
   TK_INSERT, TK_NULL, TK_NK_QUESTION, TK_NK_ARROW, TK_ROWTS, TK_QSTART, TK_QEND, TK_QDURATION, TK_WSTART, TK_WEND,
   TK_WDURATION, TK_IROWTS, TK_ISFILLED, TK_CAST, TK_NOW, TK_TODAY, TK_TIMEZONE, TK_CLIENT_VERSION, TK_SERVER_VERSION, TK_SERVER_STATUS,
   TK_CURRENT_USER, TK_CASE, TK_WHEN, TK_THEN, TK_ELSE, TK_BETWEEN, TK_IS, TK_NK_LT, TK_NK_GT, TK_NK_LE,
   TK_NK_GE, TK_NK_NE, TK_MATCH, TK_NMATCH, TK_CONTAINS, TK_IN, TK_JOIN, TK_INNER, TK_SELECT, TK_DISTINCT,
   TK_WHERE, TK_PARTITION, TK_BY, TK_SESSION, TK_STATE_WINDOW, TK_EVENT_WINDOW, TK_SLIDING, TK_FILL, TK_VALUE, TK_VALUE_F,
   TK_NONE, TK_PREV, TK_NULL_F, TK_LINEAR, TK_NEXT, TK_HAVING, TK_RANGE, TK_EVERY, TK_ORDER, TK_SLIMIT,
   TK_SOFFSET, TK_LIMIT, TK_OFFSET, TK_ASC, TK_NULLS, TK_ABORT, TK_AFTER, TK_ATTACH, TK_BEFORE, TK_BEGIN,
   TK_BITAND, TK_BITNOT, TK_BITOR, TK_BLOCKS, TK_CHANGE, TK_COMMA, TK_CONCAT, TK_CONFLICT, TK_COPY, TK_DEFERRED,
   TK_DELIMITERS, TK_DETACH, TK_DIVIDE, TK_DOT, TK_EACH, TK_FAIL, TK_FILE, TK_FOR, TK_GLOB, TK_ID,
   TK_IMMEDIATE, TK_IMPORT, TK_INITIALLY, TK_INSTEAD, TK_ISNULL, TK_KEY, TK_MODULES, TK_NK_BITNOT, TK_NK_SEMI, TK_NOTNULL,
   TK_OF, TK_PLUS, TK_PRIVILEGE, TK_RAISE, TK_RESTRICT, TK_ROW, TK_SEMI, TK_STAR, TK_STATEMENT, TK_STRICT,
   TK_STRING, TK_TIMES, TK_VALUES, TK_VARIABLE, TK_VIEW, TK_WAL
 #define TK_NK_SPACE             600   (unchanged)
 #define TK_NK_COMMENT           601   (unchanged)
@@ -82,6 +82,7 @@ typedef struct SCatalogReq {
   SArray* pUser;          // element is SUserAuthInfo
   SArray* pTableIndex;    // element is SNAME
   SArray* pTableCfg;      // element is SNAME
+  SArray* pTableTag;      // element is SNAME
   bool    qNodeRequired;  // valid qnode
   bool    dNodeRequired;  // valid dnode
   bool    svrVerRequired;

@@ -105,6 +106,7 @@ typedef struct SMetaData {
   SArray*   pUser;       // pRes = SUserAuthRes*
   SArray*   pQnodeList;  // pRes = SArray<SQueryNodeLoad>*
   SArray*   pTableCfg;   // pRes = STableCfg*
+  SArray*   pTableTag;   // pRes = SArray<STagVal>*
   SArray*   pDnodeList;  // pRes = SArray<SEpSet>*
   SMetaRes* pSvrVer;     // pRes = char*
 } SMetaData;

@@ -122,8 +124,8 @@ typedef struct SSTableVersion {
   char     stbName[TSDB_TABLE_NAME_LEN];
   uint64_t dbId;
   uint64_t suid;
-  int16_t  sversion;
-  int16_t  tversion;
+  int32_t  sversion;
+  int32_t  tversion;
   int32_t  smaVer;
 } SSTableVersion;

@@ -312,6 +314,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
 int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);

+int32_t catalogGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
+
 int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableCfg** pCfg);

 int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp* pRsp);
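For reference, a minimal sketch of how the new catalogGetTableTag entry point could be called (illustrative only, not part of this commit; the catalog handle, connection info and table name are assumed to be set up by the caller, and releasing the result with taosArrayDestroy is an assumption about ownership):

#include "catalog.h"  // SCatalog, SRequestConnInfo, SName, SArray, catalogGetTableTag

// Fetch the tag values of one (child) table through the catalog.
static int32_t demoGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName) {
  SArray* pTagVals = NULL;  // per the SMetaData comment above, elements are STagVal
  int32_t code = catalogGetTableTag(pCtg, pConn, pTableName, &pTagVals);
  if (code != 0) {
    return code;  // propagate the catalog error code
  }
  // ... inspect pTagVals here, then release it (e.g. taosArrayDestroy(pTagVals)) ...
  return 0;
}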
@@ -190,9 +190,9 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int

 SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);

-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
+void    verifyOffset(void *pWalReader, STqOffsetVal* pOffset);

-int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);

 void qStreamSetOpen(qTaskInfo_t tinfo);

@@ -208,8 +208,6 @@ void* qExtractReaderFromStreamScanner(void* scanner);

 int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);

-int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
-
 int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo);
 int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver);
 int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
@@ -350,6 +350,11 @@ typedef struct SDropComponentNodeStmt {
   int32_t dnodeId;
 } SDropComponentNodeStmt;

+typedef struct SRestoreComponentNodeStmt {
+  ENodeType type;
+  int32_t   dnodeId;
+} SRestoreComponentNodeStmt;
+
 typedef struct SCreateTopicStmt {
   ENodeType type;
   char      topicName[TSDB_TABLE_NAME_LEN];

@@ -436,6 +441,19 @@ typedef struct SDropStreamStmt {
   bool ignoreNotExists;
 } SDropStreamStmt;

+typedef struct SPauseStreamStmt {
+  ENodeType type;
+  char      streamName[TSDB_TABLE_NAME_LEN];
+  bool      ignoreNotExists;
+} SPauseStreamStmt;
+
+typedef struct SResumeStreamStmt {
+  ENodeType type;
+  char      streamName[TSDB_TABLE_NAME_LEN];
+  bool      ignoreNotExists;
+  bool      ignoreUntreated;
+} SResumeStreamStmt;
+
 typedef struct SCreateFunctionStmt {
   ENodeType type;
   bool      orReplace;
@@ -211,6 +211,12 @@ typedef enum ENodeType {
   QUERY_NODE_SHOW_DB_ALIVE_STMT,
   QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
   QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
+  QUERY_NODE_RESTORE_DNODE_STMT,
+  QUERY_NODE_RESTORE_QNODE_STMT,
+  QUERY_NODE_RESTORE_MNODE_STMT,
+  QUERY_NODE_RESTORE_VNODE_STMT,
+  QUERY_NODE_PAUSE_STREAM_STMT,
+  QUERY_NODE_RESUME_STREAM_STMT,

   // logic plan node
   QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
@@ -379,6 +379,8 @@ typedef struct SVnodeModifyOpStmt {
   SName              usingTableName;
   const char*        pBoundCols;
   struct STableMeta* pTableMeta;
+  SNode*             pTagCond;
+  SArray*            pTableTag;
   SHashObj*          pVgroupsHashObj;
   SHashObj*          pTableBlockHashObj;  // SHashObj<tuid, STableDataCxt*>
   SHashObj*          pSubTableHashObj;
@@ -111,8 +111,8 @@ typedef struct STableMeta {
   // if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
   // info
-  int16_t       sversion;
-  int16_t       tversion;
+  int32_t       sversion;
+  int32_t       tversion;
   STableComInfo tableInfo;
   SSchema       schema[];
 } STableMeta;
@@ -14,19 +14,39 @@
  */

 #include "tdatablock.h"

+#include "rocksdb/c.h"
 #include "tdbInt.h"
+#include "tsimplehash.h"
+#include "tstreamFileState.h"
+
+#ifndef _STREAM_STATE_H_
+#define _STREAM_STATE_H_

 #ifdef __cplusplus
 extern "C" {
 #endif

-#ifndef _STREAM_STATE_H_
-#define _STREAM_STATE_H_
+// void* streamBackendInit(const char* path);
+// void  streamBackendCleanup(void* arg);
+// SListNode* streamBackendAddCompare(void* backend, void* arg);
+// void  streamBackendDelCompare(void* backend, void* arg);

 typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);

 typedef struct STdbState {
-  struct SStreamTask* pOwner;
+  rocksdb_t*                       rocksdb;
+  rocksdb_column_family_handle_t** pHandle;
+  rocksdb_writeoptions_t*          writeOpts;
+  rocksdb_readoptions_t*           readOpts;
+  rocksdb_options_t**              cfOpts;
+  rocksdb_options_t*               dbOpt;
+  struct SStreamTask*              pOwner;
+  void*                            param;
+  void*                            env;
+  SListNode*                       pComparNode;
+  void*                            pBackendHandle;
+  char                             idstr[64];
+  void*                            compactFactory;
+
   TDB* db;
   TTB* pStateDb;

@@ -40,19 +60,28 @@ typedef struct STdbState {

 // incremental state storage
 typedef struct {
   STdbState* pTdbState;
-  int32_t    number;
-  int64_t    checkPointId;
+  SStreamFileState* pFileState;
+  int32_t           number;
+  SSHashObj*        parNameMap;
+  int64_t           checkPointId;
+  int32_t           taskId;
+  int64_t           streamId;
 } SStreamState;

 SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);
-void streamStateClose(SStreamState* pState);
+void streamStateClose(SStreamState* pState, bool remove);
 int32_t streamStateBegin(SStreamState* pState);
 int32_t streamStateCommit(SStreamState* pState);
-int32_t streamStateAbort(SStreamState* pState);
-void    streamStateDestroy(SStreamState* pState);
+void    streamStateDestroy(SStreamState* pState, bool remove);
+int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);

 typedef struct {
+  rocksdb_iterator_t*    iter;
+  rocksdb_snapshot_t*    snapshot;
+  rocksdb_readoptions_t* readOpt;
+  rocksdb_t*             db;
+
   TBC*    pCur;
   int64_t number;
 } SStreamStateCur;

@@ -63,9 +92,13 @@ int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);

 int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
 int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+bool    streamStateCheck(SStreamState* pState, const SWinKey* key);
+int32_t streamStateGetByPos(SStreamState* pState, void* pos, void** pVal);
 int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
 int32_t streamStateClear(SStreamState* pState);
 void    streamStateSetNumber(SStreamState* pState, int32_t number);
+int32_t streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
+int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);

 int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen);
 int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);

@@ -89,7 +122,6 @@ int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void*
 int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
 void    streamFreeVal(void* val);

-SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
 SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
 SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
 SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);

@@ -109,9 +141,33 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
 int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
 int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);

-int32_t streamStatePutParTag(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen);
-int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen);
+/***compare func **/
+
+typedef struct SStateChekpoint {
+  char*   taskName;
+  int64_t checkpointId;
+} SStateChekpoint;
+
+// todo refactor
+typedef struct SStateKey {
+  SWinKey key;
+  int64_t opNum;
+} SStateKey;
+
+typedef struct SStateSessionKey {
+  SSessionKey key;
+  int64_t     opNum;
+} SStateSessionKey;
+
+typedef struct SStreamValue {
+  int64_t unixTimestamp;
+  int32_t len;
+  char*   data;
+} SStreamValue;
+
+int sessionRangeKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
+int sessionWinKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
+int stateSessionKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
+int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
 #if 0
 char* streamStateSessionDump(SStreamState* pState);
 char* streamStateIntervalDump(SStreamState* pState);
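For reference, a minimal sketch of the reworked state-store API declared above (illustrative only, not part of this commit; the path and owning SStreamTask come from the stream runtime, the page size/count values are placeholders, and SWinKey is taken from the existing stream headers):

#include "streamState.h"  // SStreamState, SWinKey, the streamState* declarations above

// Open a per-task state store, write and read back one window row, then commit and close.
static int32_t demoStreamState(char* path, struct SStreamTask* pTask) {
  SStreamState* pState = streamStateOpen(path, pTask, false, 4096, 256);  // placeholder page size / page count
  if (pState == NULL) return -1;

  SWinKey key = {0};                       // fill in the window key fields for the target window
  int32_t val = 42;
  int32_t code = streamStatePut(pState, &key, &val, sizeof(val));

  void*   pVal = NULL;
  int32_t vLen = 0;
  if (code == 0) code = streamStateGet(pState, &key, &pVal, &vLen);
  if (pVal != NULL) streamFreeVal(pVal);   // values handed out by the store are released with streamFreeVal

  if (code == 0) code = streamStateCommit(pState);
  streamStateClose(pState, false);         // the new 'remove' flag controls whether the backing files are deleted
  return code;
}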
@@ -14,15 +14,12 @@
  */

 #include "os.h"
-#include "executor.h"
-#include "query.h"
 #include "streamState.h"
 #include "tdatablock.h"
 #include "tdbInt.h"
 #include "tmsg.h"
 #include "tmsgcb.h"
 #include "tqueue.h"
-#include "trpc.h"

 #ifdef __cplusplus
 extern "C" {

@@ -39,6 +36,7 @@ enum {
   STREAM_STATUS__INIT,
   STREAM_STATUS__FAILED,
   STREAM_STATUS__RECOVER,
+  STREAM_STATUS__PAUSE,
 };

 enum {

@@ -50,7 +48,7 @@ enum {
   TASK_STATUS__RECOVER_PREPARE,
   TASK_STATUS__RECOVER1,
   TASK_STATUS__RECOVER2,
-  TASK_STATUS__RESTORE,  // only available for source task to replay WAL from the checkpoint
+  TASK_STATUS__PAUSE,
 };

 enum {

@@ -192,7 +190,7 @@ typedef struct {
 int32_t streamInit();
 void    streamCleanUp();

-SStreamQueue* streamQueueOpen();
+SStreamQueue* streamQueueOpen(int64_t cap);
 void          streamQueueClose(SStreamQueue* queue);

 static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {

@@ -206,14 +204,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
   atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
 }

-static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
-  return queue->qItem;
-}
-
 void* streamQueueNextItem(SStreamQueue* queue);

 SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
 void                streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);

 SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);

@@ -242,6 +236,7 @@ typedef struct {
   void*      vnode;  // not available to encoder and decoder
   FTbSink*   tbSinkFunc;
   STSchema*  pTSchema;
+  SSHashObj* pTblInfo;
 } STaskSinkTb;

 typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);

@@ -272,12 +267,14 @@ typedef struct SStreamId {

 typedef struct SCheckpointInfo {
   int64_t id;
   int64_t version;     // offset in WAL
+  int64_t currentVer;  // current offset in WAL, not serialize it
 } SCheckpointInfo;

 typedef struct SStreamStatus {
   int8_t taskStatus;
   int8_t schedStatus;
+  int8_t keepTaskStatus;
 } SStreamStatus;

 struct SStreamTask {

@@ -340,12 +337,16 @@ typedef struct SStreamMeta {
   TTB*         pTaskDb;
   TTB*         pCheckpointDb;
   SHashObj*    pTasks;
+  SArray*      pTaskList;  // SArray<task_id*>
   void*        ahandle;
   TXN*         txn;
   FTaskExpand* expandFunc;
   int32_t      vgId;
   SRWLatch     lock;
-  int8_t       walScan;
+  int32_t      walScanCounter;
+  void*        streamBackend;
+  int32_t      streamBackendId;
+  int64_t      streamBackendRid;
 } SStreamMeta;

 int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);

@@ -537,14 +538,17 @@ void streamTaskInputFail(SStreamTask* pTask);
 int32_t streamTryExec(SStreamTask* pTask);
 int32_t streamSchedExec(SStreamTask* pTask);
 int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock);
+bool    streamTaskShouldStop(const SStreamStatus* pStatus);
+bool    streamTaskShouldPause(const SStreamStatus* pStatus);

 int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);

 // recover and fill history
 int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version);
 int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version);
-int32_t streamProcessTaskCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq);
+int32_t streamTaskCheckStatus(SStreamTask* pTask);
 int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp, int64_t version);

 // common
 int32_t streamSetParamForRecover(SStreamTask* pTask);
 int32_t streamRestoreParam(SStreamTask* pTask);

@@ -561,22 +565,24 @@ int32_t streamAggRecoverPrepare(SStreamTask* pTask);
 // int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask);
 int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId);

+void streamMetaInit();
+void streamMetaCleanup();
 SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
 void         streamMetaClose(SStreamMeta* streamMeta);

 int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
 int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
 int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
 int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);

 SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
 void         streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
 void         streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);

 int32_t streamMetaBegin(SStreamMeta* pMeta);
 int32_t streamMetaCommit(SStreamMeta* pMeta);
 int32_t streamMetaRollBack(SStreamMeta* pMeta);
 int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);

 // checkpoint
 int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
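For reference, a small sketch of how an execution loop might consult the new pause/stop helpers declared above (illustrative only, not part of this commit; the call site and the overall control flow are assumptions):

// Assumes the declarations above:
//   bool streamTaskShouldStop(const SStreamStatus* pStatus);
//   bool streamTaskShouldPause(const SStreamStatus* pStatus);
static bool demoShouldKeepRunning(const SStreamStatus* pStatus) {
  if (streamTaskShouldStop(pStatus)) return false;   // task is being dropped or the node is closing
  if (streamTaskShouldPause(pStatus)) return false;  // PAUSE STREAM was issued; back off and retry later
  return true;
}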
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _STREAM_FILE_STATE_H_
+#define _STREAM_FILE_STATE_H_
+
+#include "os.h"
+
+#include "tarray.h"
+#include "tdef.h"
+#include "tlist.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct SStreamFileState SStreamFileState;
+typedef struct SRowBuffPos {
+  void* pRowBuff;
+  void* pKey;
+  bool  beFlushed;
+  bool  beUsed;
+} SRowBuffPos;
+
+typedef SList SStreamSnapshot;
+
+typedef TSKEY (*GetTsFun)(void*);
+
+SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, GetTsFun fp, void* pFile,
+                                      TSKEY delMark);
+void streamFileStateDestroy(SStreamFileState* pFileState);
+void streamFileStateClear(SStreamFileState* pFileState);
+bool needClearDiskBuff(SStreamFileState* pFileState);
+
+int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
+int32_t deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
+int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal);
+void    releaseRowBuffPos(SRowBuffPos* pBuff);
+bool    hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
+
+SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
+int32_t          flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
+int32_t          recoverSnapshot(SStreamFileState* pFileState);
+
+int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
+int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // _STREAM_FILE_STATE_H_
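For reference, a minimal sketch of the row-buffer cache declared in the new header above (illustrative only, not part of this commit; the memory budget, key/row sizes, delete mark, and the assumption that the key starts with a TSKEY and that pFile is the owning stream state are all placeholders):

#include "tstreamFileState.h"  // declarations from the new header above

// Timestamp extractor used by the cache to decide which rows are older than the delete mark.
static TSKEY demoGetTs(void* pKey) { return *(TSKEY*)pKey; }

static int32_t demoFileState(void* pStreamState /* backing state handle, assumed */) {
  // 64 MiB in-memory budget, 16-byte keys, 64-byte rows, keep everything newer than ts 0
  SStreamFileState* pFileState = streamFileStateInit(64 * 1024 * 1024, 16, 64, demoGetTs, pStreamState, 0);
  if (pFileState == NULL) return -1;

  char    key[16] = {0};
  void*   pRow = NULL;
  int32_t rowLen = 0;
  int32_t code = getRowBuff(pFileState, key, sizeof(key), &pRow, &rowLen);  // cached or newly materialized row
  // ... fill / read the row buffer here ...

  streamFileStateDestroy(pFileState);
  return code;
}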
@@ -40,9 +40,7 @@ typedef struct SUpdateInfo {
   TSKEY        minTS;
   SScalableBf *pCloseWinSBF;
   SHashObj    *pMap;
-  STimeWindow  scanWindow;
-  uint64_t     scanGroupId;
-  uint64_t     maxVersion;
+  uint64_t     maxDataVersion;
 } SUpdateInfo;

 SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);

@@ -50,8 +48,6 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
 TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
 bool  updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
 bool  updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
-void  updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
-bool  updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
 void  updateInfoDestroy(SUpdateInfo *pInfo);
 void  updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
 void  updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
@@ -133,6 +133,16 @@ int32_t tfsMkdirAt(STfs *pTfs, const char *rname, SDiskID diskId);
  */
 int32_t tfsMkdirRecurAt(STfs *pTfs, const char *rname, SDiskID diskId);

+/**
+ * @brief check directories exist in tfs.
+ *
+ * @param pTfs The fs object.
+ * @param rname The rel name of directory.
+ * @param diskId The disk ID.
+ * @return true for exist, false for not exist.
+ */
+bool tfsDirExistAt(STfs *pTfs, const char *rname, SDiskID diskId);
+
 /**
  * @brief Remove directory at all levels in tfs.
  *
@@ -127,8 +127,8 @@ typedef struct SWal {
 typedef struct {
   int64_t refId;
   int64_t refVer;
   // int64_t refFile;
   SWal   *pWal;
 } SWalRef;

 typedef struct {

@@ -138,6 +138,8 @@ typedef struct {
   int8_t enableRef;
 } SWalFilterCond;

+typedef struct SWalReader SWalReader;
+
 // todo hide this struct
 typedef struct SWalReader {
   SWal *pWal;

@@ -147,8 +149,6 @@ typedef struct SWalReader {
   int64_t curFileFirstVer;
   int64_t curVersion;
   int64_t capacity;
-  // int8_t curInvalid;
-  // int8_t curStopped;
   TdThreadMutex  mutex;
   SWalFilterCond cond;
   // TODO remove it

@@ -195,10 +195,11 @@ SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond);
 void    walCloseReader(SWalReader *pRead);
 void    walReadReset(SWalReader *pReader);
 int32_t walReadVer(SWalReader *pRead, int64_t ver);
-int32_t walReadSeekVer(SWalReader *pRead, int64_t ver);
+int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
 int32_t walNextValidMsg(SWalReader *pRead);
-int64_t walReaderGetCurrentVer(const SWalReader* pReader);
-void    walReaderValidVersionRange(SWalReader* pReader, int64_t *sver, int64_t *ever);
+int64_t walReaderGetCurrentVer(const SWalReader *pReader);
+int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
+void    walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);

 // only for tq usage
 void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);
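For reference, a minimal sketch of the renamed WAL reader API above (illustrative only, not part of this commit; pWal is an already-opened log, and treating 0 as the success return of walReaderSeekVer and walNextValidMsg is an assumption about the return-code convention):

#include "wal.h"  // SWal, SWalReader and the reader API shown above

// Replay the WAL from a given version using the renamed reader entry points.
static int32_t demoReplay(SWal* pWal, int64_t startVer) {
  SWalReader* pReader = walOpenReader(pWal, NULL);     // NULL: no filter condition
  if (pReader == NULL) return -1;

  int64_t sver = 0, ever = 0;
  walReaderValidVersionRange(pReader, &sver, &ever);   // clamp the requested start to the readable range
  if (startVer < sver) startVer = sver;

  int32_t code = walReaderSeekVer(pReader, startVer);  // was walReadSeekVer before this change
  while (code == 0 && walNextValidMsg(pReader) == 0) {
    int64_t ver = walReaderGetCurrentVer(pReader);
    (void)ver;                                         // ... hand the entry to the consumer ...
  }

  walCloseReader(pReader);
  return code;
}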
@@ -22,19 +22,25 @@ extern "C" {

 // If the error is in a third-party library, place this header file under the third-party library header file.
 // When you want to use this feature, you should find or add the same function in the following section.
-#if !defined(WINDOWS)
-
-#ifndef ALLOW_FORBID_FUNC
-#define malloc  MALLOC_FUNC_TAOS_FORBID
-#define calloc  CALLOC_FUNC_TAOS_FORBID
-#define realloc REALLOC_FUNC_TAOS_FORBID
-#define free    FREE_FUNC_TAOS_FORBID
-#ifdef strdup
-#undef strdup
-#define strdup  STRDUP_FUNC_TAOS_FORBID
-#endif
-#endif  // ifndef ALLOW_FORBID_FUNC
-#endif  // if !defined(WINDOWS)
+// #if !defined(WINDOWS)
+
+// #ifndef ALLOW_FORBID_FUNC
+// #define malloc  MALLOC_FUNC_TAOS_FORBID
+// #define calloc  CALLOC_FUNC_TAOS_FORBID
+// #define realloc REALLOC_FUNC_TAOS_FORBID
+// #define free    FREE_FUNC_TAOS_FORBID
+// #ifdef strdup
+// #undef strdup
+// #define strdup  STRDUP_FUNC_TAOS_FORBID
+// #endif
+// #endif  // ifndef ALLOW_FORBID_FUNC
+// #endif  // if !defined(WINDOWS)
+
+// // #define taosMemoryFree malloc
+// #define taosMemoryMalloc  malloc
+// #define taosMemoryCalloc  calloc
+// #define taosMemoryRealloc realloc
+// #define taosMemoryFree    free

 int32_t taosMemoryDbgInit();
 int32_t taosMemoryDbgInitRestore();
@@ -147,7 +147,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_CONN_KILLED               TAOS_DEF_ERROR_CODE(0, 0x0215)
 #define TSDB_CODE_TSC_SQL_SYNTAX_ERROR          TAOS_DEF_ERROR_CODE(0, 0x0216)
 #define TSDB_CODE_TSC_DB_NOT_SELECTED           TAOS_DEF_ERROR_CODE(0, 0x0217)
-#define TSDB_CODE_TSC_INVALID_TABLE_NAME        TAOS_DEF_ERROR_CODE(0, 0x0218)
+//#define TSDB_CODE_TSC_INVALID_TABLE_NAME      TAOS_DEF_ERROR_CODE(0, 0x0218)
 #define TSDB_CODE_TSC_EXCEED_SQL_LIMIT          TAOS_DEF_ERROR_CODE(0, 0x0219)
 #define TSDB_CODE_TSC_FILE_EMPTY                TAOS_DEF_ERROR_CODE(0, 0x021A)
 #define TSDB_CODE_TSC_LINE_SYNTAX_ERROR         TAOS_DEF_ERROR_CODE(0, 0x021B)

@@ -262,6 +262,7 @@
 // #define TSDB_CODE_MND_INVALID_STABLE_NAME    TAOS_DEF_ERROR_CODE(0, 0x036D) // 2.x
 #define TSDB_CODE_MND_INVALID_STB_OPTION        TAOS_DEF_ERROR_CODE(0, 0x036E)
 #define TSDB_CODE_MND_INVALID_ROW_BYTES         TAOS_DEF_ERROR_CODE(0, 0x036F)
+#define TSDB_CODE_MND_FIELD_VALUE_OVERFLOW      TAOS_DEF_ERROR_CODE(0, 0x0370)

 // mnode-func

@@ -406,6 +407,7 @@
 #define TSDB_CODE_SNODE_NOT_DEPLOYED            TAOS_DEF_ERROR_CODE(0, 0x0411)
 #define TSDB_CODE_MNODE_NOT_CATCH_UP            TAOS_DEF_ERROR_CODE(0, 0x0412) // internal
 #define TSDB_CODE_MNODE_ALREADY_IS_VOTER        TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
+#define TSDB_CODE_MNODE_ONLY_TWO_MNODE          TAOS_DEF_ERROR_CODE(0, 0x0414) // internal

 // vnode
 // #define TSDB_CODE_VND_ACTION_IN_PROGRESS     TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x

@@ -442,6 +444,7 @@
 #define TSDB_CODE_VND_QUERY_BUSY                TAOS_DEF_ERROR_CODE(0, 0x0531)
 #define TSDB_CODE_VND_NOT_CATCH_UP              TAOS_DEF_ERROR_CODE(0, 0x0532) // internal
 #define TSDB_CODE_VND_ALREADY_IS_VOTER          TAOS_DEF_ERROR_CODE(0, 0x0533) // internal
+#define TSDB_CODE_VND_DIR_ALREADY_EXIST         TAOS_DEF_ERROR_CODE(0, 0x0534)

 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID          TAOS_DEF_ERROR_CODE(0, 0x0600)

@@ -736,28 +739,21 @@
 //tsma
 #define TSDB_CODE_TSMA_INIT_FAILED              TAOS_DEF_ERROR_CODE(0, 0x3100)
 #define TSDB_CODE_TSMA_ALREADY_EXIST            TAOS_DEF_ERROR_CODE(0, 0x3101)
-#define TSDB_CODE_TSMA_NO_INDEX_IN_META         TAOS_DEF_ERROR_CODE(0, 0x3102)
-#define TSDB_CODE_TSMA_INVALID_ENV              TAOS_DEF_ERROR_CODE(0, 0x3103)
-#define TSDB_CODE_TSMA_INVALID_STAT             TAOS_DEF_ERROR_CODE(0, 0x3104)
-#define TSDB_CODE_TSMA_INVALID_PTR              TAOS_DEF_ERROR_CODE(0, 0x3105)
-#define TSDB_CODE_TSMA_INVALID_PARA             TAOS_DEF_ERROR_CODE(0, 0x3106)
-#define TSDB_CODE_TSMA_NO_INDEX_IN_CACHE        TAOS_DEF_ERROR_CODE(0, 0x3107)
+#define TSDB_CODE_TSMA_INVALID_ENV              TAOS_DEF_ERROR_CODE(0, 0x3102)
+#define TSDB_CODE_TSMA_INVALID_STAT             TAOS_DEF_ERROR_CODE(0, 0x3103)
+#define TSDB_CODE_TSMA_INVALID_PTR              TAOS_DEF_ERROR_CODE(0, 0x3104)
+#define TSDB_CODE_TSMA_INVALID_PARA             TAOS_DEF_ERROR_CODE(0, 0x3105)

 //rsma
 #define TSDB_CODE_RSMA_INVALID_ENV              TAOS_DEF_ERROR_CODE(0, 0x3150)
 #define TSDB_CODE_RSMA_INVALID_STAT             TAOS_DEF_ERROR_CODE(0, 0x3151)
 #define TSDB_CODE_RSMA_QTASKINFO_CREATE         TAOS_DEF_ERROR_CODE(0, 0x3152)
-#define TSDB_CODE_RSMA_FS_COMMIT                TAOS_DEF_ERROR_CODE(0, 0x3153)
-#define TSDB_CODE_RSMA_REMOVE_EXISTS            TAOS_DEF_ERROR_CODE(0, 0x3154)
-#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP       TAOS_DEF_ERROR_CODE(0, 0x3155)
-#define TSDB_CODE_RSMA_EMPTY_INFO               TAOS_DEF_ERROR_CODE(0, 0x3156)
-#define TSDB_CODE_RSMA_INVALID_SCHEMA           TAOS_DEF_ERROR_CODE(0, 0x3157)
-#define TSDB_CODE_RSMA_REGEX_MATCH              TAOS_DEF_ERROR_CODE(0, 0x3158)
-#define TSDB_CODE_RSMA_STREAM_STATE_OPEN        TAOS_DEF_ERROR_CODE(0, 0x3159)
-#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT      TAOS_DEF_ERROR_CODE(0, 0x3160)
-#define TSDB_CODE_RSMA_FS_REF                   TAOS_DEF_ERROR_CODE(0, 0x3161)
-#define TSDB_CODE_RSMA_FS_SYNC                  TAOS_DEF_ERROR_CODE(0, 0x3162)
-#define TSDB_CODE_RSMA_FS_UPDATE                TAOS_DEF_ERROR_CODE(0, 0x3163)
+#define TSDB_CODE_RSMA_INVALID_SCHEMA           TAOS_DEF_ERROR_CODE(0, 0x3153)
+#define TSDB_CODE_RSMA_STREAM_STATE_OPEN        TAOS_DEF_ERROR_CODE(0, 0x3154)
+#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT      TAOS_DEF_ERROR_CODE(0, 0x3155)
+#define TSDB_CODE_RSMA_FS_REF                   TAOS_DEF_ERROR_CODE(0, 0x3156)
+#define TSDB_CODE_RSMA_FS_SYNC                  TAOS_DEF_ERROR_CODE(0, 0x3157)
+#define TSDB_CODE_RSMA_FS_UPDATE                TAOS_DEF_ERROR_CODE(0, 0x3158)

 //index
 #define TSDB_CODE_INDEX_REBUILDING              TAOS_DEF_ERROR_CODE(0, 0x3200)

@@ -771,11 +767,15 @@
 // stream
 #define TSDB_CODE_STREAM_TASK_NOT_EXIST         TAOS_DEF_ERROR_CODE(0, 0x4100)
+#define TSDB_CODE_STREAM_BACKPRESSURE_OUT_OF_QUEUE TAOS_DEF_ERROR_CODE(0, 0x4101)

 // TDLite
 #define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS        TAOS_DEF_ERROR_CODE(0, 0x5100)
 #define TSDB_CODE_TDLITE_IVLD_OPEN_DIR          TAOS_DEF_ERROR_CODE(0, 0x5101)

+// UTIL
+#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY      TAOS_DEF_ERROR_CODE(0, 0x6000)
+
 #ifdef __cplusplus
 }
 #endif
@@ -267,6 +267,9 @@ typedef enum ELogicConditionType {
 #define TSDB_DNODE_CONFIG_LEN 128
 #define TSDB_DNODE_VALUE_LEN  256

+#define TSDB_ACTIVE_KEY_LEN      109  // history 109:?
+#define TSDB_CONN_ACTIVE_KEY_LEN 257  // history 257:?
+
 #define TSDB_DEFAULT_PKT_SIZE 65480  // same as RPC_MAX_UDP_SIZE

 #define TSDB_PAYLOAD_SIZE TSDB_DEFAULT_PKT_SIZE

@@ -365,11 +368,11 @@
 #define TSDB_MIN_STT_TRIGGER     1
 #define TSDB_MAX_STT_TRIGGER     16
 #define TSDB_DEFAULT_SST_TRIGGER 1
-#define TSDB_MIN_HASH_PREFIX     0
-#define TSDB_MAX_HASH_PREFIX     128
+#define TSDB_MIN_HASH_PREFIX     (2 - TSDB_TABLE_NAME_LEN)
+#define TSDB_MAX_HASH_PREFIX     (TSDB_TABLE_NAME_LEN - 2)
 #define TSDB_DEFAULT_HASH_PREFIX 0
-#define TSDB_MIN_HASH_SUFFIX     0
-#define TSDB_MAX_HASH_SUFFIX     128
+#define TSDB_MIN_HASH_SUFFIX     (2 - TSDB_TABLE_NAME_LEN)
+#define TSDB_MAX_HASH_SUFFIX     (TSDB_TABLE_NAME_LEN - 2)
 #define TSDB_DEFAULT_HASH_SUFFIX 0

 #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
@@ -17,6 +17,7 @@
 #define _TD_UTIL_LIST_H_

 #include "os.h"
+#include "talgo.h"

 #ifdef __cplusplus
 extern "C" {

@@ -222,10 +223,12 @@ void tdListInit(SList *list, int32_t eleSize);
 void       tdListEmpty(SList *list);
 SList     *tdListNew(int32_t eleSize);
 void      *tdListFree(SList *list);
+void      *tdListFreeP(SList *list, FDelete fp);
 void       tdListPrependNode(SList *list, SListNode *node);
 void       tdListAppendNode(SList *list, SListNode *node);
 int32_t    tdListPrepend(SList *list, void *data);
 int32_t    tdListAppend(SList *list, const void *data);
+SListNode *tdListAdd(SList *list, const void *data);
 SListNode *tdListPopHead(SList *list);
 SListNode *tdListPopTail(SList *list);
 SListNode *tdListGetHead(SList *list);
@@ -108,7 +108,6 @@ bool taosAssertRelease(bool condition);
 void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo);
 void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd);
 void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile);
-int32_t taosGenCrashJsonMsg(int signum, char **pMsg, int64_t clusterId, int64_t startTime);

 // clang-format off
 #define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
@@ -84,6 +84,8 @@ struct STaosQueue {
   int64_t memOfItems;
   int32_t numOfItems;
   int64_t threadId;
+  int64_t memLimit;
+  int64_t itemLimit;
 };

 struct STaosQset {

@@ -106,12 +108,14 @@ void taosCloseQueue(STaosQueue *queue);
 void    taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
 void   *taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize);
 void    taosFreeQitem(void *pItem);
-void    taosWriteQitem(STaosQueue *queue, void *pItem);
+int32_t taosWriteQitem(STaosQueue *queue, void *pItem);
 int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
 bool    taosQueueEmpty(STaosQueue *queue);
 void    taosUpdateItemSize(STaosQueue *queue, int32_t items);
 int32_t taosQueueItemSize(STaosQueue *queue);
 int64_t taosQueueMemorySize(STaosQueue *queue);
+void    taosSetQueueCapacity(STaosQueue *queue, int64_t size);
+void    taosSetQueueMemoryCapacity(STaosQueue *queue, int64_t mem);

 STaosQall *taosAllocateQall();
 void       taosFreeQall(STaosQall *qall);
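For reference, a small sketch of the bounded-queue additions above (illustrative only, not part of this commit; the queue is assumed to exist already, the capacity numbers are placeholders, and freeing the item when the write is rejected is an assumption about ownership):

#include "tqueue.h"  // STaosQueue, EQItype and the queue API shown above

// With item/memory limits in place, taosWriteQitem can now report failure, so check its result.
static int32_t demoBoundedWrite(STaosQueue *queue, EQItype itype) {
  taosSetQueueCapacity(queue, 10000);                    // at most 10000 queued items
  taosSetQueueMemoryCapacity(queue, 64 * 1024 * 1024);   // and at most 64 MiB of queued payload

  void *pItem = taosAllocateQitem(128, itype, 128);      // 128-byte item, 128 bytes counted against the memory cap
  if (pItem == NULL) return -1;

  int32_t code = taosWriteQitem(queue, pItem);           // non-zero when a capacity limit is hit
  if (code != 0) {
    taosFreeQitem(pItem);                                // writer keeps ownership on failure (assumption)
  }
  return code;
}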
@@ -81,14 +81,22 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
 
 static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
                                              int32_t suffix) {
-  if (prefix == 0 && suffix == 0) {
+  if ((prefix == 0 && suffix == 0) || (tblen <= (prefix + suffix)) || (tblen <= -1 * (prefix + suffix)) || prefix * suffix < 0) {
     return MurmurHash3_32(tbname, tblen);
+  } else if (prefix > 0 || suffix > 0) {
+    return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
   } else {
-    if (tblen <= (prefix + suffix)) {
-      return MurmurHash3_32(tbname, tblen);
-    } else {
-      return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
+    char    tbName[TSDB_TABLE_FNAME_LEN];
+    int32_t offset = 0;
+    if (prefix < 0) {
+      offset = -1 * prefix;
+      strncpy(tbName, tbname, offset);
     }
+    if (suffix < 0) {
+      strncpy(tbName + offset, tbname + tblen + suffix, -1 *suffix);
+      offset += -1 *suffix;
+    }
+    return MurmurHash3_32(tbName, offset);
   }
 }
 

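Reviewer note (illustration, not part of the diff): with this change, positive prefix/suffix values strip that many leading/trailing characters from the table name before hashing, negative values keep only |prefix| leading plus |suffix| trailing characters, and mixed signs or out-of-range values fall back to hashing the whole name. The self-contained sketch below shows which bytes end up being hashed; the example name, buffer size, and the print in place of MurmurHash3_32 are invented for the illustration, while the branch conditions mirror the diff.

```c
#include <stdio.h>
#include <string.h>

/* Prints the substring of tbname that taosGetTbHashVal() would hash for the
 * given prefix/suffix, mirroring the branch logic in the change above. */
static void showHashedPart(const char *tbname, int prefix, int suffix) {
  int  len = (int)strlen(tbname);
  char buf[256];  // stand-in for TSDB_TABLE_FNAME_LEN

  if ((prefix == 0 && suffix == 0) || len <= prefix + suffix ||
      len <= -1 * (prefix + suffix) || prefix * suffix < 0) {
    // degenerate settings: the whole name is hashed
    snprintf(buf, sizeof(buf), "%s", tbname);
  } else if (prefix > 0 || suffix > 0) {
    // positive values strip that many leading/trailing characters
    snprintf(buf, sizeof(buf), "%.*s", len - prefix - suffix, tbname + prefix);
  } else {
    // negative values keep only |prefix| leading and |suffix| trailing characters
    int offset = 0;
    if (prefix < 0) {
      memcpy(buf, tbname, -prefix);
      offset = -prefix;
    }
    if (suffix < 0) {
      memcpy(buf + offset, tbname + len + suffix, -suffix);
      offset += -suffix;
    }
    buf[offset] = '\0';
  }
  printf("%-12s prefix=%3d suffix=%3d -> \"%s\"\n", tbname, prefix, suffix, buf);
}

int main(void) {
  showHashedPart("d1001_meter", 0, 0);    // whole name
  showHashedPart("d1001_meter", 2, 6);    // strip 2 leading + 6 trailing chars -> "001"
  showHashedPart("d1001_meter", -5, 0);   // keep only the first 5 chars -> "d1001"
  showHashedPart("d1001_meter", -2, -3);  // keep first 2 + last 3 chars -> "d1ter"
  return 0;
}
```
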
@@ -98,6 +106,8 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
     goto LABEL; \
   }
 
+#define VND_CHECK_CODE(CODE, LINO, LABEL) TSDB_CHECK_CODE(CODE, LINO, LABEL)
+
 #ifdef __cplusplus
 }
 #endif

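Reviewer note (illustration, not part of the diff): VND_CHECK_CODE is simply a vnode-local spelling of TSDB_CHECK_CODE, whose visible tail above ends in a goto to a cleanup label. A hedged sketch of the intended call pattern follows; the function, variable, and label names are invented, and the assumption that the macro records the failing line before jumping is inferred from the fragment shown, not stated in the diff.

```c
/* Sketch only: vndDoStep, code, lino and _exit are invented names; the macro
 * is assumed to jump to the label when `code` indicates an error. */
static int32_t vndDoStep(int32_t input) {
  int32_t code = 0;
  int32_t lino = 0;

  if (input < 0) {
    code = -1;  // stand-in for a failing sub-call
  }
  VND_CHECK_CODE(code, lino, _exit);

  // ... further steps guarded the same way ...

_exit:
  return code;
}
```
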
@@ -26,6 +26,38 @@ if pidof taosd &> /dev/null; then
   sleep 1
 fi
 
+# Stop adapter service if running
+if pidof taosadapter &> /dev/null; then
+  if pidof systemd &> /dev/null; then
+    ${csudo}systemctl stop taosadapter || :
+  elif $(which service &> /dev/null); then
+    ${csudo}service taosadapter stop || :
+  else
+    pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
+    if [ -n "$pid" ]; then
+      ${csudo}kill -9 $pid || :
+    fi
+  fi
+  echo "Stop taosadapter service success!"
+  sleep 1
+fi
+
+# Stop keeper service if running
+if pidof taoskeeper &> /dev/null; then
+  if pidof systemd &> /dev/null; then
+    ${csudo}systemctl stop taoskeeper || :
+  elif $(which service &> /dev/null); then
+    ${csudo}service taoskeeper stop || :
+  else
+    pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
+    if [ -n "$pid" ]; then
+      ${csudo}kill -9 $pid || :
+    fi
+  fi
+  echo "Stop taoskeeper service success!"
+  sleep 1
+fi
+
 # if taos.cfg already softlink, remove it
 cfg_install_dir="/etc/taos"
 install_main_dir="/usr/local/taos"

@@ -41,6 +73,11 @@ if [ -f "${install_main_dir}/taosadapter.service" ]; then
   ${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
 fi
 
+if [ -f "${install_main_dir}/taoskeeper.toml" ]; then
+  ${csudo}rm -f ${install_main_dir}/cfg/taoskeeper.toml || :
+fi
+
+
 # there can not libtaos.so*, otherwise ln -s error
 ${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
 [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :

@@ -32,6 +32,7 @@ else
   ${csudo}rm -f ${bin_link_dir}/udfd || :
   ${csudo}rm -f ${bin_link_dir}/taosadapter || :
   ${csudo}rm -f ${bin_link_dir}/taosdemo || :
+  ${csudo}rm -f ${bin_link_dir}/taoskeeper || :
   ${csudo}rm -f ${cfg_link_dir}/* || :
   ${csudo}rm -f ${inc_link_dir}/taos.h || :
   ${csudo}rm -f ${inc_link_dir}/taosdef.h || :

@@ -44,8 +44,31 @@ mkdir -p ${pkg_dir}${install_home_path}/include
 #mkdir -p ${pkg_dir}${install_home_path}/init.d
 mkdir -p ${pkg_dir}${install_home_path}/script
 
+# download taoskeeper and build
+if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then
+  arch=amd64
+elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then
+  arch=386
+elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then
+  arch=arm
+elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then
+  arch=arm64
+else
+  arch=$cpuType
+fi
+
+echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper"
+echo "$top_dir=${top_dir}"
+taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper`
+echo "taoskeeper_binary: ${taoskeeper_binary}"
+
+# copy config files
+cp $(dirname ${taoskeeper_binary})/config/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg
+cp $(dirname ${taoskeeper_binary})/taoskeeper.service ${pkg_dir}${install_home_path}/cfg
+
 cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
 cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
 
 if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
   cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
 fi

@@ -53,6 +76,7 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
   cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
 fi
 
+cp ${taoskeeper_binary} ${pkg_dir}${install_home_path}/bin
 #cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
 cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
 cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script

@@ -143,6 +167,7 @@ else
   exit 1
 fi
 
+rm -rf ${pkg_dir}/build-taoskeeper
 # make deb package
 dpkg -b ${pkg_dir} $debname
 echo "make deb package success!"

@@ -150,4 +175,5 @@ echo "make deb package success!"
 cp ${pkg_dir}/*.deb ${output_dir}
 
 # clean temp dir
+
 rm -rf ${pkg_dir}

Some files were not shown because too many files have changed in this diff.