Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-4229

commit 04e84ba152

Jenkinsfile2 (14 changed lines)
@@ -151,14 +151,6 @@ def pre_test(){
 cd ${WKC}
 git submodule update --init --recursive
 '''
-sh '''
-cd ${WKPY}
-git reset --hard
-git pull
-git log -5
-echo "python connector log: `git log -5`" >>${WKDIR}/jenkins.log
-echo >>${WKDIR}/jenkins.log
-'''
 return 1
 }
 def pre_test_build_mac() {
@@ -363,7 +355,7 @@ pipeline {
 }
 parallel {
 stage('check docs') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
 steps {
 check_docs()
 }
@@ -401,7 +393,7 @@ pipeline {
 agent{label " Mac_catalina "}
 steps {
 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-timeout(time: 20, unit: 'MINUTES'){
+timeout(time: 30, unit: 'MINUTES'){
 pre_test()
 pre_test_build_mac()
 }
@@ -409,7 +401,7 @@ pipeline {
 }
 }
 stage('linux test') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
 options { skipDefaultCheckout() }
 when {
 changeRequest()
@@ -1,13 +0,0 @@
-
-# bdb
-ExternalProject_Add(bdb
-GIT_REPOSITORY https://github.com/berkeleydb/libdb.git
-GIT_TAG v5.3.28
-SOURCE_DIR "${TD_CONTRIB_DIR}/bdb"
-BINARY_DIR "${TD_CONTRIB_DIR}/bdb"
-#BUILD_IN_SOURCE TRUE
-CONFIGURE_COMMAND COMMAND ./dist/configure --enable-debug
-BUILD_COMMAND "$(MAKE)"
-INSTALL_COMMAND ""
-TEST_COMMAND ""
-)
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE FALSE)
+set(CMAKE_VERBOSE_MAKEFILE TRUE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)
 
 #set output directory
@@ -97,7 +97,15 @@ ENDIF()
 SET(JEMALLOC_ENABLED OFF)
 IF (TD_WINDOWS)
 MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
-SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
+IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
+MESSAGE("${Green} will build Release version! ${ColourReset}")
+SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
+
+ELSE ()
+MESSAGE("${Green} will build Debug version! ${ColourReset}")
+SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
+ENDIF()
+
 SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
 # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
 # SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -151,6 +159,7 @@ ELSE ()
 CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
 CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
 CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
+CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
 
 IF (COMPILER_SUPPORT_SSE42)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
@@ -158,11 +167,11 @@ ELSE ()
 ENDIF()
 
 IF ("${SIMD_SUPPORT}" MATCHES "true")
 IF (COMPILER_SUPPORT_FMA)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
 ENDIF()
 IF (COMPILER_SUPPORT_AVX)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
 ENDIF()
@@ -175,7 +184,13 @@ ELSE ()
 IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
-MESSAGE(STATUS "avx512 supported by gcc")
+MESSAGE(STATUS "avx512f/avx512bmi supported by compiler")
+ENDIF()
 
+IF (COMPILER_SUPPORT_AVX512VL)
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
+MESSAGE(STATUS "avx512vl supported by compiler")
 ENDIF()
 ENDIF()
@@ -151,6 +151,7 @@ IF(${BUILD_S3})
 
 IF(${BUILD_WITH_S3})
 
+add_definitions(-DUSE_S3)
 option(BUILD_WITH_COS "If build with cos" OFF)
 
 ELSE ()
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "3.2.1.0.alpha")
+SET(TD_VER_NUMBER "3.2.2.0.alpha")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -1,13 +0,0 @@
-
-# leveldb
-ExternalProject_Add(leveldb
-GIT_REPOSITORY https://github.com/taosdata-contrib/leveldb.git
-GIT_TAG master
-SOURCE_DIR "${TD_CONTRIB_DIR}/leveldb"
-BINARY_DIR ""
-#BUILD_IN_SOURCE TRUE
-CONFIGURE_COMMAND ""
-BUILD_COMMAND ""
-INSTALL_COMMAND ""
-TEST_COMMAND ""
-)
@@ -1,12 +0,0 @@
-
-# lucene
-ExternalProject_Add(lucene
-GIT_REPOSITORY https://github.com/yihaoDeng/LucenePlusPlus.git
-SOURCE_DIR "${TD_CONTRIB_DIR}/lucene"
-BINARY_DIR ""
-#BUILD_IN_SOURCE TRUE
-CONFIGURE_COMMAND ""
-BUILD_COMMAND ""
-INSTALL_COMMAND ""
-TEST_COMMAND ""
-)
@@ -1,12 +0,0 @@
-
-# NuRaft
-ExternalProject_Add(NuRaft
-GIT_REPOSITORY https://github.com/eBay/NuRaft.git
-GIT_TAG v1.3.0
-SOURCE_DIR "${TD_CONTRIB_DIR}/nuraft"
-BINARY_DIR "${TD_CONTRIB_DIR}/nuraft"
-CONFIGURE_COMMAND "./prepare.sh"
-BUILD_COMMAND ""
-INSTALL_COMMAND ""
-TEST_COMMAND ""
-)
@@ -13,6 +13,6 @@ ExternalProject_Add(xml2
 BUILD_IN_SOURCE TRUE
 CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
 BUILD_COMMAND make -j
-INSTALL_COMMAND make install && ln -s $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
+INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
 TEST_COMMAND ""
 )
@@ -109,11 +109,6 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 # cJson
 cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 
-# leveldb
-if(${BUILD_WITH_LEVELDB})
-cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif(${BUILD_WITH_LEVELDB})
-
 if (${BUILD_CONTRIB})
 if(${BUILD_WITH_ROCKSDB})
 cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -132,28 +127,11 @@ else()
 endif()
 endif()
 
-# canonical-raft
-if(${BUILD_WITH_CRAFT})
-cat("${TD_SUPPORT_DIR}/craft_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-SET(BUILD_WITH_UV ON CACHE BOOL "craft need libuv" FORCE)
-endif(${BUILD_WITH_CRAFT})
-
-# traft
-if(${BUILD_WITH_TRAFT})
-cat("${TD_SUPPORT_DIR}/traft_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-SET(BUILD_WITH_UV ON CACHE BOOL "traft need libuv" FORCE)
-endif(${BUILD_WITH_TRAFT})
-
 #libuv
 if(${BUILD_WITH_UV})
 cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 endif(${BUILD_WITH_UV})
 
-# bdb
-if(${BUILD_WITH_BDB})
-cat("${TD_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif(${BUILD_WITH_BDB})
-
 # sqlite
 if(${BUILD_WITH_SQLITE})
 cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -178,17 +156,6 @@ elseif(${BUILD_WITH_COS})
 
 endif()
 
-# lucene
-if(${BUILD_WITH_LUCENE})
-cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-add_definitions(-DUSE_LUCENE)
-endif(${BUILD_WITH_LUCENE})
-
-# NuRaft
-if(${BUILD_WITH_NURAFT})
-cat("${TD_SUPPORT_DIR}/nuraft_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif(${BUILD_WITH_NURAFT})
-
 # crashdump
 if(${BUILD_CRASHDUMP})
 cat("${TD_SUPPORT_DIR}/crashdump_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -317,7 +284,8 @@ if (${BUILD_WITH_ROCKSDB})
 SET(CMAKE_BUILD_TYPE Release)
 endif()
 endif(${TD_LINUX})
-MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
 
 if(${TD_DARWIN})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
@@ -329,8 +297,12 @@ if (${BUILD_WITH_ROCKSDB})
 
 if (${TD_WINDOWS})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
 option(WITH_JNI "" OFF)
-option(WITH_MD_LIBRARY "build with MD" OFF)
+if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
+message("Rocksdb build runtime lib use /MT or /MTd")
+option(WITH_MD_LIBRARY "build with MD" OFF)
+endif()
 set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
 endif(${TD_WINDOWS})
 
@@ -361,9 +333,11 @@ if (${BUILD_WITH_ROCKSDB})
 )
 else()
 if (NOT ${TD_LINUX})
-MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
 if(${TD_DARWIN})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
 endif(${TD_DARWIN})
 
 if (${TD_DARWIN_ARM64})
|
||||||
|
|
||||||
if (${TD_WINDOWS})
|
if (${TD_WINDOWS})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||||
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
|
||||||
option(WITH_JNI "" OFF)
|
option(WITH_JNI "" OFF)
|
||||||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
|
||||||
|
message("Rocksdb build runtime lib use /MT or /MTd")
|
||||||
|
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||||
|
endif()
|
||||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||||
endif(${TD_WINDOWS})
|
endif(${TD_WINDOWS})
|
||||||
|
|
||||||
|
@@ -437,26 +415,11 @@ elseif(${BUILD_WITH_COS})
 
 endif()
 
-# lucene
-# To support build on ubuntu: sudo apt-get install libboost-all-dev
-if(${BUILD_WITH_LUCENE})
-option(ENABLE_TEST "Enable the tests" OFF)
-add_subdirectory(lucene EXCLUDE_FROM_ALL)
-target_include_directories(
-lucene++
-PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lucene/include>
-)
-
-endif(${BUILD_WITH_LUCENE})
-
-# NuRaft
-if(${BUILD_WITH_NURAFT})
-add_subdirectory(nuraft EXCLUDE_FROM_ALL)
-endif(${BUILD_WITH_NURAFT})
-
 # pthread
 if(${BUILD_PTHREAD})
-set(CMAKE_BUILD_TYPE debug)
+if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+SET(CMAKE_BUILD_TYPE Release)
+endif()
 add_definitions(-DPTW32_STATIC_LIB)
 add_subdirectory(pthread EXCLUDE_FROM_ALL)
 set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
@@ -524,30 +487,6 @@ if(${BUILD_WCWIDTH})
 SET_TARGET_PROPERTIES(wcwidth PROPERTIES OUTPUT_NAME wcwidth)
 endif(${BUILD_WCWIDTH})
 
-# CRAFT
-if(${BUILD_WITH_CRAFT})
-add_library(craft STATIC IMPORTED GLOBAL)
-set_target_properties(craft PROPERTIES
-IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/craft/.libs/libraft.a"
-INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/craft/include"
-)
-# target_link_libraries(craft
-# INTERFACE pthread
-# )
-endif(${BUILD_WITH_CRAFT})
-
-# TRAFT
-if(${BUILD_WITH_TRAFT})
-add_library(traft STATIC IMPORTED GLOBAL)
-set_target_properties(traft PROPERTIES
-IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/traft/.libs/libraft.a"
-INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/traft/include"
-)
-# target_link_libraries(craft
-# INTERFACE pthread
-# )
-endif(${BUILD_WITH_TRAFT})
-
 # LIBUV
 if(${BUILD_WITH_UV})
 if (TD_WINDOWS)
@@ -559,18 +498,6 @@ if(${BUILD_WITH_UV})
 add_subdirectory(libuv EXCLUDE_FROM_ALL)
 endif(${BUILD_WITH_UV})
 
-# BDB
-if(${BUILD_WITH_BDB})
-add_library(bdb STATIC IMPORTED GLOBAL)
-set_target_properties(bdb PROPERTIES
-IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/bdb/libdb.a"
-INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/bdb"
-)
-target_link_libraries(bdb
-INTERFACE pthread
-)
-endif(${BUILD_WITH_BDB})
-
 # SQLite
 # see https://stackoverflow.com/questions/8774593/cmake-link-to-external-library#comment58570736_10550334
 if(${BUILD_WITH_SQLITE})
@@ -640,13 +567,18 @@ if(${BUILD_GEOS})
 if(${TD_LINUX})
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
-IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
 SET(CMAKE_BUILD_TYPE Release)
 endif()
 endif(${TD_LINUX})
 option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
 add_subdirectory(geos EXCLUDE_FROM_ALL)
-unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
+if (${TD_WINDOWS})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+else ()
+unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
+endif(${TD_WINDOWS})
 target_include_directories(
 geos_c
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
(4 binary files changed; contents not shown)
@@ -243,7 +243,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
 taos
 ```
 
-The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](/train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
+The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](../../train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
 
 ```cmd
 taos>
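As a hedged illustration of the kind of SQL you can run at this prompt (not part of the commit; any standard statement works the same way):

```sql
-- List the databases visible to the current user.
SHOW DATABASES;
```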
@@ -10,7 +10,7 @@ Between official releases, beta versions may be released that contain new featur
 <PkgList type={0}/>
 
-For information about installing TDengine, see [Install and Uninstall](/operation/pkg-install).
+For information about installing TDengine, see [Install and Uninstall](../../operation/pkg-install).
 
 For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads)
 
@@ -12,7 +12,7 @@ import StackOverflowSVG from './stackoverflow.svg'
 
 You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
 
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter).
 
 ```mdx-code-block
 import DocCardList from '@theme/DocCardList';
@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
 {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
 ```
 
-More configuration about connection, please refer to [Java Connector](/reference/connector/java)
+For more information about connection configuration, please refer to [Java Connector](../../reference/connector/java)
@@ -22,7 +22,7 @@ import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
 import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
 import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
 
-Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
+Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using the native interface (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
 
 ## Establish Connection
 
@@ -36,7 +36,7 @@ For REST and native connections, connectors provide similar APIs for performing
 Key differences:
 
 3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
-1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
+1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../reference/connector/cpp#parameter-binding-api), [Subscription](../../reference/connector/cpp#subscription-and-consumption-api), etc.
 
 ## Install Client Driver taosc
 
@@ -3,9 +3,9 @@ title: Data Model
 description: This document describes the data model of TDengine.
 ---
 
-The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
+The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](../../concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
 
-Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
+Note: before you read this chapter, please make sure you have already read through [Key Concepts](../../concept/), since TDengine introduces new concepts like "one table for one [data collection point](../../concept/#data-collection-point)" and "[super table](../../concept/#super-table-stable)".
 
 ## Create Database
 
@@ -22,7 +22,7 @@ In the above SQL statement:
 - a new data file will be created every 10 days
 - the size of the write cache pool on each VNode is 16 MB
 - the number of vgroups is 100
-- WAL is enabled but fsync is disabled For more details please refer to [Database](/taos-sql/database).
+- WAL is enabled but fsync is disabled. For more details, please refer to [Database](../../taos-sql/database).
 
 After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`.
 
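The `CREATE DATABASE` statement those bullets describe lies above this hunk; the following is a hedged reconstruction, assuming TDengine 3.0 option names (`DURATION` in days, `BUFFER` in MB per vnode), not the document's own statement:

```sql
-- Hypothetical statement matching the bullets: 10-day data files,
-- 16 MB write cache per vnode, 100 vgroups, WAL on with fsync off.
CREATE DATABASE power BUFFER 16 DURATION 10 VGROUPS 100 WAL_LEVEL 1;
USE power;
```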
@@ -41,13 +41,13 @@ Without the current database specified, table name must be preceded with the cor
 
 ## Create STable
 
-In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/concept/#model_table1), the SQL statement below can be used to create the super table.
+In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](../../concept/#model_table1), the SQL statement below can be used to create the super table.
 
 ```sql
 CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
 ```
 
-Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
+Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](../../taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](../../taos-sql/stable) for more details.
 
 For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices.
 
@@ -61,7 +61,7 @@ A specific table needs to be created for each data collection point. Similar to
 CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
 ```
 
-In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
+In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](../../taos-sql/table) for details.
 
 It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value.
 
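The changed paragraph notes that tag values can be updated after the table is created; a short sketch of the standard syntax for doing so (an illustration, not part of this diff):

```sql
-- Update one tag value on the existing subtable d1001.
ALTER TABLE d1001 SET TAG location = 'California.LosAngeles';
```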
@@ -75,7 +75,7 @@ INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now,
 
 In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`.
 
-For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting).
+For more details please refer to [Create Table Automatically](../../taos-sql/insert#automatically-create-table-when-inserting).
 
 ## Single Column vs Multiple Column
 
@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
 ```
 
-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is a Unix timestamp; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
 
 ### Insert Multiple Rows
 
@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
 INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
 ```
 
-`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` and `ts2` are Unix timestamps; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
 
 ### Insert into Multiple Tables
 
@@ -53,9 +53,9 @@ Data can be inserted into multiple tables in the same SQL statement. The example
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
 ```
 
-`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1`, `ts2` and `ts3` are Unix timestamps; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
 
-For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
+For more details about `INSERT` please refer to [INSERT](../../../taos-sql/insert).
 
 :::info
 
@@ -35,7 +35,7 @@ bin/kafka-topics.sh --bootstrap-server=localhost:9092 --describe
 
 ## Insert into TDengine
 
-We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](/develop/insert-data/sql-writing/) or [High Performance Writing](/develop/insert-data/high-volume/) or [Schemaless Writing](/reference/schemaless/).
+We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](../sql-writing/), [High Performance Writing](../high-volume/), or [Schemaless Writing](../../../reference/schemaless/).
 
 ## Examples
 
@@ -46,7 +46,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
 
 :::
 
-For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
+For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](../../../reference/schemaless/#Schemaless-Line-Protocol)
 
 ## Examples
 
@@ -128,7 +128,7 @@ For more information, see [Aggregate by Window](../../taos-sql/distinguished).
 
 ### Query
 
-In the section describing [Insert](/develop/insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable.
+In the section describing [Insert](../insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable.
 
 <Tabs defaultValue="java" groupId="lang">
 <TabItem label="Java" value="java">
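The sample code the paragraph points to sits outside this hunk; as a hedged illustration, a typical query against the `meters` STable it references (my example, using the tag and columns defined earlier in the guide):

```sql
-- Read a couple of rows across all subtables of the meters STable.
SELECT ts, current, voltage, phase
FROM meters
WHERE location = 'California.SanFrancisco'
LIMIT 2;
```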
@@ -6,7 +6,7 @@ description: This document describes how to use the various components of TDengi
 Before creating an application to process time-series data with TDengine, consider the following:
 
 1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
-2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data.
+2. Design the data model based on your own use cases. Consider the main [concepts](../concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fits your data.
 3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
 4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL.
 5. If you want to run real-time analysis based on time series data, including various dashboards, use the TDengine stream processing component instead of deploying complex systems such as Spark or Flink.
@@ -14,7 +14,7 @@ Before creating an application to process time-series data with TDengine, consid
 7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
 8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
 
-This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/).
+This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](../reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/).
 
 If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.
 
@@ -72,7 +72,7 @@ For all the dnodes in a TDengine cluster, the below parameters must be configure
 
 ## Start Cluster
 
-The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
+The first dnode can be started following the instructions in [Get Started](../../get-started/). Then TDengine CLI `taos` can be launched to execute the command `show dnodes`; for example, the output is as follows:
 
 ```
 taos> show dnodes;
@@ -90,7 +90,7 @@ From the above output, it is shown that the end point of the started dnode is "h
 
 There are a few steps necessary to add other dnodes in the cluster.
 
-Second, we can start `taosd` as instructed in [Get Started](/get-started/).
+Second, we can start `taosd` as instructed in [Get Started](../../get-started/).
 
 Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
 
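The command itself falls outside this hunk; its usual form is sketched below, assuming the second dnode's endpoint is `h2.tdengine.com:6030` (an assumption, since the hunk does not show it):

```sql
-- Run on the first dnode's CLI to register the new dnode with the cluster.
CREATE DNODE "h2.tdengine.com:6030";
```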
@@ -26,7 +26,6 @@ database_option: {
 | PAGESIZE value
 | PRECISION {'ms' | 'us' | 'ns'}
 | REPLICA value
-| RETENTIONS ingestion_duration:keep_duration ...
 | WAL_LEVEL {1 | 2}
 | VGROUPS value
 | SINGLE_STABLE {0 | 1}
@@ -61,7 +60,6 @@ database_option: {
 - PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
 - PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
 - REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
-- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
 - WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
 - 1: WAL is enabled but fsync is disabled.
 - 2: WAL and fsync are both enabled.
@@ -32,9 +32,6 @@ table_options:
 
 table_option: {
 COMMENT 'string_value'
-| WATERMARK duration[,duration]
-| MAX_DELAY duration[,duration]
-| ROLLUP(func_name [, func_name] ...)
 | SMA(col_name [, col_name] ...)
 | TTL value
 }
@@ -54,11 +51,8 @@ table_option: {
 
 **Parameter description**
 1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables.
-2. WATERMARK: specifies the time after which the window is closed. The default value is 5 seconds. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
-3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
-4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
-5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables.
-6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creatinga table, after the time period for which the table has been existing is over TTL, TDengine will automatically delete the table. Please be noted that the system may not delete the table at the exact moment that the TTL expires but guarantee there is such a system and finally the table will be deleted. The unit of TTL is in days. The default value is 0, i.e. never expire.
+2. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum. This parameter can be used with supertables and standard tables.
+3. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine automatically deletes the table once it has existed longer than the TTL. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees that the table is eventually deleted. The unit of TTL is days. The default value is 0, i.e. never expire.
 
 ## Create Subtables
 
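A brief sketch of the TTL option described in item 3, reusing the `meters` STable from the documentation examples (the table and tag values are illustrative; TTL is in days):

```sql
-- Subtable that TDengine may drop automatically once it has existed ~90 days.
CREATE TABLE d1003 USING meters TAGS ('California.Campbell', 3) TTL 90;
```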
@@ -5,7 +5,8 @@ description: This document describes how to insert data into TDengine.
 ---
 
 ## Syntax
+Writing records supports two syntaxes: normal syntax and super table syntax. In the normal syntax, the table name immediately following `INSERT INTO` is a subtable name or a regular table name. In the super table syntax, it is a super table name.
+### Normal Syntax
 ```sql
 INSERT INTO
 tb_name
@@ -20,6 +21,15 @@ INSERT INTO
 
 INSERT INTO tb_name [(field1_name, ...)] subquery
 ```
+### Super Table Syntax
+```sql
+INSERT INTO
+stb1_name [(field1_name, ...)]
+VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+[stb2_name [(field1_name, ...)]
+VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+...];
+```
 
 **Timestamps**
 
@ -32,26 +42,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||||
|
|
||||||
**Syntax**

1. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value (see the sketch after this list).

2. The VALUES clause inserts one or more rows of data into a table.

3. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.

4. A single `INSERT ... VALUES` statement or a single `INSERT ... FILE` statement can write data to multiple tables.

5. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement fails. For example, the following statement will not create a table, because the latter part of the statement is invalid:

```sql
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```

6. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This is because vnodes perform write operations independently of each other; one vnode failing to write data does not affect the ability of the other vnodes to write successfully.
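To make rules 1, 2, and 4 concrete, here is a minimal sketch. The subtable names `d1001` and `d1002` and the column layout (ts, current, voltage, phase) are assumptions borrowed from the demo schema used elsewhere in this document:

```sql
-- Rule 1: insert into specified columns; the omitted columns are set to NULL.
INSERT INTO d1001 (ts, current) VALUES ('2021-07-13 14:06:32.272', 10.2);

-- Rules 2 and 4: one statement can carry several rows and several tables.
INSERT INTO d1001 VALUES ('2021-07-13 14:06:33.272', 10.2, 219, 0.32)
                         ('2021-07-13 14:06:34.272', 10.3, 218, 0.25)
            d1002 VALUES ('2021-07-13 14:06:33.272', 11.5, 221, 0.30);
```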
**Normal Syntax**

1. The USING clause automatically creates the specified subtable if it does not exist. If it is unknown whether the table already exists, the table can be created automatically while inserting, using the SQL statement below. To use this functionality, a supertable must be used as the template and tag values must be provided. Any tags that you do not specify are assigned a null value.

2. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and it does not support automatic table creation (see the sketch after this list).
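A brief sketch of both rules. It assumes the `meters` supertable with tags (location, groupId) from the surrounding examples, and it assumes `d2002` already exists, since `INSERT ... subquery` does not create tables automatically:

```sql
-- Rule 1: auto-create subtable d2001 if it does not exist;
-- the omitted location tag is assigned a NULL value.
INSERT INTO d2001 USING meters (groupId) TAGS (2)
    VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);

-- Rule 2: copy rows from an existing table through a subquery.
INSERT INTO d2002 (ts, current, voltage, phase)
    SELECT ts, current, voltage, phase FROM d2001;
```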
**Super Table Syntax**

1. The tbname column must be included in the field_name list; it represents the name of the child table. This column is of string type, and the `.` character is not permitted in the tbname column.

2. Tag columns are eligible for inclusion in the field_name list. If the specified child table does not exist, a new child table is created with the provided tag values. If no tag values are specified, the newly created table has all NULL tag values. Tag values of existing child tables remain unchanged.

3. Parameter binding is not supported.
## Insert a Record

Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the statement below.
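The concrete statement referred to here sits outside this excerpt; the following is a minimal sketch, assuming the demo subtable `d1001` with columns (ts, current, voltage, phase):

```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
```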
@@ -134,3 +152,14 @@ When writing data from a file, you can automatically create the specified subtab
```sql
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
    d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
## Super Table Syntax

Automatically create tables, with the table name specified through the `tbname` column:

```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
    values('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
```
@@ -53,7 +53,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [

```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```

For more information about user-defined functions, see [User-Defined Functions](../../develop/udf).
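Once registered, an aggregate UDF is called like a built-in aggregate function. A small usage sketch for the `l2norm` function created above, assuming a `meters` table with a `current` column:

```sql
SELECT l2norm(current) FROM meters;
```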
## Manage UDF
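The management statements themselves fall outside this excerpt; below is a sketch of the typical lifecycle, assuming the `SHOW FUNCTIONS` and `DROP FUNCTION` statements of TDengine SQL:

```sql
SHOW FUNCTIONS;        -- list the UDFs currently registered
DROP FUNCTION l2norm;  -- unregister a UDF by name
```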
@@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)

## Import using taosdump

TDengine provides `taosdump`, a convenient tool for importing and exporting data. It can be used to export data from one TDengine cluster and import it into another. For details on using `taosdump`, please refer to the taosdump documentation.
@@ -19,4 +19,4 @@ The data of table or STable specified by `tb_name` will be exported into a file

## Export Using taosdump

With `taosdump`, you can choose to export the data of all databases, a single database, a table, or a STable. You can also choose to export only the data within a time range, or even only the schema definition of a table. For details on using `taosdump`, please refer to the taosdump documentation.
@@ -11,8 +11,6 @@ The collection of the monitoring information is enabled by default, but can be d

TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, together with Grafana, to monitor a TDengine cluster.

A script, `TDinsight.sh`, is provided to deploy TDinsight automatically.

Download `TDinsight.sh` with the command below:

@@ -43,7 +41,7 @@ Launch `TDinsight.sh` with the command above and restart Grafana, then open Dash
## log database

The data for the TDinsight dashboard is stored in the `log` database by default. You can change this in taosKeeper's config file; for more information, please refer to the [taoskeeper document](../../reference/taosKeeper). taosKeeper creates the `log` database on startup.
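Because the monitoring data lands in ordinary tables, it can be inspected with plain SQL. A quick sketch, assuming taosKeeper has already populated the `log` database:

```sql
USE log;
SHOW TABLES;                         -- list the monitoring tables
SELECT * FROM cluster_info LIMIT 1;  -- peek at one row of cluster metrics
```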
### cluster\_info table
@@ -514,4 +514,4 @@ Response body:

## Reference

[taosAdapter](../taosadapter/)
@@ -24,7 +24,7 @@ The dynamic libraries for the TDengine client driver are located in.

## Supported platforms

Please refer to the [list of supported platforms](../#supported-platforms).

## Supported versions

@@ -32,7 +32,7 @@ The version number of the TDengine client driver and the version number of the T

## Installation Steps

Please refer to the [Installation Steps](../#installation-steps) for TDengine client driver installation.

## Establishing a connection
@@ -394,7 +394,7 @@ The specific functions related to the interface are as follows (see also the [pr

### Schemaless Writing API

In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a supertable/subtable structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../schemaless/), and the C/C++ API used with it is described here.

- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
|
@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
|
||||||
|
|
||||||
| taos-jdbcdriver version | major changes | TDengine version |
|
| taos-jdbcdriver version | major changes | TDengine version |
|
||||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||||
|
| 3.2.7 | Support VARBINARY and GEOMETRY types, and add time zone support for native connections. Support websocket auto reconnection | 3.2.0.0 or later |
|
||||||
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
|
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
|
||||||
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
||||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
||||||
|
@@ -147,7 +148,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the

**Note**: Only TAG supports JSON types.
Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended for use. Please use the VARBINARY type instead.
The GEOMETRY type is binary data in little-endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../../taos-sql/data-type/).
For the WKB specification, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/).
For the Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is a [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java).
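A short SQL sketch of the two types discussed above. The table name, column sizes, and sample values are assumptions for illustration; it also assumes that GEOMETRY values may be supplied as WKT text and VARBINARY values as `\x`-prefixed hex:

```sql
CREATE TABLE sensor_geo (ts TIMESTAMP, payload VARBINARY(64), geo GEOMETRY(512));

-- The WKT literal is stored as little-endian WKB on the server side.
INSERT INTO sensor_geo VALUES (NOW, '\x7f8290', 'POINT(1.0 1.0)');
```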
@@ -159,7 +160,7 @@ For Java connector, the jts library can be used to easily create GEOMETRY type o

Before using the Java connector to connect to the database, the following conditions are required:

- Java 1.8 or above runtime environment and Maven 3.6 or above installed
- TDengine client driver installed (required for native connections, not required for REST connections); please refer to [Installing Client Driver](../#Install-Client-Driver)

### Install the connectors
|
@ -178,7 +179,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.2.2</version>
|
<version>3.2.7</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -367,7 +368,7 @@ The configuration parameters in properties are as follows.
|
||||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
|
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
|
||||||
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
|
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
|
||||||
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
|
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
|
||||||
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
|
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../config/#Client-Only).
|
||||||
|
|
||||||
### Priority of configuration parameters
|
### Priority of configuration parameters
|
||||||
|
|
||||||
|
|
|
@ -74,7 +74,7 @@ If it is a TDengine error, you can get the error code and error information in t
|
||||||
### Pre-installation preparation
|
### Pre-installation preparation
|
||||||
|
|
||||||
* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above)
|
* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above)
|
||||||
* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps
|
* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#install-client-driver) for specific steps
|
||||||
|
|
||||||
Configure the environment variables and check the command.
|
Configure the environment variables and check the command.
|
||||||
|
|
||||||
|
|
|
@ -80,7 +80,7 @@ Note: Only TAG supports JSON types
|
||||||
### Pre-installation preparation
|
### Pre-installation preparation
|
||||||
|
|
||||||
* Install the Rust development toolchain
|
* Install the Rust development toolchain
|
||||||
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
|
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](../#install-client-driver)
|
||||||
|
|
||||||
### Install the connectors
|
### Install the connectors
|
||||||
|
|
||||||
|
|
|
@@ -7,7 +7,7 @@ description: This document describes taospy, the TDengine Python connector.

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and the [REST interface](../../rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).

`taos-ws-py` is an optional package that enables using WebSocket to connect to TDengine.
@@ -17,7 +17,7 @@ The direct connection to the server using the native interface provided by the c

The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).

## Supported platforms

- The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.

### Supported features
@@ -95,7 +95,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the

1. Install Python. The recent taospy package requires Python 3.6.2+. Earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to the [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
If you use a native connection, you will also need to [Install Client Driver](../#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.

### Install via pip
|
@ -436,11 +436,22 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
|
||||||
:::note
|
:::note
|
||||||
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
|
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
|
||||||
|
|
||||||
|
The best practice for TaosCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="rest" label="REST connection">
|
<TabItem value="rest" label="REST connection">
|
||||||
|
|
||||||
|
##### Use of the RestClient class
|
||||||
|
|
||||||
|
The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
|
||||||
|
|
||||||
|
```python title="Use of RestClient"
|
||||||
|
{{#include docs/examples/python/rest_client_example.py}}
|
||||||
|
```
|
||||||
|
|
||||||
|
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||||
|
|
||||||
##### Use of TaosRestCursor class
|
##### Use of TaosRestCursor class
|
||||||
|
|
||||||
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
|
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
|
||||||
|
@@ -452,15 +463,9 @@ The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.

- `cursor.rowcount`: For write operations, returns the number of rows successfully written. For query operations, returns the number of rows in the result set.
- `cursor.description`: Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.

:::note
The best practice for TaosRestCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
:::

</TabItem>
<TabItem value="websocket" label="WebSocket connection">
@@ -496,7 +501,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al

<TabItem value="rest" label="REST connection">

The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python
{{#include docs/examples/python/rest_client_example.py}}
```
@@ -554,6 +559,16 @@ The `TaosConnection` class and the `TaosResult` class already implement all the

</TabItem>
<TabItem value="rest" label="REST connection">

##### Use of the RestClient class

The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
```

For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).

##### Use of TaosRestCursor class

Connect in the same way as described above, but add the `req_id` argument.
@@ -565,16 +580,6 @@ As the way to connect introduced above but add `req_id` argument.

- `cursor.rowcount`: For write operations, returns the number of rows successfully written. For query operations, returns the number of rows in the result set.
- `cursor.description`: Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.

</TabItem>

<TabItem value="websocket" label="WebSocket connection">
@@ -28,7 +28,7 @@ The REST connector supports all platforms that can run Node.js.

## Version support

Please refer to the [version support list](../#version-support)

## Supported features

@@ -58,7 +58,7 @@ Please refer to [version support list](/reference/connector#version-support)

### Pre-installation preparation

- Install the Node.js development environment
- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.

<Tabs defaultValue="Linux">
<TabItem value="Linux" label="Linux system installation dependencies">
|
@ -36,7 +36,7 @@ Please note TDengine does not support 32bit Windows any more.
|
||||||
|
|
||||||
## Version support
|
## Version support
|
||||||
|
|
||||||
Please refer to [version support list](/reference/connector#version-support)
|
Please refer to [version support list](../#version-support)
|
||||||
|
|
||||||
## Supported features
|
## Supported features
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
|
|
||||||
* Install the [.NET SDK](https://dotnet.microsoft.com/download)
|
* Install the [.NET SDK](https://dotnet.microsoft.com/download)
|
||||||
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
|
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
|
||||||
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
|
* Install TDengine client driver, please refer to [Install client driver](../#install-client-driver) for details
|
||||||
|
|
||||||
### Install `TDengine.Connector`
|
### Install `TDengine.Connector`
|
||||||
|
|
||||||
|
|
|
@@ -40,7 +40,7 @@ Because the version of TDengine client driver is tightly associated with that of

### Install TDengine Client Driver

Regarding how to install the TDengine client driver, please refer to [Install Client Driver](../#installation-steps)

### Install php-tdengine
@@ -2,7 +2,7 @@

:::info

Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either the standard TDengine server installation package or the [TDengine client installation package](../../get-started/). For Windows development, you need to install the corresponding Windows client; please refer to [Install TDengine](../../get-started/package).

- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
@@ -186,7 +186,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl

### TDengine RESTful interface

You can use any client that supports the HTTP protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/rest/sql`. See the [official documentation](../rest-api/) for details.

### InfluxDB
|
@ -202,7 +202,7 @@ Support InfluxDB query parameters as follows.
|
||||||
- `precision` The time precision used by TDengine
|
- `precision` The time precision used by TDengine
|
||||||
- `u` TDengine user name
|
- `u` TDengine user name
|
||||||
- `p` TDengine password
|
- `p` TDengine password
|
||||||
- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
|
- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](../../taos-sql/table/#create-table)
|
||||||
|
|
||||||
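A sketch of what the `ttl` parameter corresponds to on the SQL side, assuming the `meters` supertable from the taos-sql examples and a TTL of 100 days:

```sql
CREATE TABLE d1003 USING meters TAGS ('California.SanFrancisco', 2) TTL 100;
```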
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDen

There are two ways to install taosBenchmark:

- Installing the official TDengine installer automatically installs taosBenchmark.

- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -31,7 +31,7 @@ There are two ways to install taosdump:

2. back up multiple specified databases: use the `-D db1,db2,...` parameter;
3. back up some super or normal tables in the specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` parameter. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces.
4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is generated by TDengine's own operation, and taosdump does not back up the log database by default. If users need to back up the log database, they can use the `-a` or `--allow-sys` command-line parameter.
5. loose mode backup: taosdump versions 1.4.1 onward provide the `-n` and `-L` parameters for backing up data without escape characters ("loose" mode), which can reduce the backup time and the backup data footprint if table names, column names, and tag names do not use escape characters. If you are unsure about the `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](../../taos-sql/escape) for a description of escaped characters.

:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump parses only the schema.
@@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is

## Installation

If executed on the TDengine server side, there is no need for additional installation steps, as TDengine CLI is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](../connector/).

## Execution

@@ -18,7 +18,7 @@ To access the TDengine CLI, you can execute `taos` command-line utility from a t

```
taos
```

TDengine CLI displays a welcome message and version information if it successfully connects to the TDengine service. If it fails, TDengine CLI prints an error message. See [FAQ](../../train-faq/faq) to troubleshoot connection failures to the server. The TDengine CLI prompt is as follows:

```cmd
taos>
```
@@ -87,7 +87,7 @@ Ensure that your firewall rules do not block TCP port 6042 on any host in the c

| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :--------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; after 2.4.0.0 refer to [taosAdapter](../taosadapter/) |
| TCP | 6043 | Service port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
@@ -116,7 +116,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam

10. taos.cfg adds the smlTsDefaultName configuration (a string value), which works only on the client side. Once configured, the name of the time column created by schemaless automatic table creation can be set through it. If it is not configured, the default is _ts.
11. Super table names and child table names are case sensitive.

:::tip
All processing logic of schemaless still follows TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](../../taos-sql/limit) for specific constraints in this area.
:::

## Time resolution recognition
@@ -218,7 +218,7 @@ The example to query the average system memory usage for the specified interval

### Importing the Dashboard

You can install the TDinsight dashboard from the data source configuration page (for example, `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for a TDengine cluster. Ensure that you use TDinsight for 3.x. Please note that TDinsight for 3.x requires taosKeeper to be configured and running correctly.


@@ -16,7 +16,7 @@ Prometheus data can be stored in TDengine via the `remote_write` interface with

Writing Prometheus data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Prometheus has been installed. Please refer to the [official documentation](https://prometheus.io/docs/prometheus/latest/installation/) for installing Prometheus

## Configuration steps
@@ -14,7 +14,7 @@ Telegraf's data can be written to TDengine by simply adding the output configura

Writing Telegraf data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
- Telegraf collects the running status measurements of the current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to feed data in [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) into Telegraf and then forward it to TDengine.
@@ -73,6 +73,6 @@ Query OK, 3 row(s) in set (0.013269s)

- TDengine takes InfluxDB-format data and creates a unique ID for each table name according to its naming rule.
  Users can configure the `smlChildTableName` parameter to generate specified table names if needed, and must then insert data in the corresponding format.
  For example, add `smlChildTableName=tname` in the taos.cfg file and insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will then be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to create the table automatically and the tag_sets of the other lines are ignored. Please refer to [TDengine Schemaless](../../reference/schemaless/#Schemaless-Line-Protocol). A verification sketch follows this note.
:::
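A quick verification sketch; the database name `test` is an assumption for illustration:

```sql
USE test;
SHOW TABLES LIKE 'cpu1';  -- the child table created from tname=cpu1
SELECT * FROM cpu1;
```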
@@ -15,7 +15,7 @@ You can write the data collected by collectd to TDengine by simply modifying the

Writing collectd data to TDengine requires several preparations.
- The TDengine cluster is deployed and running properly
- taosAdapter is installed and running; please refer to the [taosAdapter manual](../../reference/taosadapter) for details
- collectd has been installed. Please refer to the [official documentation](https://collectd.org/download.shtml) to install collectd

## Configuration steps
@@ -14,7 +14,7 @@ You can write the data collected by icinga2 to TDengine by simply modifying the

Writing icinga2 data to TDengine requires the following preparations.
- The TDengine cluster is deployed and working properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- icinga2 has been installed. Please refer to the [official documentation](https://icinga.com/docs/icinga-2/latest/doc/02-installation/) for icinga2 installation

## Configuration steps
@@ -14,7 +14,7 @@ You can write the data collected by TCollector to TDengine by simply changing th

Writing data to TDengine via TCollector requires the following preparations.
- The TDengine cluster has been deployed and is working properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- TCollector has been installed. Please refer to the [official documentation](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html#installation-of-tcollector) for TCollector installation

## Configuration steps
@@ -82,7 +82,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.

```
Basic cm9vdDp0YW9zZGF0YQ==
```

Please refer to the [TDengine REST API documentation](../../reference/rest-api/) for details on authorization.

Enter the rule engine replacement template in the message body:
@@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se

1. Linux operating system
2. Java 8 and Maven installed
3. Git/curl/vi is installed
4. TDengine is installed and started.

## Install Kafka
|
@@ -94,7 +94,7 @@ The output as below:
 
 The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
 
-TDengine Sink Connector internally uses TDengine [modeless write interface](/reference/connector/cpp#modeless write-api) to write data to TDengine, currently supports data in three formats: [InfluxDB line protocol format](/develop /insert-data/influxdb-line), [OpenTSDB Telnet protocol format](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json).
+TDengine Sink Connector internally uses the TDengine [schemaless write interface](../../reference/connector/cpp#modeless write-api) to write data to TDengine. It currently supports three data formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json).
 
 The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
@@ -213,7 +213,7 @@ If you see the above data, the synchronization is successful. If not, check the
 
 The role of the TDengine Source Connector is to push all the data of a specific TDengine database after a particular time to Kafka. The implementation principle of TDengine Source Connector is to first pull historical data in batches and then synchronize incremental data with the strategy of the regular query. At the same time, the changes in the table will be monitored, and the newly added table can be automatically synchronized. If Kafka Connect is restarted, synchronization will resume where it left off.
 
-TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
+TDengine Source Connector converts the data in a TDengine table into [InfluxDB Line protocol format](../../develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json) and then writes it to Kafka.
 
 The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
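
As a rough illustration of the record format the Source Connector emits, here is a hand-written Python sketch that assembles one InfluxDB line-protocol record; it is our illustration, not connector code, and the meters field names are taken from the examples in these docs:

```python
def to_influx_line(measurement: str, tags: dict, fields: dict, ts_ns: int) -> str:
    # Line protocol: measurement,tag1=v1,... field1=v1,... timestamp
    tag_part = ",".join(f"{k}={v}" for k, v in tags.items())
    field_part = ",".join(f"{k}={v}" for k, v in fields.items())
    return f"{measurement},{tag_part} {field_part} {ts_ns}"

print(to_influx_line(
    "meters",
    {"location": "California.SanFrancisco", "groupid": 2},
    {"current": 10.3, "voltage": 219, "phase": 0.31},
    1648432611249000000,  # nanosecond timestamp
))
# meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219,phase=0.31 1648432611249000000
```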
@@ -14,4 +14,4 @@ Check the running status of taosAdapter.
 systemctl status taosadapter
 ```
 
-taosAdapter Please refer to the `taosadapter --help` command output and [reference documentation](/reference/taosadapter) for detailed configuration parameters and usage of taosAdapter.
+Please refer to the `taosadapter --help` command output and the [reference documentation](../../reference/taosadapter) for detailed configuration parameters and usage of taosAdapter.
@@ -41,7 +41,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
 
 ### Install Grafana Plugin and Configure Data Source
 
-Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)
+Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
 
 ### Modify /etc/telegraf/telegraf.conf
@@ -44,7 +44,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
 
 ### Install Grafana Plugin and Configure Data Source
 
-Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)
+Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
 
 ### Configure collectd
@@ -70,7 +70,7 @@ You can use collectd and push the data to taosAdapter utilizing the write_tsdb p
 
 - **Tuning the Dashboard system**
 
-After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana).
+After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](../../third-party/grafana).
 
 TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use.
@@ -396,7 +396,7 @@ Hard disk writing performance has little effect on TDengine. The TDengine writin
 
 ### Computational resource estimates
 
-Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
+Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](../../operation/), the system consumes less than 1 CPU core at 22,000 writes per second.
 
 In estimating the CPU resources consumed by queries, assume the application requires the database to provide 10,000 QPS and that each query consumes about 1 ms of CPU time. Each core then provides 1,000 QPS, so satisfying 10,000 QPS requires at least 10 cores. For the system as a whole to run at less than 50% CPU load, the cluster needs twice as many cores, i.e. 20 cores.
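
The 20-core figure above follows from simple utilization arithmetic; as a sketch:

```latex
\text{cores}_{\min} = \mathrm{QPS} \times t_{\mathrm{CPU}}
                    = 10{,}000\,\mathrm{s}^{-1} \times 0.001\,\mathrm{s} = 10,
\qquad
\text{cores}_{50\%} = \frac{\text{cores}_{\min}}{0.5} = 20
```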
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
 
 import Release from "/components/ReleaseV3";
 
+## 3.2.1.0
+
+<Release type="tdengine" version="3.2.1.0" />
+
 ## 3.2.0.0
 
 <Release type="tdengine" version="3.2.0.0" />
@@ -33,6 +33,8 @@ data = cursor.fetchall()
 print(column_names)
 for row in data:
     print(row)
+# close cursor
+cursor.close()
 
 # output:
 # inserted row count: 8
@@ -33,6 +33,8 @@ data = cursor.fetchall()
 print(column_names)
 for row in data:
     print(row)
+# close cursor
+cursor.close()
 
 # output:
 # inserted row count: 8
@@ -36,6 +36,7 @@ REST connections are supported on all platforms that can run Java.
 
 | taos-jdbcdriver version | major changes | TDengine version |
 | :------------------: | :-----------------------------------------------------------------------------------------: | :----------------: |
+| 3.2.7 | Support VARBINARY and GEOMETRY types; add time zone setting for native connections; add WebSocket auto-reconnect. | 3.2.0.0 and later |
 | 3.2.5 | Data subscription adds the committed() and assignment() methods | 3.1.0.3 and later |
 | 3.2.4 | Data subscription adds the enable.auto.commit parameter and the unsubscribe() method over WebSocket connections. | - |
 | 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
@@ -177,7 +178,7 @@ In a Maven project, add the following dependency to pom.xml:
 <dependency>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>3.2.2</version>
+    <version>3.2.7</version>
 </dependency>
 ```
@@ -1097,7 +1098,6 @@ TaosConsumer consumer = new TaosConsumer<>(config);
 - httpPoolSize: maximum number of parallel requests over a single connection. Effective only for WebSocket connections.
 For other parameters see the [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group). Note that starting with TDengine server 3.2.0.0, the default value of auto.offset.reset in message subscription has changed.
-
 
 #### Subscribe to and consume data
 
 ```java
@@ -436,11 +436,23 @@ now is an internal system function that defaults to the current time of the client machine. now
 
 :::note
 The TaosCursor class uses a native connection for writes and queries. In a multi-threaded client, a cursor instance must stay dedicated to a single thread; sharing it across threads leads to incorrect results.
 
+Best practice for TaosCursor is to create a cursor when a query starts and close it as soon as you are done; avoid reusing the same cursor for multiple executions.
 :::
 
 </TabItem>
 <TabItem value="rest" label="REST connection">
 
+##### Using the RestClient class
+
+The `RestClient` class is a thin wrapper over the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+
+```python title="Using RestClient"
+{{#include docs/examples/python/rest_client_example.py}}
+```
+
+For a more detailed introduction to the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
+
 ##### Using the TaosRestCursor class
 
 The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
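
A small sketch of the create-use-close pattern recommended in the note above, using `contextlib.closing` so the cursor is released even if the query raises; the connection parameters are illustrative defaults:

```python
from contextlib import closing

import taos  # TDengine native Python connector (taospy)

conn = taos.connect(host="localhost", user="root", password="taosdata")
try:
    # One cursor per query; closed as soon as the result is consumed.
    with closing(conn.cursor()) as cursor:
        cursor.execute("SELECT server_version()")
        for row in cursor.fetchall():
            print(row)
finally:
    conn.close()
```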
@@ -452,15 +464,10 @@ The TaosCursor class uses a native connection for writes and queries. In a multi-threaded client
 - `cursor.rowcount`: for write operations, returns the number of successfully written records; for queries, returns the number of rows in the result set.
 - `cursor.description`: returns field description information. For the exact format, see [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html).
 
-##### Using the RestClient class
+:::note
+Best practice for TaosRestCursor is to create a cursor when a query starts and close it as soon as you are done; avoid reusing the same cursor for multiple executions.
+:::
 
-The `RestClient` class is a thin wrapper over the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
-
-```python title="Using RestClient"
-{{#include docs/examples/python/rest_client_example.py}}
-```
-
-For a more detailed introduction to the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
-
 </TabItem>
 <TabItem value="websocket" label="WebSocket connection">
|
@ -557,6 +564,16 @@ RestClient 类是对于 REST API 的直接封装。它只包含一个 sql() 方
|
||||||
|
|
||||||
类似上文介绍的使用方法,增加 `req_id` 参数。
|
类似上文介绍的使用方法,增加 `req_id` 参数。
|
||||||
|
|
||||||
|
##### RestClient 类的使用
|
||||||
|
|
||||||
|
`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
|
||||||
|
|
||||||
|
```python title="RestClient 的使用"
|
||||||
|
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
|
||||||
|
```
|
||||||
|
|
||||||
|
对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
|
||||||
|
|
||||||
##### TaosRestCursor 类的使用
|
##### TaosRestCursor 类的使用
|
||||||
|
|
||||||
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
|
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
|
||||||
|
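
A minimal sketch of calling `sql()` with a request id over the REST wrapper described above; the endpoint and the exact keyword name are assumptions based on the surrounding text, so treat this as illustrative rather than authoritative:

```python
from taosrest import RestClient

# Assumed local taosAdapter endpoint with default credentials.
client = RestClient("http://localhost:6041", user="root", password="taosdata")

# req_id lets the server correlate this request in its logs for tracing.
result = client.sql("SELECT server_version()", req_id=1)
print(result)
```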
@@ -568,15 +585,6 @@ The RestClient class is a thin wrapper over the REST API. It contains only a sql() method
 - `cursor.rowcount`: for write operations, returns the number of successfully written records; for queries, returns the number of rows in the result set.
 - `cursor.description`: returns field description information. For the exact format, see [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html).
 
-##### Using the RestClient class
-
-The `RestClient` class is a thin wrapper over the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
-
-```python title="Using RestClient"
-{{#include docs/examples/python/rest_client_with_req_id_example.py}}
-```
-
-For a more detailed introduction to the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
-
 </TabItem>
 
 <TabItem value="websocket" label="WebSocket connection">
@@ -105,7 +105,7 @@ spec:
         # TZ for timezone settings, we recommend to always set it.
         - name: TZ
           value: "Asia/Shanghai"
-        # TAOS_ prefix will configured in taos.cfg, strip prefix and camelCase.
+        # Environment variables with the TAOS_ prefix are parsed and converted into the corresponding parameters in taos.cfg. For example, serverPort in taos.cfg is configured via TAOS_SERVER_PORT when deploying with Kubernetes.
        - name: TAOS_SERVER_PORT
          value: "6030"
        # Must set if you want a cluster.
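
A sketch of the naming rule the new comment describes; the conversion function is our illustration of the stated convention, not code shipped in the TDengine images:

```python
import re

def taos_cfg_to_env(name: str) -> str:
    # camelCase taos.cfg key -> TAOS_-prefixed upper snake-case env var
    snake = re.sub(r"(?<!^)(?=[A-Z])", "_", name)
    return "TAOS_" + snake.upper()

assert taos_cfg_to_env("serverPort") == "TAOS_SERVER_PORT"
print(taos_cfg_to_env("monitorFqdn"))  # TAOS_MONITOR_FQDN
```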
@@ -26,7 +26,6 @@ database_option: {
  | PAGESIZE value
  | PRECISION {'ms' | 'us' | 'ns'}
  | REPLICA value
- | RETENTIONS ingestion_duration:keep_duration ...
  | WAL_LEVEL {1 | 2}
  | VGROUPS value
  | SINGLE_STABLE {0 | 1}
@@ -53,7 +52,7 @@ database_option: {
   - 1: one-stage compression.
   - 2: two-stage compression.
 - DURATION: the time span of the data stored in one data file. Units may be attached, e.g. DURATION 100h or DURATION 10d; m (minutes), h (hours), and d (days) are supported. Without a unit the default is days, so DURATION 50 means 50 days.
-- WAL_FSYNC_PERIOD: when the WAL parameter is set to 2, the period at which data is flushed to disk. The default is 3000, in milliseconds. The minimum is 0, meaning every write is flushed immediately; the maximum is 180000, i.e. three minutes.
+- WAL_FSYNC_PERIOD: when the WAL_LEVEL parameter is set to 2, sets the period at which data is flushed to disk. The default is 3000, in milliseconds. The minimum is 0, meaning every write is flushed immediately; the maximum is 180000, i.e. three minutes.
 - MAXROWS: maximum number of records in a file block, 4096 by default.
 - MINROWS: minimum number of records in a file block, 100 by default.
 - KEEP: the number of days data files are kept, 3650 by default, in the range [1, 365000]; it must be at least 3 times the DURATION value. The database automatically deletes data older than KEEP. KEEP accepts units, e.g. KEEP 100h or KEEP 10d, supporting m (minutes), h (hours), and d (days); without a unit, e.g. KEEP 50, the default unit is days. The Enterprise Edition supports [tiered storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so multiple retention times can be set (comma-separated, up to 3, with keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support tiered storage (configuring multiple retention times has no effect, and the largest one is used).
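
A hedged sketch of creating a database with the options described above from Python; the database name and option values are illustrative:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# 10-day data files kept for 3650 days; WAL fsynced every 3000 ms at WAL_LEVEL 2.
conn.execute(
    "CREATE DATABASE IF NOT EXISTS power "
    "DURATION 10d KEEP 3650 WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000"
)
conn.close()
```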
@@ -61,7 +60,6 @@ database_option: {
 - PAGESIZE: the page size of a VNODE's metadata storage engine, in KB; 4 KB by default, range 1 to 16384 (1 KB to 16 MB).
 - PRECISION: the database's timestamp precision; ms for milliseconds, us for microseconds, ns for nanoseconds; the default is ms.
 - REPLICA: the number of database replicas, 1 or 3, defaulting to 1. In a cluster, the replica count must not exceed the number of DNODEs.
-- RETENTIONS: the aggregation periods and retention durations of the data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means raw data is collected every 15 seconds and kept for 7 days, 1-minute aggregates are kept for 21 days, and 15-minute aggregates are kept for 50 days. Exactly three retention tiers are supported.
 - WAL_LEVEL: WAL level, 1 by default.
   - 1: write WAL but do not execute fsync.
   - 2: write WAL and execute fsync.
@@ -30,9 +30,6 @@ table_options:
 
 table_option: {
     COMMENT 'string_value'
-  | WATERMARK duration[,duration]
-  | MAX_DELAY duration[,duration]
-  | ROLLUP(func_name [, func_name] ...)
  | SMA(col_name [, col_name] ...)
  | TTL value
 }
@@ -52,11 +49,8 @@ table_option: {
 **Parameter description**
 
 1. COMMENT: table comment. Applicable to supertables, subtables, and regular tables.
-2. WATERMARK: specifies the window close time. The default is 5 seconds, the minimum unit is millisecond, and the range is 0 to 15 minutes; multiple values are comma-separated. Only usable on supertables, and only when the database uses the RETENTIONS parameter.
-3. MAX_DELAY: controls the maximum delay for pushing computed results. The default equals the interval value (capped at the maximum), the minimum unit is millisecond, and the range is 1 millisecond to 15 minutes; multiple values are comma-separated. Note: setting MAX_DELAY too small pushes results too frequently and hurts storage and query performance; use the default unless there is a special need. Only usable on supertables, and only when the database uses the RETENTIONS parameter.
-4. ROLLUP: the aggregate function used for multi-level downsampled results. Only usable on supertables, and only when the database uses the RETENTIONS parameter. It applies to all columns of the supertable except the TS column, and only one aggregate function can be defined, chosen from avg, sum, min, max, last, and first.
-5. SMA: Small Materialized Aggregates, custom block-level pre-computation. Supported types are MAX, MIN, and SUM. Applicable to supertables and regular tables.
-6. TTL: Time to Live, the parameter by which a user specifies a table's lifetime. If set at table creation, TDengine automatically deletes the table once it has existed longer than TTL. The time is approximate: the system does not guarantee deletion exactly on time, only that such a mechanism exists and the table is eventually deleted. TTL is in days, default 0 (unlimited); the expiry time is the table creation time plus TTL. TTL is unrelated to the database KEEP parameter; if KEEP is smaller than TTL, data may already be deleted before the table is.
+2. SMA: Small Materialized Aggregates, custom block-level pre-computation. Supported types are MAX, MIN, and SUM. Applicable to supertables and regular tables.
+3. TTL: Time to Live, the parameter by which a user specifies a table's lifetime. If set at table creation, TDengine automatically deletes the table once it has existed longer than TTL. The time is approximate: the system does not guarantee deletion exactly on time, only that such a mechanism exists and the table is eventually deleted. TTL is in days, default 0 (unlimited); the expiry time is the table creation time plus TTL. TTL is unrelated to the database KEEP parameter; if KEEP is smaller than TTL, data may already be deleted before the table is.
 
 ## Create subtables
@@ -5,7 +5,9 @@ description: detailed syntax for writing data
 ---
 
 ## Insert syntax
+
+Two insert syntaxes are supported: the normal syntax and the supertable syntax. With the normal syntax, the table name immediately following INSERT INTO is a subtable or regular table name; with the supertable syntax, it is a supertable name.
 
+### Normal syntax
 ```sql
 INSERT INTO
     tb_name
@@ -20,6 +22,15 @@ INSERT INTO
 INSERT INTO tb_name [(field1_name, ...)] subquery
 ```
+
+### Supertable syntax
+```sql
+INSERT INTO
+    stb1_name [(field1_name, ...)]
+        VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+    [stb2_name [(field1_name, ...)]
+        VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+    ...];
+```
 
 **About timestamps**
@@ -32,26 +43,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
 
 **Syntax notes**
 
-1. The USING clause is the auto-create-table syntax. When a writer is not sure whether a table exists, the table can be created automatically while writing by using this syntax; if it already exists, no new table is created. Auto-creation requires a supertable as the template and the TAGS values of the data table; a subset of the TAGS columns may be given, and unspecified TAGS columns are set to NULL.
+1. The columns to insert into can be specified; columns not specified are automatically filled with NULL by the database.
 
-2. The columns to insert into can be specified; columns not specified are automatically filled with NULL by the database.
+2. The VALUES clause lists one or more rows of data to insert.
 
-3. The VALUES clause lists one or more rows of data to insert.
+3. The FILE clause reads data from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header.
 
-4. The FILE clause reads data from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header.
+4. Both `INSERT ... VALUES` and `INSERT ... FILE` can insert data into multiple tables in a single INSERT statement.
 
-5. Both `INSERT ... VALUES` and `INSERT ... FILE` can insert data into multiple tables in a single INSERT statement.
+5. An INSERT statement is fully parsed before execution, so for a statement like the following, it can no longer happen that the table is created while the data itself fails:
 
-6. An INSERT statement is fully parsed before execution, so for a statement like the following, it can no longer happen that the table is created while the data itself fails:
 
 ```sql
 INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
 ```
+6. When inserting into multiple subtables, partial failure is still possible: some data may fail to write while the rest succeeds. The subtables may live on different VNODEs; after fully parsing the INSERT statement, the client sends the data to each involved VNODE, and each VNODE writes independently. If one VNODE fails (for example due to a network problem or disk fault), writes on the other VNODEs are unaffected.
 
-7. When inserting into multiple subtables, partial failure is still possible: some data may fail to write while the rest succeeds. The subtables may live on different VNODEs; after fully parsing the INSERT statement, the client sends the data to each involved VNODE, and each VNODE writes independently. If one VNODE fails (for example due to a network problem or disk fault), writes on the other VNODEs are unaffected.
+**Normal syntax notes**
 
-8. `INSERT ... subquery` can insert data from TDengine into the specified table, where subquery can be any query statement. This syntax applies only to subtables and regular tables, and auto-creation is not supported.
+1. The USING clause is the auto-create-table syntax. When a writer is not sure whether a table exists, the table can be created automatically while writing by using this syntax; if it already exists, no new table is created. Auto-creation requires a supertable as the template and the TAGS values of the data table; a subset of the TAGS columns may be given, and unspecified TAGS columns are set to NULL.
+
+2. `INSERT ... subquery` can insert data from TDengine into the specified table, where subquery can be any query statement. This syntax applies only to subtables and regular tables, and auto-creation is not supported.
+
+**Supertable syntax notes**
+
+1. The tbname column must be specified in the field_name list, otherwise an error is reported. The tbname column is the subtable name, a string; its characters need no escaping and must not contain the dot character '.'.
+
+2. Tag columns are supported in the field_name list. If the subtable already exists, specifying tag values does not modify them; if it does not exist, the subtable is created with the specified tag values. If no tag columns are specified, all tag values are set to NULL.
+
+3. Parameter binding is not supported with this syntax.
 ## Insert a single record
 
 Specify the name of an already created subtable and supply one or more rows of data with the VALUES keyword to write them to the database. For example, the following statement writes a single record:
@@ -134,3 +153,12 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
 INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
     d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
 ```
+## Supertable syntax
+
+Tables are created automatically, with the subtable name supplied through the tbname column:
+```sql
+INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
+    VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
+           ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+           ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+```
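
For completeness, a sketch of issuing the supertable insert from Python; it assumes the power database and the meters supertable from the examples above already exist:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", database="power")
# The subtable d31001 is created automatically from the tbname column.
conn.execute(
    "INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) "
    "VALUES ('d31001', 'California.SanFrancisco', 2, "
    "'2021-07-13 14:06:34.630', 10.2, 219, 0.32)"
)
conn.close()
```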
@@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)
 
 ## Import with taosdump
 
-TDengine provides taosdump, a convenient database import/export tool. Data exported by taosdump from one system can be imported into another. For usage details, see: [TDengine data backup tool: taosdump](/reference/taosdump).
+TDengine provides taosdump, a convenient database import/export tool. Data exported by taosdump from one system can be imported into another. For usage details, refer to the taosdump documentation.
@@ -17,5 +17,4 @@ select * from <tb_name> >> data.csv;
 
 ## Export with taosdump
 
-With taosdump, users can export all databases, a single database, or a single table in a database, all data or data within a time range, or even just the table definitions, as needed. For usage details, see:
-[TDengine data backup tool: taosdump](/reference/taosdump).
+With taosdump, users can export all databases, a single database, or a single table in a database, all data or data within a time range, or even just the table definitions, as needed. For usage details, refer to the taosdump documentation.
@@ -218,7 +218,7 @@ docker run -d \
 
 ### Import the Dashboard
 
-On the data source configuration page, you can import the TDinsight panel for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x`. Note that TDinsight for 3.x requires taoskeeper to be running and configured; see the [TDinsight user manual](/reference/tdinsight/).
+On the data source configuration page, you can import the TDinsight panel for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x`. Note that TDinsight for 3.x requires taoskeeper to be running and configured.
 
 ![TDengine Database Grafana plugin import dashboard](./grafana/import_dashboard.webp)
@@ -0,0 +1,69 @@
+---
+title: TSZ Compression Algorithm
+description: TDengine's algorithm for efficient compression of floating-point data
+---
+
+TSZ is an optional compression algorithm that TDengine provides for floating-point data types. It supports the full range from lossy to lossless compression of floats. Compared with the default compression algorithm, TSZ achieves a higher compression ratio; even when switched to lossless mode, its ratio is about twice that of the default.
+
+## Suitable scenarios
+
+- TSZ compresses by predicting the data, so it is better suited to data that changes in a regular pattern
+- TSZ takes longer to compress; it is a good choice when the server has plenty of idle CPU but little storage space
+
+## Usage steps
+- Requires TDengine 3.2.0.0 or later
+- Enable the option
+  Add the following to the taos.cfg configuration to enable the TSZ compression algorithm; once enabled, it replaces the default algorithm.
+  The following setting applies the algorithm to both float and double columns; either type can also be configured alone
+
+```
+lossyColumns float|double
+```
+
+- The service must be restarted for the configuration to take effect
+- The following taosd log output indicates that the feature is active:
+
+```
+02/22 10:49:27.607990 00002933 UTL  lossyColumns     float|double
+```
+
+## Configuration parameters
+
+### fPrecision
+FLOAT type precision control:
+
+| Attribute | Description |
+| --------- | ----------- |
+| Scope | server only |
+| Meaning | compression precision for float values |
+| Range | 0.1 ~ 0.00000001 |
+| Default | 0.00000001 |
+| Note | float values smaller than this have their mantissa truncated |
+
+### dPrecision
+DOUBLE type precision control:
+
+| Attribute | Description |
+| --------- | ----------- |
+| Scope | server only |
+| Meaning | compression precision for double values |
+| Range | 0.1 ~ 0.0000000000000001 |
+| Default | 0.0000000000000001 |
+| Note | double values smaller than this have their mantissa truncated |
+
+### ifAdtFse
+An alternative algorithm, FSE, selectable within TSZ compression; the default is HUFFMAN:
+
+| Attribute | Description |
+| --------- | ----------- |
+| Scope | server only |
+| Meaning | replace the HUFFMAN algorithm with FSE; FSE compresses faster but decompresses slightly slower, so choose it when compression speed matters most |
+| Range | 0: off, 1: on |
+| Default | 0 (off) |
+
+## Notes
+- Once TSZ is enabled, the stored data format cannot be recognized if you roll back to a version earlier than 3.2.0.0
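
A conceptual sketch of what a precision bound like fPrecision means for lossy float storage; this models quantization to a tolerance and is our illustration, not the actual TSZ implementation:

```python
def quantize(value: float, precision: float) -> float:
    # Snap the value to the nearest multiple of `precision`; the discarded
    # remainder is the maximum error the lossy setting tolerates.
    return round(value / precision) * precision

original = 3.14159265
for p in (0.1, 0.001, 0.00000001):
    q = quantize(original, p)
    print(f"precision={p} stored~={q} error={abs(original - q):.9f}")
```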
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
 
 import Release from "/components/ReleaseV3";
 
+## 3.2.1.0
+
+<Release type="tdengine" version="3.2.1.0" />
+
 ## 3.2.0.0
 
 <Release type="tdengine" version="3.2.0.0" />
@@ -67,7 +67,7 @@
 <dependency>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>3.0.0</version>
+    <version>3.2.7</version>
     <!-- <scope>system</scope>-->
     <!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
 </dependency>
@@ -43,6 +43,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
 int32_t s3GetObjectsByPrefix(const char *prefix, const char *path);
 void    s3EvictCache(const char *path, long object_size);
 long    s3Size(const char *object_name);
+int32_t s3GetObjectToFile(const char *object_name, char *fileName);
 
 #ifdef __cplusplus
 }
@@ -258,8 +258,6 @@ typedef struct SQueryTableDataCond {
 int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
 void*   tDecodeDataBlock(const void* buf, SSDataBlock* pBlock);
 
-int32_t tEncodeDataBlocks(void** buf, const SArray* blocks);
-void*   tDecodeDataBlocks(const void* buf, SArray** blocks);
 void    colDataDestroy(SColumnInfoData* pColData);
 
 //======================================================================================================================
@@ -294,7 +292,7 @@ typedef struct STableBlockDistInfo {
   int32_t  defMaxRows;
   int32_t  firstSeekTimeUs;
   uint32_t numOfInmemRows;
-  uint32_t numOfSmallBlocks;
+  uint32_t numOfSttRows;
   uint32_t numOfVgroups;
   int32_t  blockRowsHisto[20];
 } STableBlockDistInfo;
@@ -75,12 +75,15 @@ extern int32_t tsElectInterval;
 extern int32_t tsHeartbeatInterval;
 extern int32_t tsHeartbeatTimeout;
 
+// vnode
+extern int64_t tsVndCommitMaxIntervalMs;
+
 // snode
 extern int32_t tsRsyncPort;
 extern char    tsCheckpointBackupDir[];
 
 // vnode checkpoint
-extern char tsSnodeAddress[];  //127.0.0.1:873
+extern char tsSnodeAddress[];  // 127.0.0.1:873
 
 // mnode
 extern int64_t tsMndSdbWriteDelta;
@@ -104,8 +107,8 @@ extern int32_t tsMonitorMaxLogs;
 extern bool tsMonitorComp;
 
 // audit
 extern bool tsEnableAudit;
 extern bool tsEnableAuditCreateTable;
 
 // telem
 extern bool tsEnableTelem;
@@ -113,9 +116,9 @@ extern int32_t tsTelemInterval;
 extern char     tsTelemServer[];
 extern uint16_t tsTelemPort;
 extern bool     tsEnableCrashReport;
-extern char *tsTelemUri;
-extern char *tsClientCrashReportUri;
-extern char *tsSvrCrashReportUri;
+extern char    *tsTelemUri;
+extern char    *tsClientCrashReportUri;
+extern char    *tsSvrCrashReportUri;
 
 // query buffer management
 extern int32_t tsQueryBufferSize;  // maximum allowed usage buffer size in MB for each data node during query processing
@@ -35,12 +35,14 @@ extern "C" {
 #define TD_MSG_NUMBER_
 #undef TD_MSG_DICT_
 #undef TD_MSG_INFO_
+#undef TD_MSG_RANGE_CODE_
 #undef TD_MSG_SEG_CODE_
 #include "tmsgdef.h"
 
 #undef TD_MSG_NUMBER_
 #undef TD_MSG_DICT_
 #undef TD_MSG_INFO_
+#undef TD_MSG_RANGE_CODE_
 #define TD_MSG_SEG_CODE_
 #include "tmsgdef.h"
 
@@ -48,33 +50,31 @@ extern "C" {
 #undef TD_MSG_DICT_
 #undef TD_MSG_INFO_
 #undef TD_MSG_SEG_CODE_
+#undef TD_MSG_RANGE_CODE_
 #include "tmsgdef.h"
 
 extern char*   tMsgInfo[];
 extern int32_t tMsgDict[];
+extern int32_t tMsgRangeDict[];
 
-#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
-#define TMSG_SEG_SEQ(TYPE)  ((TYPE)&0xff)
-#define TMSG_INFO(TYPE) \
-  ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
-   (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) || \
-   (TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG || (TYPE) < TDMT_VND_TMQ_MAX_MSG \
-       ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
-       : 0
-
-#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
-
 typedef uint16_t tmsg_t;
 
+#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
+#define TMSG_SEG_SEQ(TYPE)  ((TYPE)&0xff)
+#define TMSG_INDEX(TYPE)    (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
+
 static inline bool tmsgIsValid(tmsg_t type) {
-  if (type < TDMT_DND_MAX_MSG || type < TDMT_MND_MAX_MSG || type < TDMT_VND_MAX_MSG || type < TDMT_SCH_MAX_MSG ||
-      type < TDMT_STREAM_MAX_MSG || type < TDMT_MON_MAX_MSG || type < TDMT_SYNC_MAX_MSG || type < TDMT_VND_STREAM_MSG ||
-      type < TDMT_VND_TMQ_MSG || type < TDMT_VND_TMQ_MAX_MSG) {
-    return true;
-  } else {
-    return false;
+  // static int8_t sz = sizeof(tMsgRangeDict) / sizeof(tMsgRangeDict[0]);
+  int8_t maxSegIdx = TMSG_SEG_CODE(TDMT_MAX_MSG);
+  int    segIdx    = TMSG_SEG_CODE(type);
+  if (segIdx >= 0 && segIdx < maxSegIdx) {
+    return type < tMsgRangeDict[segIdx];
   }
+  return false;
 }
 
+#define TMSG_INFO(type) (tmsgIsValid(type) ? tMsgInfo[TMSG_INDEX(type)] : "unKnown")
+
 static inline bool vnodeIsMsgBlock(tmsg_t type) {
   return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
          (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
|
||||||
#define TSDB_FILL_PREV 6
|
#define TSDB_FILL_PREV 6
|
||||||
#define TSDB_FILL_NEXT 7
|
#define TSDB_FILL_NEXT 7
|
||||||
|
|
||||||
#define TSDB_ALTER_USER_PASSWD 0x1
|
#define TSDB_ALTER_USER_PASSWD 0x1
|
||||||
#define TSDB_ALTER_USER_SUPERUSER 0x2
|
#define TSDB_ALTER_USER_SUPERUSER 0x2
|
||||||
#define TSDB_ALTER_USER_ENABLE 0x3
|
#define TSDB_ALTER_USER_ENABLE 0x3
|
||||||
#define TSDB_ALTER_USER_SYSINFO 0x4
|
#define TSDB_ALTER_USER_SYSINFO 0x4
|
||||||
#define TSDB_ALTER_USER_ADD_PRIVILEGES 0x5
|
#define TSDB_ALTER_USER_ADD_PRIVILEGES 0x5
|
||||||
#define TSDB_ALTER_USER_DEL_PRIVILEGES 0x6
|
#define TSDB_ALTER_USER_DEL_PRIVILEGES 0x6
|
||||||
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x7
|
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x7
|
||||||
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x8
|
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x8
|
||||||
|
|
||||||
#define TSDB_KILL_MSG_LEN 30
|
#define TSDB_KILL_MSG_LEN 30
|
||||||
|
|
||||||
|
@@ -351,7 +351,7 @@ typedef enum ENodeType {
   QUERY_NODE_RESTORE_MNODE_STMT,
   QUERY_NODE_RESTORE_VNODE_STMT,
   QUERY_NODE_PAUSE_STREAM_STMT,
   QUERY_NODE_RESUME_STREAM_STMT,
   QUERY_NODE_CREATE_VIEW_STMT,
   QUERY_NODE_DROP_VIEW_STMT,
 
@@ -795,7 +795,7 @@ typedef struct {
 
 int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
 int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
-void    tFreeSMDropStbReq(SMDropStbReq *pReq);
+void    tFreeSMDropStbReq(SMDropStbReq* pReq);
 
 typedef struct {
   char name[TSDB_TABLE_FNAME_LEN];
@@ -876,18 +876,18 @@ int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq
 int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
 
 typedef struct {
   char    user[TSDB_USER_LEN];
   int32_t sqlLen;
   char*   sql;
 } SDropUserReq, SDropAcctReq;
 
 int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
 int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
-void    tFreeSDropUserReq(SDropUserReq *pReq);
+void    tFreeSDropUserReq(SDropUserReq* pReq);
 
-typedef struct SIpV4Range{
+typedef struct SIpV4Range {
   uint32_t ip;
   uint32_t mask;
 } SIpV4Range;
 
 typedef struct {
@@ -897,21 +897,21 @@ typedef struct {
 
 SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList);
 typedef struct {
   int8_t      createType;
   int8_t      superUser;  // denote if it is a super user or not
   int8_t      sysInfo;
   int8_t      enable;
   char        user[TSDB_USER_LEN];
   char        pass[TSDB_USET_PASSWORD_LEN];
   int32_t     numIpRanges;
   SIpV4Range* pIpRanges;
   int32_t     sqlLen;
   char*       sql;
 } SCreateUserReq;
 
 int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
 int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
-void    tFreeSCreateUserReq(SCreateUserReq *pReq);
+void    tFreeSCreateUserReq(SCreateUserReq* pReq);
 
 typedef struct {
   int64_t ver;
@@ -938,22 +938,22 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq
 int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq);
 
 typedef struct {
   int8_t      alterType;
   int8_t      superUser;
   int8_t      sysInfo;
   int8_t      enable;
   int8_t      isView;
   char        user[TSDB_USER_LEN];
   char        pass[TSDB_USET_PASSWORD_LEN];
   char        objname[TSDB_DB_FNAME_LEN];  // db or topic
   char        tabName[TSDB_TABLE_NAME_LEN];
   char*       tagCond;
   int32_t     tagCondLen;
   int32_t     numIpRanges;
   SIpV4Range* pIpRanges;
   int64_t     privileges;
   int32_t     sqlLen;
   char*       sql;
 } SAlterUserReq;
 
 int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
@@ -983,9 +983,9 @@ typedef struct {
   SHashObj* alterTbs;
   SHashObj* readViews;
   SHashObj* writeViews;
   SHashObj* alterViews;
   SHashObj* useDbs;
   int64_t   whiteListVer;
 } SGetUserAuthRsp;
 
 int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp);
@@ -1000,8 +1000,8 @@ int32_t tSerializeSGetUserWhiteListReq(void* buf, int32_t bufLen, SGetUserWhiteL
 int32_t tDeserializeSGetUserWhiteListReq(void* buf, int32_t bufLen, SGetUserWhiteListReq* pReq);
 
 typedef struct {
   char        user[TSDB_USER_LEN];
   int32_t     numWhiteLists;
   SIpV4Range* pWhiteLists;
 } SGetUserWhiteListRsp;
 
@@ -1174,8 +1174,8 @@ int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
 void    tFreeSAlterDbReq(SAlterDbReq* pReq);
 
 typedef struct {
   char    db[TSDB_DB_FNAME_LEN];
   int8_t  ignoreNotExists;
   int32_t sqlLen;
   char*   sql;
 } SDropDbReq;
@@ -1383,7 +1383,7 @@ typedef struct {
 
 int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
 int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
-void    tFreeSCompactDbReq(SCompactDbReq *pReq);
+void    tFreeSCompactDbReq(SCompactDbReq* pReq);
 
 typedef struct {
   int32_t compactId;
@@ -1870,7 +1870,6 @@ int32_t tSerializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
 int32_t tDeserializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
 void    tFreeSViewHbRsp(SViewHbRsp* pRsp);
-
 
 typedef struct {
   int32_t numOfTables;
   int32_t numOfVgroup;
@@ -2060,7 +2059,7 @@ typedef struct {
 
 int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
 int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
-void    tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq);
+void    tFreeSRestoreDnodeReq(SRestoreDnodeReq* pReq);
 
 typedef struct {
   int32_t dnodeId;
@@ -2072,7 +2071,7 @@ typedef struct {
 
 int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
 int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
-void    tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq);
+void    tFreeSMCfgDnodeReq(SMCfgDnodeReq* pReq);
 
 typedef struct {
   char config[TSDB_DNODE_CONFIG_LEN];
@@ -2091,7 +2090,7 @@ typedef struct {
 
 int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
 int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
-void    tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq);
+void    tFreeSMCreateQnodeReq(SMCreateQnodeReq* pReq);
 void    tFreeSDDropQnodeReq(SDDropQnodeReq* pReq);
 typedef struct {
   int8_t replica;
@@ -2133,7 +2132,7 @@ typedef struct {
 
 int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
 int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
-void    tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq);
+void    tFreeSBalanceVgroupReq(SBalanceVgroupReq* pReq);
 
 typedef struct {
   int32_t vgId1;
@@ -2154,7 +2153,7 @@ typedef struct {
 
 int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
 int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
-void    tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq);
+void    tFreeSRedistributeVgroupReq(SRedistributeVgroupReq* pReq);
 
 typedef struct {
   int32_t useless;
@@ -2165,7 +2164,7 @@ typedef struct {
 
 int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
 int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
-void    tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq);
+void    tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq* pReq);
 
 typedef struct {
   int32_t vgId;
@@ -2557,15 +2556,15 @@ typedef struct {
 } SMVSubscribeRsp;
 
 typedef struct {
   char    name[TSDB_TOPIC_FNAME_LEN];
   int8_t  igNotExists;
   int32_t sqlLen;
   char*   sql;
 } SMDropTopicReq;
 
 int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
 int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
-void    tFreeSMDropTopicReq(SMDropTopicReq *pReq);
+void    tFreeSMDropTopicReq(SMDropTopicReq* pReq);
 
 typedef struct {
   char topic[TSDB_TOPIC_FNAME_LEN];
@@ -3136,8 +3135,8 @@ typedef struct {
 } SMqVDeleteRsp;
 
 typedef struct {
   char    name[TSDB_STREAM_FNAME_LEN];
   int8_t  igNotExists;
   int32_t sqlLen;
   char*   sql;
 } SMDropStreamReq;
@@ -3166,7 +3165,7 @@ typedef struct {
 
 int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
 int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
-void    tFreeSMDropStreamReq(SMDropStreamReq* pReq);
+void    tFreeMDropStreamReq(SMDropStreamReq* pReq);
 
 typedef struct {
   char name[TSDB_STREAM_FNAME_LEN];
@@ -3325,7 +3324,7 @@ typedef struct {
   SMsgHead head;
   int64_t  streamId;
   int32_t  taskId;
-} SVPauseStreamTaskReq, SVResetStreamTaskReq;
+} SVPauseStreamTaskReq, SVResetStreamTaskReq, SVDropHTaskReq;
 
 typedef struct {
   int8_t reserved;
@@ -3974,7 +3973,7 @@ int32_t tDeserializeSCMDropViewReq(void* buf, int32_t bufLen, SCMDropViewReq* pR
 void    tFreeSCMDropViewReq(SCMDropViewReq* pReq);
 
 typedef struct {
   char fullname[TSDB_VIEW_FNAME_LEN];
 } SViewMetaReq;
 int32_t tSerializeSViewMetaReq(void* buf, int32_t bufLen, const SViewMetaReq* pReq);
 int32_t tDeserializeSViewMetaReq(void* buf, int32_t bufLen, SViewMetaReq* pReq);
@@ -3996,7 +3995,6 @@ int32_t tSerializeSViewMetaRsp(void* buf, int32_t bufLen, const SViewMetaRsp* pR
 int32_t tDeserializeSViewMetaRsp(void* buf, int32_t bufLen, SViewMetaRsp* pRsp);
 void    tFreeSViewMetaRsp(SViewMetaRsp* pRsp);
-
 
 #pragma pack(pop)
 
 #ifdef __cplusplus
@@ -24,50 +24,72 @@
 #if defined(TD_MSG_INFO_)

 #undef TD_NEW_MSG_SEG
 #undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
 #define TD_NEW_MSG_SEG(TYPE) "null",
 #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) MSG, MSG "-rsp",
+#define TD_CLOSE_MSG_TYPE(TYPE)

 char *tMsgInfo[] = {

+#elif defined(TD_MSG_RANGE_CODE_)
+
+#undef TD_NEW_MSG_SEG
+#undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
+#define TD_NEW_MSG_SEG(TYPE)
+#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
+#define TD_CLOSE_MSG_TYPE(TYPE) TYPE,
+
+int32_t tMsgRangeDict[] = {
+
 #elif defined(TD_MSG_NUMBER_)

 #undef TD_NEW_MSG_SEG
 #undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
 #define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
 #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE##_NUM, TYPE##_RSP_NUM,
+#define TD_CLOSE_MSG_TYPE(TYPE)

 enum {

 #elif defined(TD_MSG_DICT_)

 #undef TD_NEW_MSG_SEG
 #undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
 #define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
 #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
+#define TD_CLOSE_MSG_TYPE(type)

 int32_t tMsgDict[] = {

 #elif defined(TD_MSG_SEG_CODE_)

 #undef TD_NEW_MSG_SEG
 #undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
 #define TD_NEW_MSG_SEG(TYPE) TYPE##_SEG_CODE,
 #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
+#define TD_CLOSE_MSG_TYPE(TYPE)

 enum {

 #else

 #undef TD_NEW_MSG_SEG
 #undef TD_DEF_MSG_TYPE
+#undef TD_CLOSE_MSG_TYPE
 #define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
 #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
+#define TD_CLOSE_MSG_TYPE(TYPE) TYPE,

 enum { // WARN: new msg should be appended to segment tail
 #endif
-  TD_NEW_MSG_SEG(TDMT_DND_MSG)
+  TD_NEW_MSG_SEG(TDMT_DND_MSG) // 0<<8
   TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_DROP_MNODE, "dnode-drop-mnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CREATE_QNODE, "dnode-create-qnode", NULL, NULL)
@@ -82,12 +104,14 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_DND_UNUSED_CODE, "dnd-unused", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_DND_MSG)

-  TD_NEW_MSG_SEG(TDMT_MND_MSG)
+  TD_NEW_MSG_SEG(TDMT_MND_MSG) // 1<<8
   TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
@@ -186,6 +210,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE, "stream-checkpoint-remain", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)
@@ -195,8 +220,9 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_MND_KILL_COMPACT, "kill-compact", SKillCompactReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_TIMER, "compact-tmr", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_MND_MSG)

-  TD_NEW_MSG_SEG(TDMT_VND_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TABLE, "create-table", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TABLE, "alter-table", NULL, NULL)
@@ -232,7 +258,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL)
@@ -246,8 +272,9 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_QUERY_COMPACT_PROGRESS, "vnode-query-compact-progress", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_KILL_COMPACT, "kill-compact", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_VND_MSG)

-  TD_NEW_MSG_SEG(TDMT_SCH_MSG)
+  TD_NEW_MSG_SEG(TDMT_SCH_MSG) // 3<<8
   TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_MERGE_QUERY, "merge-query", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_QUERY_CONTINUE, "query-continue", NULL, NULL)
@@ -260,27 +287,31 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_SCH_MSG)

-  TD_NEW_MSG_SEG(TDMT_STREAM_MSG)
-  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
+  TD_NEW_MSG_SEG(TDMT_STREAM_MSG) //4 << 8
+  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) //1025 1026
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DROP, "stream-task-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RUN, "stream-task-run", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL) //1035 1036
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT_READY, "stream-checkpoint-ready", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_STREAM_MSG)

-  TD_NEW_MSG_SEG(TDMT_MON_MSG)
+  TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8
   TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_MON_MSG)

-  TD_NEW_MSG_SEG(TDMT_SYNC_MSG)
+  TD_NEW_MSG_SEG(TDMT_SYNC_MSG) //6 << 8
   TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT_ELECTION, "sync-elect", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_PING_REPLY, "sync-ping-reply", NULL, NULL)  // no longer used
@@ -310,8 +341,10 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_SYNC_MSG)

-  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
@@ -319,8 +352,9 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_VND_STREAM_MSG)

-  TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG) //8 << 8
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_SUBSCRIBE, "vnode-tmq-subscribe", SMqRebVgReq, SMqRebVgRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_DELETE_SUB, "vnode-tmq-delete-sub", SMqVDeleteReq, SMqVDeleteRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_COMMIT_OFFSET, "vnode-tmq-commit-offset", STqOffset, STqOffset)
@@ -332,9 +366,15 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_COMMITTEDINFO, "vnode-tmq-committedinfo", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL)
+  TD_CLOSE_MSG_TYPE(TDMT_END_TMQ_MSG)
+
+  TD_NEW_MSG_SEG(TDMT_MAX_MSG)  // msg end mark

 #if defined(TD_MSG_NUMBER_)
   TDMT_MAX
 #endif
 };
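Editor's note on the hunks above: the message-definition list is an X-macro — the same TD_NEW_MSG_SEG / TD_DEF_MSG_TYPE / TD_CLOSE_MSG_TYPE entries are expanded several times, each time under a different guard that redefines the generator macros, yielding the name table, the numbering enum, the dictionaries, and the message-type enum itself. A minimal self-contained sketch of the same technique; every name below is illustrative, not from the TDengine tree:

  /* the shared list, analogous to the TD_DEF_MSG_TYPE entries above */
  #define MY_MSG_LIST           \
    MSG_DEF(MSG_HELLO, "hello") \
    MSG_DEF(MSG_BYE,   "bye")

  /* pass 1: expand the list into enum members */
  #define MSG_DEF(sym, str) sym,
  enum { MY_MSG_LIST MSG_MAX };  /* -> MSG_HELLO, MSG_BYE, MSG_MAX */
  #undef MSG_DEF

  /* pass 2: expand the very same list into the matching name table */
  #define MSG_DEF(sym, str) str,
  static const char *msgName[] = { MY_MSG_LIST };  /* -> "hello", "bye" */
  #undef MSG_DEF

Because every consumer expands one list, the enum and its lookup tables cannot drift apart — which is why the new TD_CLOSE_MSG_TYPE segment markers only had to be added to the definition list and to each guard branch.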
@@ -45,6 +45,7 @@ typedef struct {
  */
 SSnode *sndOpen(const char *path, const SSnodeOpt *pOption);

+int32_t sndInit(SSnode * pSnode);
 /**
  * @brief Stop Snode in Dnode.
  *
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TQ_COMMON_H
+#define TDENGINE_TQ_COMMON_H
+
+// message process
+int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart);
+int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored);
+int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessScanHistoryFinishReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
+int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, bool restored);
+int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen);
+int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
+int32_t startStreamTasks(SStreamMeta* pMeta);
+int32_t resetStreamTaskStatus(SStreamMeta* pMeta);
+
+#endif  // TDENGINE_TQ_COMMON_H
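This new header factors the stream-task message handlers out so that vnode and snode can route the same RPCs through one implementation. A hedged sketch of what a host-side router could look like — the wrapper name and switch shape are assumptions; only the handler prototypes above and the TDMT_* types from the message list earlier in this commit are real:

  // Hypothetical router: forwards stream-task RPCs to the shared tq handlers.
  int32_t tqStreamMsgRoute(SStreamMeta* pMeta, SRpcMsg* pMsg) {
    switch (pMsg->msgType) {
      case TDMT_STREAM_TASK_DISPATCH:
        return tqStreamTaskProcessDispatchReq(pMeta, pMsg);
      case TDMT_STREAM_RETRIEVE:
        return tqStreamTaskProcessRetrieveReq(pMeta, pMsg);
      case TDMT_STREAM_TASK_CHECKPOINT_READY:
        return tqStreamTaskProcessCheckpointReadyMsg(pMeta, pMsg);
      default:
        return -1;  // the real code base would set a proper error code here
    }
  }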
@@ -150,19 +150,6 @@ typedef struct {
   int32_t colNum;
 } SMetaStbStats;

-// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
-// int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
-// int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
-// int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
-// bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
-// bool tqCurrentBlockConsumed(const STqReader* pReader);
-// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
-// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
-// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
-// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
-// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
-// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);

 // clang-format off
 /*-------------------------------------------------new api format---------------------------------------------------*/
 typedef struct TsdReader {
@@ -197,27 +184,6 @@ typedef struct SStoreCacheReader {
 // clang-format on

 /*------------------------------------------------------------------------------------------------------------------*/
-/*
-void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
-int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
-int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
-int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
-bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
-bool tqCurrentBlockConsumed(const STqReader* pReader);
-
-int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
-bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
-bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
-
-int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char* idstr);
-STqReader *tqReaderOpen(void *pVnode);
-void tqReaderClose(STqReader *);
-
-int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
-bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
-SWalReader* tqGetWalReader(STqReader* pReader);
-int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
-*/
 // todo rename
 typedef struct SStoreTqReader {
   struct STqReader* (*tqReaderOpen)();
@@ -281,28 +247,18 @@ typedef struct SStoreMeta {
   void* (*storeGetIndexInfo)();
   void* (*getInvertIndex)(void* pVnode);
-  int32_t (*getChildTableList)(
-      void* pVnode, int64_t suid,
-      SArray* list);  // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
-  int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList);  // vnodeGetStbIdList & vnodeGetAllTableList
+  // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
+  int32_t (*getChildTableList)( void* pVnode, int64_t suid, SArray* list);
+  int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList);
   void* storeGetVersionRange;
   void* storeGetLastTimestamp;

   int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid);  // tsdbGetTableSchema
+  int32_t (*getNumOfChildTables)( void* pVnode, int64_t uid, int64_t* numOfTables, int32_t* numOfCols);
+  void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables);

-  // db name, vgId, numOfTables, numOfSTables
-  int32_t (*getNumOfChildTables)(
-      void* pVnode, int64_t uid, int64_t* numOfTables,
-      int32_t* numOfCols);  // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
-  void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
-                       int64_t* numOfNormalTables);  // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
-                                                     // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
   int64_t (*getNumOfRowsInMem)(void* pVnode);
-  /**
-int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
-int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
-int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
-*/
   SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
   int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
   void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
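SStoreMeta is a table of function pointers that decouples the executor from the vnode/meta layer; the inline comments name the intended implementations behind each slot. A hedged wiring sketch — initStoreMeta() and the getBasicInfo wrapper are assumed names; the mapping follows the comments the diff above relocates:

  // Illustrative wiring only; in the real tree the wrapper would combine
  // vnodeGetInfo(), metaGetTbNum() and metaGetNtbNum() as the old comment said.
  static void getBasicInfoImpl(void* pVnode, const char** dbname, int32_t* vgId,
                               int64_t* numOfTables, int64_t* numOfNormalTables) {
    // ... fill the out-parameters from the vnode and its meta store ...
  }

  void initStoreMeta(SStoreMeta* pApi) {
    pApi->getChildTableList = vnodeGetCtbIdList;   // the filter-less case noted in the comment
    pApi->getTableSchema    = tsdbGetTableSchema;  // named in the inline comment
    pApi->getBasicInfo      = getBasicInfoImpl;
  }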
@@ -388,11 +344,15 @@ typedef struct SStateStore {
   int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                                            state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
   int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
+  int32_t (*streamStateSessionAllocWinBuffByNextPosition)(SStreamState* pState, SStreamStateCur* pCur,
+                                                          const SSessionKey* pKey, void** pVal, int32_t* pVLen);

   SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
   TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
   bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
   bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
+  bool (*isIncrementalTimeStamp)(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);

   void (*updateInfoDestroy)(SUpdateInfo* pInfo);
   void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
   void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);
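Operators call state-backend operations through this vtable rather than linking the backend directly. A minimal usage sketch for the newly added slot, assuming a populated store and a valid cursor — variable names here are placeholders, not from the source:

  // 'api' is an SStateStore filled in by the storage layer; pState, pCur and
  // key are assumed to be set up by the session-window operator.
  void*   pVal = NULL;
  int32_t vLen = 0;
  int32_t code = api.streamStateSessionAllocWinBuffByNextPosition(pState, pCur, &key, &pVal, &vLen);
  if (code != 0) {
    // allocation failed; the caller would fall back or abort the window update
  }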
@@ -449,6 +449,7 @@ typedef struct SVnodeModifyOpStmt {
   SHashObj*      pSubTableHashObj;   // SHashObj<table_name, STableMeta*>
   SHashObj*      pTableNameHashObj;  // set of table names for refreshing meta, sync mode
   SHashObj*      pDbFNameHashObj;    // set of db names for refreshing meta, sync mode
+  SHashObj*      pTableCxtHashObj;   // temp SHashObj<tuid, STableDataCxt*> for single request
   SArray*        pVgDataBlocks;      // SArray<SVgroupDataCxt*>
   SVCreateTbReq* pCreateTblReq;
   TdFilePtr      fp;
@@ -35,6 +35,7 @@ int32_t streamStateBegin(SStreamState* pState);
 int32_t streamStateCommit(SStreamState* pState);
 void    streamStateDestroy(SStreamState* pState, bool remove);
 int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
+int32_t streamStateDelTaskDb(SStreamState* pState);

 int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
 int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
@@ -57,6 +58,8 @@ int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
 int32_t streamStateSessionClear(SStreamState* pState);
 int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
 int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
+int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur,
+                                                     const SSessionKey* pKey, void** pVal, int32_t* pVLen);

 SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
 SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
@@ -66,6 +69,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, cons
 int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                                       state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);

+// fill
 int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
 int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
 int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
@@ -130,4 +134,4 @@ char* streamStateIntervalDump(SStreamState* pState);
 }
 #endif

 #endif /* ifndef _STREAM_STATE_H_ */
@ -34,28 +34,33 @@ extern "C" {
|
||||||
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
|
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
|
||||||
#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
|
#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
|
||||||
|
|
||||||
|
#define TASK_DOWNSTREAM_READY 0x0
|
||||||
|
#define TASK_DOWNSTREAM_NOT_READY 0x1
|
||||||
|
#define TASK_DOWNSTREAM_NOT_LEADER 0x2
|
||||||
|
#define TASK_UPSTREAM_NEW_STAGE 0x3
|
||||||
|
|
||||||
#define TASK_DOWNSTREAM_READY 0x0
|
#define NODE_ROLE_UNINIT 0x1
|
||||||
#define TASK_DOWNSTREAM_NOT_READY 0x1
|
#define NODE_ROLE_LEADER 0x2
|
||||||
#define TASK_DOWNSTREAM_NOT_LEADER 0x2
|
#define NODE_ROLE_FOLLOWER 0x3
|
||||||
#define TASK_UPSTREAM_NEW_STAGE 0x3
|
|
||||||
|
|
||||||
#define NODE_ROLE_UNINIT 0x1
|
#define HAS_RELATED_FILLHISTORY_TASK(_t) ((_t)->hTaskInfo.id.taskId != 0)
|
||||||
#define NODE_ROLE_LEADER 0x2
|
|
||||||
#define NODE_ROLE_FOLLOWER 0x3
|
|
||||||
|
|
||||||
#define HAS_RELATED_FILLHISTORY_TASK(_t) ((_t)->hTaskInfo.id.taskId != 0)
|
|
||||||
#define CLEAR_RELATED_FILLHISTORY_TASK(_t) \
|
#define CLEAR_RELATED_FILLHISTORY_TASK(_t) \
|
||||||
do { \
|
do { \
|
||||||
(_t)->hTaskInfo.id.taskId = 0; \
|
(_t)->hTaskInfo.id.taskId = 0; \
|
||||||
(_t)->hTaskInfo.id.streamId = 0; \
|
(_t)->hTaskInfo.id.streamId = 0; \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
|
||||||
|
#define STREAM_EXEC_START_ALL_TASKS_ID (-2)
|
||||||
|
#define STREAM_EXEC_RESTART_ALL_TASKS_ID (-3)
|
||||||
|
|
||||||
typedef struct SStreamTask SStreamTask;
|
typedef struct SStreamTask SStreamTask;
|
||||||
typedef struct SStreamQueue SStreamQueue;
|
typedef struct SStreamQueue SStreamQueue;
|
||||||
typedef struct SStreamTaskSM SStreamTaskSM;
|
typedef struct SStreamTaskSM SStreamTaskSM;
|
||||||
|
|
||||||
#define SSTREAM_TASK_VER 2
|
#define SSTREAM_TASK_VER 2
|
||||||
|
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
|
||||||
|
#define SSTREAM_TASK_NEED_CONVERT_VER 2
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
STREAM_STATUS__NORMAL = 0,
|
STREAM_STATUS__NORMAL = 0,
|
||||||
|
@ -107,6 +112,7 @@ typedef enum {
|
||||||
TASK_LEVEL__SOURCE = 1,
|
TASK_LEVEL__SOURCE = 1,
|
||||||
TASK_LEVEL__AGG,
|
TASK_LEVEL__AGG,
|
||||||
TASK_LEVEL__SINK,
|
TASK_LEVEL__SINK,
|
||||||
|
TASK_LEVEL_SMA,
|
||||||
} ETASK_LEVEL;
|
} ETASK_LEVEL;
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
@ -140,8 +146,6 @@ typedef enum EStreamTaskEvent {
|
||||||
TASK_EVENT_RESUME = 0x9,
|
TASK_EVENT_RESUME = 0x9,
|
||||||
TASK_EVENT_HALT = 0xA,
|
TASK_EVENT_HALT = 0xA,
|
||||||
TASK_EVENT_DROPPING = 0xB,
|
TASK_EVENT_DROPPING = 0xB,
|
||||||
TASK_EVENT_SCAN_TSDB = 0xC,
|
|
||||||
TASK_EVENT_SCAN_WAL = 0xD,
|
|
||||||
} EStreamTaskEvent;
|
} EStreamTaskEvent;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -161,7 +165,7 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int8_t type;
|
int8_t type;
|
||||||
int64_t ver;
|
int64_t ver;
|
||||||
SArray* submits; // SArray<SPackedSubmit>
|
SArray* submits; // SArray<SPackedSubmit>
|
||||||
} SStreamMergedSubmit;
|
} SStreamMergedSubmit;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -212,9 +216,6 @@ int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
|
||||||
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
|
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
int32_t streamInit();
|
|
||||||
void streamCleanUp();
|
|
||||||
|
|
||||||
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
|
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
|
||||||
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
|
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
|
||||||
|
|
||||||
|
@ -253,7 +254,7 @@ typedef struct {
|
||||||
} SScanhistoryDataInfo;
|
} SScanhistoryDataInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t idleDuration; // idle time before use time slice the continue execute scan-history
|
int32_t idleDuration; // idle time before use time slice the continue execute scan-history
|
||||||
int32_t numOfTicks;
|
int32_t numOfTicks;
|
||||||
tmr_h pTimer;
|
tmr_h pTimer;
|
||||||
int32_t execCount;
|
int32_t execCount;
|
||||||
|
@ -303,10 +304,16 @@ typedef struct SStreamTaskId {
|
||||||
typedef struct SCheckpointInfo {
|
typedef struct SCheckpointInfo {
|
||||||
int64_t startTs;
|
int64_t startTs;
|
||||||
int64_t checkpointId;
|
int64_t checkpointId;
|
||||||
int64_t checkpointVer; // latest checkpointId version
|
|
||||||
int64_t processedVer; // already processed ver, that has generated results version.
|
int64_t checkpointVer; // latest checkpointId version
|
||||||
int64_t nextProcessVer; // current offset in WAL, not serialize it
|
int64_t processedVer;
|
||||||
int64_t failedId; // record the latest failed checkpoint id
|
int64_t nextProcessVer; // current offset in WAL, not serialize it
|
||||||
|
int64_t failedId; // record the latest failed checkpoint id
|
||||||
|
int64_t checkpointingId;
|
||||||
|
int32_t downstreamAlignNum;
|
||||||
|
int32_t checkpointNotReadyTasks;
|
||||||
|
bool dispatchCheckpointTrigger;
|
||||||
|
int64_t msgVer;
|
||||||
} SCheckpointInfo;
|
} SCheckpointInfo;
|
||||||
|
|
||||||
typedef struct SStreamStatus {
|
typedef struct SStreamStatus {
|
||||||
|
@ -333,22 +340,22 @@ typedef struct SSTaskBasicInfo {
|
||||||
int32_t selfChildId;
|
int32_t selfChildId;
|
||||||
int32_t totalLevel;
|
int32_t totalLevel;
|
||||||
int8_t taskLevel;
|
int8_t taskLevel;
|
||||||
int8_t fillHistory; // is fill history task or not
|
int8_t fillHistory; // is fill history task or not
|
||||||
int64_t triggerParam; // in msec
|
int64_t triggerParam; // in msec
|
||||||
} SSTaskBasicInfo;
|
} SSTaskBasicInfo;
|
||||||
|
|
||||||
typedef struct SStreamDispatchReq SStreamDispatchReq;
|
typedef struct SStreamDispatchReq SStreamDispatchReq;
|
||||||
typedef struct STokenBucket STokenBucket;
|
typedef struct STokenBucket STokenBucket;
|
||||||
typedef struct SMetaHbInfo SMetaHbInfo;
|
typedef struct SMetaHbInfo SMetaHbInfo;
|
||||||
|
|
||||||
typedef struct SDispatchMsgInfo {
|
typedef struct SDispatchMsgInfo {
|
||||||
SStreamDispatchReq* pData; // current dispatch data
|
SStreamDispatchReq* pData; // current dispatch data
|
||||||
int8_t dispatchMsgType;
|
int8_t dispatchMsgType;
|
||||||
int16_t msgType; // dispatch msg type
|
int16_t msgType; // dispatch msg type
|
||||||
int32_t retryCount; // retry send data count
|
int32_t retryCount; // retry send data count
|
||||||
int64_t startTs; // dispatch start time, record total elapsed time for dispatch
|
int64_t startTs; // dispatch start time, record total elapsed time for dispatch
|
||||||
SArray* pRetryList; // current dispatch successfully completed node of downstream
|
SArray* pRetryList; // current dispatch successfully completed node of downstream
|
||||||
void* pTimer; // used to dispatch data after a given time duration
|
void* pTimer; // used to dispatch data after a given time duration
|
||||||
} SDispatchMsgInfo;
|
} SDispatchMsgInfo;
|
||||||
|
|
||||||
typedef struct STaskQueue {
|
typedef struct STaskQueue {
|
||||||
|
@ -357,8 +364,8 @@ typedef struct STaskQueue {
|
||||||
} STaskQueue;
|
} STaskQueue;
|
||||||
|
|
||||||
typedef struct STaskSchedInfo {
|
typedef struct STaskSchedInfo {
|
||||||
int8_t status;
|
int8_t status;
|
||||||
void* pTimer;
|
void* pTimer;
|
||||||
} STaskSchedInfo;
|
} STaskSchedInfo;
|
||||||
|
|
||||||
typedef struct SSinkRecorder {
|
typedef struct SSinkRecorder {
|
||||||
|
@ -393,6 +400,7 @@ typedef struct SHistoryTaskInfo {
|
||||||
int32_t retryTimes;
|
int32_t retryTimes;
|
||||||
int32_t waitInterval;
|
int32_t waitInterval;
|
||||||
int64_t haltVer; // offset in wal when halt the stream task
|
int64_t haltVer; // offset in wal when halt the stream task
|
||||||
|
bool operatorOpen; // false by default
|
||||||
} SHistoryTaskInfo;
|
} SHistoryTaskInfo;
|
||||||
|
|
||||||
typedef struct STaskOutputInfo {
|
typedef struct STaskOutputInfo {
|
||||||
|
@ -445,12 +453,11 @@ struct SStreamTask {
|
||||||
int64_t checkReqId;
|
int64_t checkReqId;
|
||||||
SArray* checkReqIds; // shuffle
|
SArray* checkReqIds; // shuffle
|
||||||
int32_t refCnt;
|
int32_t refCnt;
|
||||||
int64_t checkpointingId;
|
|
||||||
int32_t checkpointAlignCnt;
|
|
||||||
int32_t checkpointNotReadyTasks;
|
|
||||||
int32_t transferStateAlignCnt;
|
int32_t transferStateAlignCnt;
|
||||||
struct SStreamMeta* pMeta;
|
struct SStreamMeta* pMeta;
|
||||||
SSHashObj* pNameMap;
|
SSHashObj* pNameMap;
|
||||||
|
void* pBackend;
|
||||||
|
int64_t backendRefId;
|
||||||
char reserve[256];
|
char reserve[256];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -458,9 +465,9 @@ typedef struct STaskStartInfo {
|
||||||
int64_t startTs;
|
int64_t startTs;
|
||||||
int64_t readyTs;
|
int64_t readyTs;
|
||||||
int32_t tasksWillRestart;
|
int32_t tasksWillRestart;
|
||||||
int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
|
int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
|
||||||
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
|
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
|
||||||
SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
|
SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
|
||||||
int64_t elapsedTime;
|
int64_t elapsedTime;
|
||||||
} STaskStartInfo;
|
} STaskStartInfo;
|
||||||
|
|
||||||
|
@ -488,20 +495,25 @@ typedef struct SStreamMeta {
|
||||||
int32_t walScanCounter;
|
int32_t walScanCounter;
|
||||||
void* streamBackend;
|
void* streamBackend;
|
||||||
int64_t streamBackendRid;
|
int64_t streamBackendRid;
|
||||||
SHashObj* pTaskBackendUnique;
|
SHashObj* pTaskDbUnique;
|
||||||
TdThreadMutex backendMutex;
|
TdThreadMutex backendMutex;
|
||||||
SMetaHbInfo* pHbInfo;
|
SMetaHbInfo* pHbInfo;
|
||||||
STaskUpdateInfo updateInfo;
|
STaskUpdateInfo updateInfo;
|
||||||
|
SHashObj* pUpdateTaskSet;
|
||||||
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
|
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
|
||||||
int32_t numOfPausedTasks;
|
int32_t numOfPausedTasks;
|
||||||
int32_t chkptNotReadyTasks;
|
|
||||||
int64_t rid;
|
int64_t rid;
|
||||||
|
|
||||||
int64_t chkpId;
|
int64_t chkpId;
|
||||||
|
int32_t chkpCap;
|
||||||
SArray* chkpSaved;
|
SArray* chkpSaved;
|
||||||
SArray* chkpInUse;
|
SArray* chkpInUse;
|
||||||
int32_t chkpCap;
|
|
||||||
SRWLatch chkpDirLock;
|
SRWLatch chkpDirLock;
|
||||||
|
|
||||||
|
void* qHandle;
|
||||||
|
int32_t pauseTaskNum;
|
||||||
|
|
||||||
|
void* bkdChkptMgt;
|
||||||
} SStreamMeta;
|
} SStreamMeta;
|
||||||
|
|
||||||
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
|
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
|
||||||
|
@ -533,7 +545,7 @@ struct SStreamDispatchReq {
|
||||||
int64_t stage; // nodeId from upstream task
|
int64_t stage; // nodeId from upstream task
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
int32_t msgId; // msg id to identify if the incoming msg from the same sender
|
int32_t msgId; // msg id to identify if the incoming msg from the same sender
|
||||||
int32_t srcVgId;
|
int32_t srcVgId;
|
||||||
int32_t upstreamTaskId;
|
int32_t upstreamTaskId;
|
||||||
int32_t upstreamChildId;
|
int32_t upstreamChildId;
|
||||||
|
@ -592,7 +604,7 @@ typedef struct {
|
||||||
int32_t downstreamNodeId;
|
int32_t downstreamNodeId;
|
||||||
int32_t downstreamTaskId;
|
int32_t downstreamTaskId;
|
||||||
int32_t childId;
|
int32_t childId;
|
||||||
int32_t oldStage;
|
int64_t oldStage;
|
||||||
int8_t status;
|
int8_t status;
|
||||||
} SStreamTaskCheckRsp;
|
} SStreamTaskCheckRsp;
|
||||||
|
|
||||||
|
@ -657,24 +669,28 @@ int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointRea
|
||||||
typedef struct STaskStatusEntry {
|
typedef struct STaskStatusEntry {
|
||||||
STaskId id;
|
STaskId id;
|
||||||
int32_t status;
|
int32_t status;
|
||||||
int32_t stage;
|
int32_t statusLastDuration; // to record the last duration of current status
|
||||||
|
int64_t stage;
|
||||||
int32_t nodeId;
|
int32_t nodeId;
|
||||||
int64_t verStart; // start version in WAL, only valid for source task
|
int64_t verStart; // start version in WAL, only valid for source task
|
||||||
int64_t verEnd; // end version in WAL, only valid for source task
|
int64_t verEnd; // end version in WAL, only valid for source task
|
||||||
int64_t processedVer; // only valid for source task
|
int64_t processedVer; // only valid for source task
|
||||||
|
int32_t relatedHTask; // has related fill-history task
|
||||||
int64_t activeCheckpointId; // current active checkpoint id
|
int64_t activeCheckpointId; // current active checkpoint id
|
||||||
bool checkpointFailed; // denote if the checkpoint is failed or not
|
bool checkpointFailed; // denote if the checkpoint is failed or not
|
||||||
|
bool inputQChanging; // inputQ is changing or not
|
||||||
|
int64_t inputQUnchangeCounter;
|
||||||
double inputQUsed; // in MiB
|
double inputQUsed; // in MiB
|
||||||
double inputRate;
|
double inputRate;
|
||||||
double sinkQuota; // existed quota size for sink task
|
double sinkQuota; // existed quota size for sink task
|
||||||
double sinkDataSize; // sink to dest data size
|
double sinkDataSize; // sink to dst data size
|
||||||
} STaskStatusEntry;
|
} STaskStatusEntry;
|
||||||
|
|
||||||
typedef struct SStreamHbMsg {
|
typedef struct SStreamHbMsg {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
int32_t numOfTasks;
|
int32_t numOfTasks;
|
||||||
SArray* pTaskStatus; // SArray<STaskStatusEntry>
|
SArray* pTaskStatus; // SArray<STaskStatusEntry>
|
||||||
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
|
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
|
||||||
} SStreamHbMsg;
|
} SStreamHbMsg;
|
||||||
|
|
||||||
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
|
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
|
||||||
|
@ -698,7 +714,7 @@ typedef struct SNodeUpdateInfo {
|
||||||
} SNodeUpdateInfo;
|
} SNodeUpdateInfo;
|
||||||
|
|
||||||
typedef struct SStreamTaskNodeUpdateMsg {
|
typedef struct SStreamTaskNodeUpdateMsg {
|
||||||
int32_t transId; // to identify the msg
|
int32_t transId; // to identify the msg
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
SArray* pNodeList; // SArray<SNodeUpdateInfo>
|
SArray* pNodeList; // SArray<SNodeUpdateInfo>
|
||||||
|
@ -755,12 +771,13 @@ const char* streamTaskGetStatusStr(ETaskStatus status);
|
||||||
void streamTaskResetStatus(SStreamTask* pTask);
|
void streamTaskResetStatus(SStreamTask* pTask);
|
||||||
void streamTaskSetStatusReady(SStreamTask* pTask);
|
void streamTaskSetStatusReady(SStreamTask* pTask);
|
||||||
|
|
||||||
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
|
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
|
||||||
|
|
||||||
// recover and fill history
|
// recover and fill history
|
||||||
void streamTaskCheckDownstream(SStreamTask* pTask);
|
void streamTaskCheckDownstream(SStreamTask* pTask);
|
||||||
|
|
||||||
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage);
|
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
|
||||||
|
int64_t* oldStage);
|
||||||
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
|
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
|
||||||
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
|
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
|
||||||
bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
|
bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
|
||||||
|
@ -786,18 +803,17 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer)
|
||||||
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
|
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
|
||||||
|
|
||||||
// common
|
// common
|
||||||
int32_t streamRestoreParam(SStreamTask* pTask);
|
int32_t streamRestoreParam(SStreamTask* pTask);
|
||||||
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
|
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
|
||||||
void streamTaskResume(SStreamTask* pTask);
|
void streamTaskResume(SStreamTask* pTask);
|
||||||
void streamTaskEnablePause(SStreamTask* pTask);
|
int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
|
||||||
int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
|
void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
|
||||||
void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
|
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
|
||||||
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
|
void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask);
|
||||||
void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask);
|
int32_t streamTaskReleaseState(SStreamTask* pTask);
|
||||||
int32_t streamTaskReleaseState(SStreamTask* pTask);
|
int32_t streamTaskReloadState(SStreamTask* pTask);
|
||||||
int32_t streamTaskReloadState(SStreamTask* pTask);
|
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
|
||||||
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
|
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
|
||||||
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
|
|
||||||
|
|
||||||
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
|
void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
|
||||||
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
|
void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
|
||||||
|
@ -806,7 +822,7 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
|
||||||
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
||||||
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
|
||||||
SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
|
SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
|
||||||
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
|
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
|
||||||
|
|
||||||
// agg level
|
// agg level
|
||||||
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, SRpcHandleInfo* pInfo);
|
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, SRpcHandleInfo* pInfo);
|
||||||
|
@ -828,12 +844,14 @@ int32_t streamMetaReopen(SStreamMeta* pMeta);
|
||||||
void streamMetaInitBackend(SStreamMeta* pMeta);
|
void streamMetaInitBackend(SStreamMeta* pMeta);
|
||||||
int32_t streamMetaCommit(SStreamMeta* pMeta);
|
int32_t streamMetaCommit(SStreamMeta* pMeta);
|
||||||
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
|
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
|
||||||
|
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
|
||||||
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
|
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
|
||||||
void streamMetaNotifyClose(SStreamMeta* pMeta);
|
void streamMetaNotifyClose(SStreamMeta* pMeta);
|
||||||
|
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
|
||||||
void streamMetaStartHb(SStreamMeta* pMeta);
|
void streamMetaStartHb(SStreamMeta* pMeta);
|
||||||
void streamMetaInitForSnode(SStreamMeta* pMeta);
|
|
||||||
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
|
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
|
||||||
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs, int64_t endTs, bool succ);
|
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
|
||||||
|
int64_t endTs, bool ready);
|
||||||
void streamMetaRLock(SStreamMeta* pMeta);
|
void streamMetaRLock(SStreamMeta* pMeta);
|
||||||
void streamMetaRUnLock(SStreamMeta* pMeta);
|
void streamMetaRUnLock(SStreamMeta* pMeta);
|
||||||
void streamMetaWLock(SStreamMeta* pMeta);
|
void streamMetaWLock(SStreamMeta* pMeta);
|
||||||
|
@ -844,7 +862,7 @@ void streamMetaResetStartInfo(STaskStartInfo* pMeta);
|
||||||
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
|
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
|
||||||
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
|
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
|
||||||
int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
|
int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
|
||||||
void streamTaskClearCheckInfo(SStreamTask* pTask);
|
void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg);
|
||||||
int32_t streamAlignTransferState(SStreamTask* pTask);
|
int32_t streamAlignTransferState(SStreamTask* pTask);
|
||||||
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
|
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
|
||||||
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
|
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
|
||||||
|
@ -852,8 +870,10 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
|
||||||
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
|
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
|
||||||
int8_t isSucceed);
|
int8_t isSucceed);
|
||||||
|
|
||||||
|
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask);
|
||||||
|
void* streamDestroyStateMachine(SStreamTaskSM* pSM);
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* ifndef _STREAM_H_ */
|
#endif /* ifndef _STREAM_H_ */
|
|
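Two related changes run through this header: the upstream/downstream "stage" is widened from int32_t to int64_t (in SStreamTaskCheckRsp and STaskStatusEntry), and streamTaskCheckStatus() now reports the previous stage through an out-parameter. A hedged sketch of the resulting call-site shape — the surrounding request fields are assumptions; only the two declarations from the hunks above are real:

  // Downstream side of the task-check handshake after this change.
  SStreamTaskCheckRsp rsp = {0};
  rsp.status = streamTaskCheckStatus(pTask, upstreamTaskId, upstreamNodeId,
                                     reqStage, &rsp.oldStage);
  // rsp.oldStage is now int64_t, matching the widened 'stage' carried in the
  // heartbeat's STaskStatusEntry.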
@@ -76,8 +76,10 @@ int32_t getRowStateRowSize(SStreamFileState* pFileState);
 int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen);
 int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
 int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
-int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen);
+int32_t deleteSessionWinStateBuffFn(void* pBuff, const void* key, size_t keyLen);
 int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
+int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur,
+                                           const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen);

 SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
 int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
@@ -55,6 +55,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *p
 int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
 void    windowSBfDelete(SUpdateInfo *pInfo, uint64_t count);
 void    windowSBfAdd(SUpdateInfo *pInfo, uint64_t count);
+bool    isIncrementalTimeStamp(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);

 #ifdef __cplusplus
 }
@ -45,8 +45,8 @@ extern "C" {
|
||||||
|
|
||||||
#define SYNC_HEARTBEAT_SLOW_MS 1500
|
#define SYNC_HEARTBEAT_SLOW_MS 1500
|
||||||
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
|
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
|
||||||
#define SYNC_SNAP_RESEND_MS 1000 * 60
|
#define SYNC_SNAP_RESEND_MS 1000 * 300
|
||||||
#define SYNC_SNAP_TIMEOUT_MS 1000 * 600
|
#define SYNC_SNAP_TIMEOUT_MS 1000 * 1800
|
||||||
|
|
||||||
#define SYNC_VND_COMMIT_MIN_MS 3000
|
#define SYNC_VND_COMMIT_MIN_MS 3000
|
||||||
|
|
||||||
|
|
|
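For reference, the retuned values work out to a 5-minute snapshot resend interval (1000 * 300 ms) and a 30-minute snapshot timeout (1000 * 1800 ms), up from 1 and 10 minutes respectively.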
@@ -54,15 +54,18 @@ extern "C" {
 
 typedef struct TdFile *TdFilePtr;
 
 #define TD_FILE_CREATE        0x0001
 #define TD_FILE_WRITE         0x0002
 #define TD_FILE_READ          0x0004
 #define TD_FILE_TRUNC         0x0008
 #define TD_FILE_APPEND        0x0010
 #define TD_FILE_TEXT          0x0020
 #define TD_FILE_AUTO_DEL      0x0040
 #define TD_FILE_EXCL          0x0080
 #define TD_FILE_STREAM        0x0100  // Only support taosFprintfFile, taosGetLineFile, taosEOFFile
+#define TD_FILE_WRITE_THROUGH 0x0200
+#define TD_FILE_CLOEXEC       0x0400
 
 TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions);
 TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions);
 
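The two new bits extend the option mask taosOpenFile already takes, so callers opt in per open. A minimal usage sketch; that TD_FILE_WRITE_THROUGH maps to O_SYNC/FILE_FLAG_WRITE_THROUGH and TD_FILE_CLOEXEC to O_CLOEXEC is an assumption here, since the mapping lives in the implementation rather than this header:

    // Sketch: open a log file whose writes go straight through the OS cache
    // and whose descriptor is not leaked into child processes.
    static TdFilePtr openDurableLog(const char* path) {
      int32_t opts = TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_WRITE_THROUGH | TD_FILE_CLOEXEC;
      return taosOpenFile(path, opts);  // NULL on failure
    }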
@@ -124,6 +124,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_INVALID_CFG_VALUE    TAOS_DEF_ERROR_CODE(0, 0x0133)
 
 #define TSDB_CODE_IP_NOT_IN_WHITE_LIST TAOS_DEF_ERROR_CODE(0, 0x0134)
+#define TSDB_CODE_FAILED_TO_CONNECT_S3 TAOS_DEF_ERROR_CODE(0, 0x0135)
 
 //client
 #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
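With a dedicated code, S3 connectivity failures no longer have to masquerade as generic config errors. A hypothetical call site (reportS3 is invented; only the error code is from the diff):

    #include <stdio.h>
    // Sketch: turn the new code into an actionable message.
    static void reportS3(int32_t code) {
      if (code == TSDB_CODE_FAILED_TO_CONNECT_S3) {
        fprintf(stderr, "failed to connect to S3, check endpoint/credentials\n");
      }
    }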
@@ -139,6 +139,8 @@ int32_t getWordLength(char type);
 int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
 int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output);
 int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output);
+int32_t tsDecompressTimestampAvx512(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
+int32_t tsDecompressTimestampAvx2(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
 
 /*************************************************************************
  *                  STREAM COMPRESSION
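The timestamp decoder now mirrors the float path with AVX2 and AVX-512 variants. A sketch of the dispatch a caller presumably performs; the capability probes are hypothetical stand-ins for whatever CPUID gating the codebase uses:

    // Sketch: prefer the widest SIMD implementation the CPU supports.
    static int32_t decompressTs(const char* input, int32_t nelements, char* output, bool bigEndian) {
      if (cpuSupportsAvx512()) {   // hypothetical probe
        return tsDecompressTimestampAvx512(input, nelements, output, bigEndian);
      }
      if (cpuSupportsAvx2()) {     // hypothetical probe
        return tsDecompressTimestampAvx2(input, nelements, output, bigEndian);
      }
      return -1;  // caller falls back to the scalar decoder (not shown)
    }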
@@ -305,7 +305,7 @@ typedef enum ELogicConditionType {
 #define TSDB_SYNC_APPLYQ_SIZE_LIMIT 512
 #define TSDB_SYNC_NEGOTIATION_WIN   512
 
-#define TSDB_SYNC_SNAP_BUFFER_SIZE  2048
+#define TSDB_SYNC_SNAP_BUFFER_SIZE  1024
 
 #define TSDB_TBNAME_COLUMN_INDEX     (-1)
 #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000  // maximum batch size allowed to load table meta
@@ -66,6 +66,7 @@ extern int32_t udfDebugFlag;
 extern int32_t smaDebugFlag;
 extern int32_t idxDebugFlag;
 extern int32_t tdbDebugFlag;
+extern int32_t sndDebugFlag;
 
 int32_t taosInitLog(const char *logName, int32_t maxFiles);
 void taosCloseLog();
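sndDebugFlag gives the snode module its own verbosity knob alongside the other per-module flags. A hedged sketch of the usual gating pattern; the sndTrace macro name follows the convention of the sibling flags but is an assumption here, as is the exact taosPrintLog usage:

    // Sketch: gate snode trace output on the new flag, as sibling modules do.
    #define sndTrace(...)                                                \
      do {                                                               \
        if (sndDebugFlag & DEBUG_TRACE) {                                \
          taosPrintLog("SND ", DEBUG_TRACE, sndDebugFlag, __VA_ARGS__);  \
        }                                                                \
      } while (0)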
@@ -1140,3 +1140,5 @@ elif [ "$verType" == "client" ]; then
 else
   echo "please input correct verType"
 fi
+
+
@@ -129,7 +129,7 @@ function install_bin() {
     if [ "$osType" != "Darwin" ]; then
        [ -x ${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || :
        [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || :
        [ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || :
     fi
     [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript2} || :
 fi
@@ -281,6 +281,11 @@ fi
 chmod a+x ${install_dir}/install.sh
 
 if [[ $dbName == "taos" ]]; then
+    cp ${top_dir}/../enterprise/packaging/start-all.sh ${install_dir}
+    cp ${top_dir}/../enterprise/packaging/stop-all.sh ${install_dir}
+    cp ${top_dir}/../enterprise/packaging/README.md ${install_dir}
+    chmod a+x ${install_dir}/start-all.sh
+    chmod a+x ${install_dir}/stop-all.sh
     # Copy example code
     mkdir -p ${install_dir}/examples
     examples_dir="${top_dir}/examples"
@@ -360,12 +365,6 @@ if [ "$verMode" == "cluster" ]; then
     git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust
     rm -rf ${install_dir}/connector/rust/.git ||:
-
-    cp ${top_dir}/../enterprise/packaging/start-all.sh ${install_dir}
-    cp ${top_dir}/../enterprise/packaging/stop-all.sh ${install_dir}
-    cp ${top_dir}/../enterprise/packaging/README.md ${install_dir}
-    chmod a+x ${install_dir}/start-all.sh
-    chmod a+x ${install_dir}/stop-all.sh
 
     # copy taosx
     if [ -d ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ]; then
         cp -r ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ${install_dir}
@@ -443,4 +442,4 @@ if [ -n "${taostools_bin_files}" ] && [ "$verMode" != "cloud" ]; then
     fi
 fi
 
 cd ${curr_dir}
@@ -38,3 +38,4 @@ source /etc/profile
 ${csudo}mkdir -p ${corePath} ||:
 ${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
 ${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
+${csudo}echo "kernel.core_pattern = ${corePath}/core_%e-%p" >> /etc/sysctl.conf ||: