Merge branch '3.0' into fix/TD-32563
commit 226a766a17
@@ -0,0 +1,58 @@
name: TaosKeeper CI

on:
  push:
    paths:
      - tools/keeper/**

jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests

    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake libgeos-dev

      - name: Install TDengine
        run: |
          mkdir debug
          cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with coverage
        working-directory: tools/keeper
        run: |
          go mod tidy
          sudo go test -v -ldflags="-X 'github.com/taosdata/taoskeeper/version.IsEnterprise=true'" -coverpkg=./... -coverprofile=coverage.out ./...
          go tool cover -func=coverage.out

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi

@@ -121,6 +121,7 @@ TAGS
contrib/*
!contrib/CMakeLists.txt
!contrib/test
!contrib/azure-cmake
sql
debug*/
.env

@@ -1,13 +1,13 @@
cmake_minimum_required(VERSION 3.0)

project(
TDengine
VERSION 3.0
DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
TDengine
VERSION 3.0
DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
)

if (NOT DEFINED TD_SOURCE_DIR)
set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} )
if(NOT DEFINED TD_SOURCE_DIR)
set(TD_SOURCE_DIR ${PROJECT_SOURCE_DIR})
endif()

SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})

@@ -15,13 +15,11 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.version)

# contrib
add_subdirectory(contrib)

@@ -33,8 +31,8 @@ target_include_directories(api INTERFACE "include/client")

# src
if(${BUILD_TEST})
include(CTest)
enable_testing()
include(CTest)
enable_testing()
endif(${BUILD_TEST})

add_subdirectory(source)

@@ -44,5 +42,5 @@ add_subdirectory(examples/c)
add_subdirectory(tests)
include(${TD_SUPPORT_DIR}/cmake.install)

# docs
# docs
add_subdirectory(docs/doxgen)

@@ -348,7 +348,7 @@ TDengine offers a rich set of application development APIs, including C/C++, Java

# Become a Community Contributor

Click [here](https://www.taosdata.com/cn/contributor/) to learn how to become a TDengine contributor.
Click [here](https://www.taosdata.com/contributor) to learn how to become a TDengine contributor.

# Join the Technical Discussion Group

@@ -0,0 +1,15 @@
# azure
ExternalProject_Add(azure
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
#BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

@@ -1,8 +1,9 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
set(TD_BUILD_KEEPER_INTERNAL FALSE)

#set output directory
# set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
SET(TD_TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/test)

@@ -12,204 +13,227 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

if (NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
endif()
IF(NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
ENDIF()

IF (NOT DEFINED BUILD_WITH_RAND_ERR)
SET(BUILD_WITH_RAND_ERR FALSE)
ELSE ()
SET(BUILD_WITH_RAND_ERR TRUE)
endif()
IF(NOT DEFINED BUILD_WITH_RAND_ERR)
SET(BUILD_WITH_RAND_ERR FALSE)
ELSE()
SET(BUILD_WITH_RAND_ERR TRUE)
ENDIF()

IF ("${WEBSOCKET}" MATCHES "true")
IF("${WEBSOCKET}" MATCHES "true")
SET(TD_WEBSOCKET TRUE)
MESSAGE("Enable websocket")
ADD_DEFINITIONS(-DWEBSOCKET)
ELSE ()
ELSE()
SET(TD_WEBSOCKET FALSE)
ENDIF ()
ENDIF()

IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)
SET(TD_BUILD_HTTP TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()
ELSEIF (TD_DARWIN)
IF("${BUILD_HTTP}" STREQUAL "")
IF(TD_LINUX)
IF(TD_ARM_32)
SET(TD_BUILD_HTTP TRUE)
ELSE()
SET(TD_BUILD_HTTP TRUE)
ENDIF()
ELSEIF(TD_DARWIN)
SET(TD_BUILD_HTTP TRUE)
ELSE()
SET(TD_BUILD_HTTP TRUE)
ENDIF()
ELSEIF(${BUILD_HTTP} MATCHES "false")
SET(TD_BUILD_HTTP FALSE)
ELSEIF(${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
ELSE ()
ELSEIF(${BUILD_HTTP} MATCHES "internal")
SET(TD_BUILD_HTTP FALSE)
SET(TD_BUILD_TAOSA_INTERNAL TRUE)
ELSE()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()
ELSEIF (${BUILD_HTTP} MATCHES "false")
SET(TD_BUILD_HTTP FALSE)
ELSEIF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
ELSEIF (${BUILD_HTTP} MATCHES "internal")
SET(TD_BUILD_HTTP FALSE)
SET(TD_BUILD_TAOSA_INTERNAL TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF ()
ENDIF()

IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF ()
IF(TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF()

IF ("${BUILD_TOOLS}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)
SET(BUILD_TOOLS "false")
ELSEIF (TD_ARM_64)
SET(BUILD_TOOLS "false")
ELSE ()
SET(BUILD_TOOLS "false")
ENDIF ()
ELSEIF (TD_DARWIN)
SET(BUILD_TOOLS "false")
ELSE ()
SET(BUILD_TOOLS "false")
ENDIF ()
ENDIF ()
IF("${BUILD_KEEPER}" STREQUAL "")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "false")
SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "true")
SET(TD_BUILD_KEEPER TRUE)
ELSEIF(${BUILD_KEEPER} MATCHES "internal")
SET(TD_BUILD_KEEPER FALSE)
SET(TD_BUILD_KEEPER_INTERNAL TRUE)
ELSE()
SET(TD_BUILD_KEEPER FALSE)
ENDIF()

IF ("${BUILD_TOOLS}" MATCHES "false")
IF("${BUILD_TOOLS}" STREQUAL "")
IF(TD_LINUX)
IF(TD_ARM_32)
SET(BUILD_TOOLS "false")
ELSEIF(TD_ARM_64)
SET(BUILD_TOOLS "false")
ELSE()
SET(BUILD_TOOLS "false")
ENDIF()
ELSEIF(TD_DARWIN)
SET(BUILD_TOOLS "false")
ELSE()
SET(BUILD_TOOLS "false")
ENDIF()
ENDIF()

IF("${BUILD_TOOLS}" MATCHES "false")
MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}")
SET(TD_TAOS_TOOLS FALSE)
ELSE ()
ELSE()
MESSAGE("")
MESSAGE("${Green} Will build taos_tools! ${ColourReset}")
MESSAGE("")
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
ENDIF()

IF (${TD_WINDOWS})
IF(${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
ELSE()
SET(TAOS_LIB taos)
ENDIF ()
ENDIF()

# build TSZ by default
IF ("${TSZ_ENABLED}" MATCHES "false")
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
IF("${TSZ_ENABLED}" MATCHES "false")
set(VAR_TSZ "" CACHE INTERNAL "global variant empty")
ELSE()
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz")
ENDIF()

# force set all platform to JEMALLOC_ENABLED = false
SET(JEMALLOC_ENABLED OFF)
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
MESSAGE("${Green} will build Release version! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")

ELSE ()
MESSAGE("${Green} will build Debug version! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
IF(TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")

IF(${CMAKE_BUILD_TYPE} MATCHES "Release")
MESSAGE("${Green} will build Release version! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")

ELSE()
MESSAGE("${Green} will build Debug version! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
ENDIF()

SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")

# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_C)
IF(CMAKE_DEPFILE_FLAGS_C)
SET(CMAKE_DEPFILE_FLAGS_C "")
ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_CXX)
ENDIF()

IF(CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()
IF (CMAKE_C_FLAGS_DEBUG)
ENDIF()

IF(CMAKE_C_FLAGS_DEBUG)
SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
ENDIF ()
IF (CMAKE_CXX_FLAGS_DEBUG)
ENDIF()

IF(CMAKE_CXX_FLAGS_DEBUG)
SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
ENDIF ()
ENDIF()

SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

ELSE ()
IF (${TD_DARWIN})
ELSE()
IF(${TD_DARWIN})
set(CMAKE_MACOSX_RPATH 0)
ENDIF ()
IF (${COVER} MATCHES "true")
ENDIF()

IF(${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
ENDIF ()
ENDIF()

# disable all assert
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
IF((${DISABLE_ASSERT} MATCHES "true") OR(${DISABLE_ASSERTS} MATCHES "true"))
ADD_DEFINITIONS(-DDISABLE_ASSERT)
MESSAGE(STATUS "Disable all asserts")
ENDIF()

INCLUDE(CheckCCompilerFlag)
IF (TD_ARM_64 OR TD_ARM_32)

IF(TD_ARM_64 OR TD_ARM_32)
SET(COMPILER_SUPPORT_SSE42 false)
ELSEIF (("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
ELSEIF(("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
SET(COMPILER_SUPPORT_SSE42 true)
MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang")
ELSE()
CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42)
ENDIF()

IF (TD_ARM_64 OR TD_ARM_32)
SET(COMPILER_SUPPORT_FMA false)
SET(COMPILER_SUPPORT_AVX false)
SET(COMPILER_SUPPORT_AVX2 false)
SET(COMPILER_SUPPORT_AVX512F false)
SET(COMPILER_SUPPORT_AVX512BMI false)
SET(COMPILER_SUPPORT_AVX512VL false)
IF(TD_ARM_64 OR TD_ARM_32)
SET(COMPILER_SUPPORT_FMA false)
SET(COMPILER_SUPPORT_AVX false)
SET(COMPILER_SUPPORT_AVX2 false)
SET(COMPILER_SUPPORT_AVX512F false)
SET(COMPILER_SUPPORT_AVX512BMI false)
SET(COMPILER_SUPPORT_AVX512VL false)
ELSE()
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
ENDIF()

IF (COMPILER_SUPPORT_SSE42)
IF(COMPILER_SUPPORT_SSE42)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
ENDIF()

IF ("${SIMD_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_FMA)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
IF("${SIMD_SUPPORT}" MATCHES "true")
IF(COMPILER_SUPPORT_FMA)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
MESSAGE(STATUS "FMA instructions is ACTIVATED")
ENDIF()

IF(COMPILER_SUPPORT_AVX)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
MESSAGE(STATUS "AVX instructions is ACTIVATED")
ENDIF()

IF(COMPILER_SUPPORT_AVX2)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
MESSAGE(STATUS "AVX2 instructions is ACTIVATED")
ENDIF()
MESSAGE(STATUS "FMA instructions is ACTIVATED")
ENDIF()
IF (COMPILER_SUPPORT_AVX)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
MESSAGE(STATUS "AVX instructions is ACTIVATED")
ENDIF()
IF (COMPILER_SUPPORT_AVX2)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
MESSAGE(STATUS "AVX2 instructions is ACTIVATED")
ENDIF()

IF ("${SIMD_AVX512_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
IF("${SIMD_AVX512_SUPPORT}" MATCHES "true")
IF(COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler")
ENDIF()

IF (COMPILER_SUPPORT_AVX512VL)
IF(COMPILER_SUPPORT_AVX512VL)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
MESSAGE(STATUS "avx512vl enabled by compiler")
MESSAGE(STATUS "avx512vl enabled by compiler")
ENDIF()
ENDIF()

@@ -217,16 +241,17 @@ ELSE ()
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")

IF (${BUILD_SANITIZER})
IF(${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})

# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF(${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
ELSE ()
ELSE()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()

ENDIF ()
ENDIF()
ENDIF()

@@ -144,6 +144,12 @@ option(
OFF
)

option(
BUILD_WITH_ANALYSIS
"If build with analysis"
ON
)

ENDIF ()

IF(NOT TD_ENTERPRISE)

@@ -151,8 +157,15 @@ MESSAGE("switch s3 off with community version")
set(BUILD_S3 OFF)
set(BUILD_WITH_S3 OFF)
set(BUILD_WITH_COS OFF)
set(BUILD_WITH_ANALYSIS OFF)
ENDIF ()

IF(${BUILD_WITH_ANALYSIS})
message("build with analysis")
set(BUILD_S3 ON)
set(BUILD_WITH_S3 ON)
ENDIF()

IF(${BUILD_S3})

IF(${BUILD_WITH_S3})

@@ -9,6 +9,5 @@ ExternalProject_Add(stub
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
GIT_SHALLOW true
TEST_COMMAND ""
)

@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3.0
GIT_TAG main
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 3.0
GIT_TAG main
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -10,39 +10,36 @@ if(${BUILD_WITH_S3})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)

elseif(${BUILD_WITH_COS})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})

set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})

if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})

configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")

set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})

if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})

configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")

endif()

set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

@@ -59,7 +56,7 @@ endif()
# taosadapter
if(${BUILD_HTTP})
MESSAGE("BUILD_HTTP is on")
else ()
else()
MESSAGE("BUILD_HTTP is off, use taosAdapter")
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

@@ -110,19 +107,18 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

# xz
#cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

#lzma2
# lzma2
cat("${TD_SUPPORT_DIR}/lzma_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

if (${BUILD_CONTRIB})
if(${BUILD_CONTRIB})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif()
else()
if (NOT ${TD_LINUX})
if(NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)

@@ -134,9 +130,9 @@ else()
endif()
endif()

#cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

#libuv
# libuv
if(${BUILD_WITH_UV})
cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_UV})

@@ -152,17 +148,17 @@ if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)

# cos
elseif(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)

endif()

# crashdump

@@ -191,9 +187,9 @@ endif()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")

# ================================================================================================
# Build

@@ -206,25 +202,27 @@ if(${BUILD_TEST})
gtest
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src>
)

if(${TD_WINDOWS})
target_include_directories(
gtest
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_win>
)
endif(${TD_WINDOWS})

if(${TD_LINUX})
target_include_directories(
gtest
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_linux>
)
endif(${TD_LINUX})

if(${TD_DARWIN})
target_include_directories(
gtest
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
)
endif(${TD_DARWIN})

endif(${BUILD_TEST})

# cJson

@@ -236,15 +234,16 @@ option(CJSON_BUILD_SHARED_LIBS "Overrides BUILD_SHARED_LIBS if CJSON_OVERRIDE_BU
add_subdirectory(cJson EXCLUDE_FROM_ALL)
target_include_directories(
cjson

# see https://stackoverflow.com/questions/25676277/cmake-target-include-directories-prints-an-error-when-i-try-to-add-the-source
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cJson>
)
unset(CMAKE_PROJECT_INCLUDE_BEFORE)

# xml2
#if(${BUILD_WITH_S3})
# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
#endif()
# if(${BUILD_WITH_S3})
# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
# endif()

# lz4
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)

@@ -255,16 +254,26 @@ target_include_directories(

# zlib
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")

if(${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype")
endif(${TD_DARWIN})

add_subdirectory(zlib EXCLUDE_FROM_ALL)

if(${TD_DARWIN})
target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlibstatic
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib
)

if(${TD_DARWIN})
target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype)
endif()
target_include_directories(
zlib
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib

@@ -274,9 +283,9 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)

# add_subdirectory(xz EXCLUDE_FROM_ALL)
# target_include_directories(
# xz
# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
# xz
# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
# )

# leveldb

@@ -291,24 +300,27 @@ endif(${BUILD_WITH_LEVELDB})

# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if (${BUILD_WITH_UV})
if(${BUILD_WITH_UV})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")

if("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
endif (${BUILD_WITH_UV})
endif(${BUILD_WITH_UV})

if (${BUILD_WITH_ROCKSDB})
if (${BUILD_CONTRIB})
if(${BUILD_WITH_ROCKSDB})
if(${BUILD_CONTRIB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")

if("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})

MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})

@@ -316,22 +328,23 @@ if (${BUILD_WITH_ROCKSDB})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})

if (${TD_DARWIN_ARM64})
if(${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})

if (${TD_WINDOWS})
if(${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)

if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
message("Rocksdb build runtime lib use /MT or /MTd")
option(WITH_MD_LIBRARY "build with MD" OFF)
message("Rocksdb build runtime lib use /MT or /MTd")
option(WITH_MD_LIBRARY "build with MD" OFF)
endif()

set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})

if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)

@@ -357,30 +370,32 @@ if (${BUILD_WITH_ROCKSDB})
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
else()
if (NOT ${TD_LINUX})
if(NOT ${TD_LINUX})
MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})

if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})

if (${TD_DARWIN_ARM64})
if(${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})

if (${TD_WINDOWS})
if(${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)

if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
message("Rocksdb build runtime lib use /MT or /MTd")
option(WITH_MD_LIBRARY "build with MD" OFF)
endif()

set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})

if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)

@@ -406,44 +421,44 @@ if (${BUILD_WITH_ROCKSDB})
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif()

endif()
endif()

if(${BUILD_WITH_S3})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
MESSAGE("build with s3: ${BUILD_WITH_S3}")

# cos
elseif(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
#MESSAGE("$ENV{HOME}/.cos-local.1/include")
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)

set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
# ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)

add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
# MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)

set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()

endif(${TD_LINUX})
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)

set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()
endif(${TD_LINUX})
endif()

# pthread
if(${BUILD_PTHREAD})
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()

add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)

@@ -451,16 +466,15 @@ if(${BUILD_PTHREAD})
target_link_libraries(pthread INTERFACE libpthreadVC3)
endif()

# jemalloc
if(${JEMALLOC_ENABLED})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
BUILD_COMMAND ${MAKE}
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
BUILD_COMMAND ${MAKE}
)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
endif()

@@ -514,12 +528,13 @@ endif(${BUILD_WCWIDTH})

# LIBUV
if(${BUILD_WITH_UV})
if (TD_WINDOWS)
if(TD_WINDOWS)
# There is no GetHostNameW function on win7.
file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n  if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
endif ()
endif()

add_subdirectory(libuv EXCLUDE_FROM_ALL)
endif(${BUILD_WITH_UV})

@@ -535,6 +550,7 @@ if(${BUILD_WITH_SQLITE})
INTERFACE m
INTERFACE pthread
)

if(NOT TD_WINDOWS)
target_link_libraries(sqlite
INTERFACE dl

@@ -545,36 +561,38 @@ endif(${BUILD_WITH_SQLITE})
# addr2line
if(${BUILD_ADDR2LINE})
if(NOT ${TD_WINDOWS})
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
check_include_file( "stddef.h" HAVE_STDDEF_H )
check_include_file( "stdlib.h" HAVE_STDLIB_H )
check_include_file( "string.h" HAVE_STRING_H )
check_include_file( "memory.h" HAVE_MEMORY_H )
check_include_file( "strings.h" HAVE_STRINGS_H )
check_include_file( "stdint.h" HAVE_STDINT_H )
check_include_file( "unistd.h" HAVE_UNISTD_H )
check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
check_include_file( "stdafx.h" HAVE_STDAFX_H )
check_include_file( "elf.h" HAVE_ELF_H )
check_include_file( "libelf.h" HAVE_LIBELF_H )
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
check_include_file( "alloca.h" HAVE_ALLOCA_H )
check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
check_include_file("sys/types.h" HAVE_SYS_TYPES_H)
check_include_file("sys/stat.h" HAVE_SYS_STAT_H)
check_include_file("inttypes.h" HAVE_INTTYPES_H)
check_include_file("stddef.h" HAVE_STDDEF_H)
check_include_file("stdlib.h" HAVE_STDLIB_H)
check_include_file("string.h" HAVE_STRING_H)
check_include_file("memory.h" HAVE_MEMORY_H)
check_include_file("strings.h" HAVE_STRINGS_H)
check_include_file("stdint.h" HAVE_STDINT_H)
check_include_file("unistd.h" HAVE_UNISTD_H)
check_include_file("sgidefs.h" HAVE_SGIDEFS_H)
check_include_file("stdafx.h" HAVE_STDAFX_H)
check_include_file("elf.h" HAVE_ELF_H)
check_include_file("libelf.h" HAVE_LIBELF_H)
check_include_file("libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
check_include_file("alloca.h" HAVE_ALLOCA_H)
check_include_file("elfaccess.h" HAVE_ELFACCESS_H)
check_include_file("sys/elf_386.h" HAVE_SYS_ELF_386_H)
check_include_file("sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
check_include_file("sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
check_include_file("sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H)
set(VERSION 0.3.1)
set(PACKAGE_VERSION "\"${VERSION}\"")
configure_file(libdwarf/cmake/config.h.cmake config.h)
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")

if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
target_link_libraries(libdwarf PUBLIC libelf)
endif()

target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")

@@ -583,7 +601,7 @@ if(${BUILD_ADDR2LINE})
file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
add_library(addr2line STATIC "addr2line/addr2line.c")
target_link_libraries(addr2line PUBLIC libdwarf dl z)
target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" )
target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf")
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})

@@ -592,31 +610,41 @@ if(${BUILD_GEOS})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")

if("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})

option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
if (${TD_WINDOWS})

if(${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
else ()
else()
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
endif(${TD_WINDOWS})

target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
)
endif(${BUILD_GEOS})

if (${BUILD_PCRE2})
if(${BUILD_PCRE2})
add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
endif(${BUILD_PCRE2})

if(${TD_LINUX} AND ${BUILD_WITH_S3})
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
endif()

# ================================================================================================
# Build test
# ================================================================================================
MESSAGE("build with dependency tests: ${BUILD_DEPENDENCY_TESTS}")

if(${BUILD_DEPENDENCY_TESTS})
add_subdirectory(test EXCLUDE_FROM_ALL)
endif(${BUILD_DEPENDENCY_TESTS})

@@ -0,0 +1,73 @@
# lib_azure_sdk
set(AZURE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1")
set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")

file(GLOB AZURE_SDK_SRC
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/credentials/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
)

file(GLOB AZURE_SDK_UNIFIED_SRC
${AZURE_SDK_SRC}
)

set(AZURE_SDK_INCLUDES
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/"
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/"
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
)

add_library(_azure_sdk STATIC ${AZURE_SDK_UNIFIED_SRC})
target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)

target_include_directories(
_azure_sdk
PUBLIC "$ENV{HOME}/.cos-local.2/include"
)

find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)

# find_library(CURL_LIBRARY curl)
# find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)

# find_library(CoreFoundation_Library CoreFoundation)
# find_library(SystemConfiguration_Library SystemConfiguration)
target_link_libraries(
_azure_sdk
PRIVATE ${CURL_LIBRARY}
PRIVATE ${SSL_LIBRARY}
PRIVATE ${CRYPTO_LIBRARY}
PRIVATE ${XML2_LIBRARY}

# PRIVATE xml2
PRIVATE zlib

# PRIVATE ${CoreFoundation_Library}
# PRIVATE ${SystemConfiguration_Library}
)

# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
if(TARGET OpenSSL::SSL)
target_link_libraries(_azure_sdk PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()

# Originally, on Windows azure-core is built with winhttp by default
if(TARGET td_contrib::curl)
target_link_libraries(_azure_sdk PRIVATE td_contrib::curl)
endif()

target_include_directories(_azure_sdk SYSTEM BEFORE PUBLIC ${AZURE_SDK_INCLUDES})
add_library(td_contrib::azure_sdk ALIAS _azure_sdk)

@@ -28,5 +28,6 @@ if(${BUILD_WITH_TRAFT})
# add_subdirectory(traft)
endif(${BUILD_WITH_TRAFT})

add_subdirectory(azure)
add_subdirectory(tdev)
add_subdirectory(lz4)

@@ -0,0 +1,27 @@
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED True)

add_executable(
azure-test
main.cpp
)

find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)

# find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)

# find_library(CoreFoundation_Library CoreFoundation)
# find_library(SystemConfiguration_Library SystemConfiguration)
target_link_libraries(
azure-test
PRIVATE _azure_sdk
PRIVATE ${CURL_LIBRARY}
PRIVATE ${XML2_LIBRARY}
PRIVATE ${SSL_LIBRARY}
PRIVATE ${CRYPTO_LIBRARY}
PRIVATE dl
PRIVATE pthread
)

@@ -0,0 +1,99 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <cstdlib>
#include <iostream>
#include <string>

// Include the necessary SDK headers
#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>

// Add appropriate using namespace directives
using namespace Azure::Storage;
using namespace Azure::Storage::Blobs;

// Secrets should be stored & retrieved from secure locations such as Azure::KeyVault. For
// convenience and brevity of samples, the secrets are retrieved from environment variables.

std::string GetEndpointUrl() {
  // return std::getenv("AZURE_STORAGE_ACCOUNT_URL");
  const char* accountId = std::getenv("ablob_account_id");  // may be unset; guard before building a std::string
  if (accountId == nullptr || std::string(accountId).empty()) {
    return "";
  }

  return std::string(accountId) + ".blob.core.windows.net";
}

std::string GetAccountName() {
  // return std::getenv("AZURE_STORAGE_ACCOUNT_NAME");
  const char* name = std::getenv("ablob_account_id");
  return name ? name : "";
}

std::string GetAccountKey() {
  // return std::getenv("AZURE_STORAGE_ACCOUNT_KEY");
  const char* key = std::getenv("ablob_account_secret");
  return key ? key : "";
}

int main() {
  std::string endpointUrl = GetEndpointUrl();
  std::string accountName = GetAccountName();
  std::string accountKey = GetAccountKey();

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    std::string accountURL = "https://fd2d01cd892f844eeaa2273.blob.core.windows.net";
    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = "myblobcontainer";
    // auto containerClient = blobServiceClient.GetBlobContainerClient("myblobcontainer");
    auto containerClient = blobServiceClient.GetBlobContainerClient("td-test");

    // Create the container if it does not exist
    std::cout << "Creating container: " << containerName << std::endl;
    // containerClient.CreateIfNotExists();

    std::string blobName = "blob.txt";
    uint8_t blobContent[] = "Hello Azure!";
    // Create the block blob client
    BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);

    // Upload the blob
    std::cout << "Uploading blob: " << blobName << std::endl;
    blobClient.UploadFrom(blobContent, sizeof(blobContent));
    /*
    auto blockBlobClient = BlockBlobClient(endpointUrl, sharedKeyCredential);

    // Create some data to upload into the blob.
    std::vector<uint8_t> data = {1, 2, 3, 4};
    Azure::Core::IO::MemoryBodyStream stream(data);

    Azure::Response<Models::UploadBlockBlobResult> response = blockBlobClient.Upload(stream);

    Models::UploadBlockBlobResult model = response.Value;
    std::cout << "Last modified date of uploaded blob: " << model.LastModified.ToString()
              << std::endl;
    */
  } catch (const Azure::Core::RequestFailedException& e) {
    std::cout << "Status Code: " << static_cast<int>(e.StatusCode) << ", Reason Phrase: " << e.ReasonPhrase
              << std::endl;
    std::cout << e.what() << std::endl;

    return 1;
  }

  return 0;
}

@@ -46,7 +46,7 @@ For more details on features, please read through the entire documentation.

By making full use of [characteristics of time series data](https://tdengine.com/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases with the following advantages.

- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.

- **[Simplified Solution](https://tdengine.com/comprehensive-industrial-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

@@ -75,9 +75,9 @@ taos>

## TDengine Graphic User Interface

From TDengine 3.3.0.0, there is a new componenet called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features.
From TDengine 3.3.0.0, there is a new component called `taos-explorer` added in the TDengine Docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. Some features are only available in TDengine Enterprise Edition; please contact the TDengine sales team if you need them.

To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it the first time, you need to register with your enterprise email, then can logon using your user name and password in the TDengine database management system.
To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com and the port used on the host is 6060, you access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. The default username and password for logging in to the TDengine database management system are "root/taosdata".

## Test data insert performance

@@ -364,6 +364,9 @@ The configuration parameters for specifying super table tag columns and data columns
- **min**: The minimum value of the column/label of the data type. The generated value will be greater than or equal to the minimum value.

- **max**: The maximum value of the column/label of the data type. The generated value will be less than the maximum value.

- **scalingFactor**: Floating-point precision enhancement factor, which takes effect only when the data type is float/double. It has a valid range of positive integers from 1 to 1,000,000. It is used to enhance the precision of generated floating-point numbers, particularly when the min or max values are small. This property enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 indicates an enhancement of 1 decimal of precision, 100 indicates an enhancement of 2 decimals of precision, and so on.

- **fun**: This column of data is filled with functions. Currently, only the sin and cos functions are supported. The input parameter is the timestamp, which is converted to an angle value; the conversion formula is: angle x = input time column ts value % 360. It also supports coefficient adjustment and random fluctuation factor adjustment, presented in a fixed-format expression such as fun="10\*sin(x)+100\*random(5)", where x represents the angle, ranging from 0 to 360 degrees, and the growth step size is consistent with the time column step size. 10 represents the coefficient of multiplication, 100 represents the coefficient of addition or subtraction, and 5 represents the fluctuation range within a random range of 5%. The currently supported data types are int, bigint, float, and double. Note: The expression is fixed and cannot be reversed.

- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.

@@ -1187,7 +1187,7 @@ CSUM(expr)
### DERIVATIVE

```sql
DERIVATIVE(expr, time_inerval, ignore_negative)
DERIVATIVE(expr, time_interval, ignore_negative)

ignore_negative: {
    0
|
||||
|
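As a quick check of the corrected signature, here is a minimal sketch; the table `d1001` and its `current` column are hypothetical.

```sql
-- Per-second rate of change of current, ignoring negative results
-- (assumes d1001(ts TIMESTAMP, current FLOAT)).
SELECT DERIVATIVE(current, 1s, 1) FROM d1001;
```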
|
|
@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause.
|
|||
The `FILL` clause specifies how to fill when there is data missing in any window (see the example after this list), including:
|
||||
|
||||
1. NONE: No fill (the default fill mode)
|
||||
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`.
|
||||
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled. If multiple columns in the select list need to be filled, the fill clause must contain a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only exprs in the select list that contain normal columns need a fill value; exprs like `_wstart`, `_wend`, `_wduration`, `_wstart + 1a`, `now`, `1+1`, and partition keys like tbname (when using partition by) don't need one. But exprs like `timediff(last(ts), _wstart)` do need a fill value.
|
||||
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
|
|
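A minimal sketch of `FILL` in context, assuming a hypothetical super table `meters` with a `current` column:

```sql
-- 10-minute averages over one day; empty windows reuse the previous
-- window's value because of FILL(PREV).
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2024-01-01 00:00:00' AND ts < '2024-01-02 00:00:00'
  INTERVAL(10m) FILL(PREV);
```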
|
@ -27,11 +27,15 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint,
|
|||
## Delete a DNODE
|
||||
|
||||
```sql
|
||||
DROP DNODE dnode_id
|
||||
DROP DNODE dnode_id [force] [unsafe]
|
||||
```
|
||||
|
||||
Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted.
|
||||
|
||||
Only an online dnode can be deleted normally. Execute DROP with the force option if an offline dnode needs to be deleted.
|
||||
|
||||
Execute DROP with the unsafe option if a dnode holding single-replica data is offline and the data on it cannot be restored.
|
||||
|
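A short sketch of the three forms, using a hypothetical dnode ID of 3:

```sql
DROP DNODE 3;         -- normal drop: the dnode must be online
DROP DNODE 3 FORCE;   -- drop a dnode that is offline
DROP DNODE 3 UNSAFE;  -- offline dnode whose single-replica data cannot be restored
```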
||||
## Modify Dnode Configuration
|
||||
|
||||
```sql
|
||||
|
|
|
@ -41,7 +41,7 @@ In this article, it specifically refers to the level within the secondary compre
|
|||
### Create Table with Compression
|
||||
|
||||
```sql
|
||||
CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level'], [, other cerate_definition]...])
|
||||
CREATE TABLE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level']] [, other create_definition]...)
|
||||
```
|
||||
|
||||
**Parameter Description**
|
||||
|
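For illustration, a minimal sketch of the syntax above; the table name and the encode/compress/level values are examples only, so check the parameter description for the sets supported by your version:

```sql
-- Hypothetical table with per-column encoding and compression.
CREATE TABLE db1.sensors (
    ts  TIMESTAMP ENCODE 'delta-i' COMPRESS 'lz4' LEVEL 'medium',
    val FLOAT     ENCODE 'delta-d' COMPRESS 'lz4' LEVEL 'medium'
);
```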
@ -58,7 +58,7 @@ CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compr
|
|||
### Change Compression Method
|
||||
|
||||
```sql
|
||||
ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'ecode_type'] [COMPRESS 'compress_type'] [LEVEL "high"]
|
||||
ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL "high"]
|
||||
```
|
||||
|
||||
**Parameter Description**
|
||||
|
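A minimal sketch of changing one column, reusing the hypothetical `db1.sensors` table from the creation example above:

```sql
-- Switch the val column to a stronger compressor at high level.
ALTER TABLE db1.sensors MODIFY COLUMN val COMPRESS 'zstd' LEVEL 'high';
```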
|
|
@ -125,7 +125,7 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.
|
|||
Starting from `TDengine 3.0.3.0`, `taosAdapter` provides a configuration parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error.
|
||||
|
||||
| **Description** | **httpCodeServerError false** | **httpCodeServerError true** |
|
||||
|--------------------|---------------------------- ------|---------------------------------------|
|
||||
|--------------------|----------------------------------|---------------------------------------|
|
||||
| taos_errno() returns 0 | 200 | 200 |
|
||||
| taos_errno() returns non-0 | 200 (except authentication error) | 500 (except authentication error and 400/502 error) |
|
||||
| Parameter error | 400 (only handle HTTP request URL parameter error) | 400 (handle HTTP request URL parameter error and taosd return error) |
|
||||
|
|
|
@ -701,15 +701,6 @@ The charset that takes effect is UTF-8.
|
|||
| Type | String |
|
||||
| Default Value | _tag_null |
|
||||
|
||||
### smlDataFormat
|
||||
|
||||
| Attribute | Description |
|
||||
| ----------- | ----------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
||||
| Value Range | 0: not consistent; 1: consistent. |
|
||||
| Default | 0 |
|
||||
|
||||
### smlTsDefaultName
|
||||
|
||||
| Attribute | Description |
|
||||
|
@ -719,6 +710,16 @@ The charset that takes effect is UTF-8.
|
|||
| Type | String |
|
||||
| Default Value | _ts |
|
||||
|
||||
### smlDot2Underline
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Convert the dot in the supertable name to an underscore |
|
||||
| Type | Bool |
|
||||
| Default Value | true |
|
||||
|
||||
|
||||
## Compress Parameters
|
||||
|
||||
### compressMsgSize
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: Load Balance
|
|||
description: This document describes how TDengine implements load balancing.
|
||||
---
|
||||
|
||||
The load balance in TDengine is mainly about processing data series data. TDengine employes builtin hash algorithm to distribute all the tables, sub-tables and their data of a database across all the vgroups that belongs to the database. Each table or sub-table can only be handled by a single vgroup, while each vgroup can process multiple table or sub-table.
|
||||
The load balance in TDengine is mainly about processing time series data. TDengine employs a built-in hash algorithm to distribute all the tables, sub-tables, and their data of a database across all the vgroups that belong to the database. Each table or sub-table can only be handled by a single vgroup, while each vgroup can process multiple tables or sub-tables.
|
||||
|
||||
The number of vgroups can be specified when creating a database, using the parameter `vgroups`.
|
||||
|
||||
|
@ -12,10 +12,10 @@ The number of vgroup can be specified when creating a database, using the parame
|
|||
create database db0 vgroups 100;
|
||||
```
|
||||
|
||||
The proper value of `vgroups` depends on available system resources. Assuming there is only one database to be created in the system, then the number of `vgroups` is determined by the available resources from all dnodes. In principle more vgroups can be created if you have more CPU and memory. Disk I/O is another important factor to consider. Once the bottleneck shows on disk I/O, more vgroups may downgrad the system performance significantly. If multiple databases are to be created in the system, then the total number of `vroups` of all the databases are dependent on the available system resources. It needs to be careful to distribute vgroups among these databases, you need to consider the number of tables, data writing frequency, size of each data row for all these databases. A recommended practice is to firstly choose a starting number for `vgroups`, for example double of the number of CPU cores, then try to adjust and optimize system configurations to find the best setting for `vgroups`, then distribute these vgroups among databases.
|
||||
The proper value of `vgroups` depends on available system resources. Assuming there is only one database to be created in the system, the number of `vgroups` is determined by the available resources from all dnodes. In principle more vgroups can be created if you have more CPU and memory. Disk I/O is another important factor to consider: once the bottleneck shows on disk I/O, more vgroups may degrade the system performance significantly. If multiple databases are to be created in the system, the total number of `vgroups` of all the databases depends on the available system resources. Distribute vgroups among these databases carefully; you need to consider the number of tables, data writing frequency, and size of each data row for all these databases. A recommended practice is to first choose a starting number for `vgroups`, for example double the number of CPU cores, then adjust and optimize system configurations to find the best setting for `vgroups`, then distribute these vgroups among databases.
|
||||
|
||||
Furthermode, TDengine distributes the vgroups of each database equally among all dnodes. In case of replica 3, the distribution is even more complex, TDengine tries its best to prevent any dnode from becoming a bottleneck.
|
||||
Furthermore, TDengine distributes the vgroups of each database equally among all dnodes. In case of replica 3, the distribution is even more complex, TDengine tries its best to prevent any dnode from becoming a bottleneck.
|
||||
|
||||
TDegnine utilizes the above ways to achieve load balance in a cluster, and finally achieve higher throughput.
|
||||
TDengine utilizes the above ways to achieve load balance in a cluster, and finally achieve higher throughput.
|
||||
|
||||
Once the load balance is achieved, after some operations like deleting tables or dropping databases, the load across all dnodes may become imbalanced; a rebalancing method will be provided in later versions. However, even without explicit rebalancing, TDengine will try its best to achieve a new balance without manual interference when a new database is created.
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.3</version>
|
||||
<version>3.4.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.locationtech.jts</groupId>
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
|
||||
## TDengine Spring JDBC Template Demo
|
||||
|
||||
`Spring JDBC Template` 简化了原生 JDBC Connection 获取释放等操作,使得操作数据库更加方便。
|
||||
`Spring JDBC Template` simplifies the operations of acquiring and releasing native JDBC Connections, making database operations more convenient.
|
||||
|
||||
### 配置
|
||||
### Configuration
|
||||
|
||||
修改 `src/main/resources/applicationContext.xml` 文件中 TDengine 的配置信息:
|
||||
Modify the TDengine configuration in the `src/main/resources/applicationContext.xml` file:
|
||||
|
||||
```xml
|
||||
<bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
|
||||
|
@ -20,13 +20,15 @@
|
|||
</bean>
|
||||
```
|
||||
|
||||
### 打包运行
|
||||
### Package and run
|
||||
|
||||
Navigate to the `TDengine/tests/examples/JDBC/SpringJdbcTemplate` directory and execute the following commands to generate an executable jar file.
|
||||
|
||||
进入 `TDengine/tests/examples/JDBC/SpringJdbcTemplate` 目录下,执行以下命令可以生成可执行 jar 包。
|
||||
```shell
|
||||
mvn clean package
|
||||
```
|
||||
打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试:
|
||||
After successfully packaging, navigate to the `target/` directory and execute the following commands to run the tests:
|
||||
|
||||
```shell
|
||||
java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar
|
||||
```
|
||||
```
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.3</version>
|
||||
<version>3.4.0</version>
|
||||
</dependency>
|
||||
<!-- druid -->
|
||||
<dependency>
|
||||
|
|
|
@ -1,21 +1,21 @@
|
|||
### 设置###
|
||||
### Settings ###
|
||||
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
|
||||
### 输出信息到控制抬 ###
|
||||
### Output information to the console ###
|
||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.Target=System.out
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
||||
### 输出DEBUG 级别以上的日志到=logs/debug.log
|
||||
### Output logs of DEBUG level and above to logs/debug.log
|
||||
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.DebugLog.File=logs/debug.log
|
||||
log4j.appender.DebugLog.Append=true
|
||||
log4j.appender.DebugLog.Threshold=DEBUG
|
||||
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
### 输出ERROR 级别以上的日志到=logs/error.log
|
||||
### Output logs of ERROR level and above to logs/error.log
|
||||
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.ErrorLog.File=logs/error.log
|
||||
log4j.appender.ErrorLog.Append=true
|
||||
log4j.appender.ErrorLog.Threshold=ERROR
|
||||
log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
|
|
|
@ -1,27 +1,28 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<something-else-entirely>
|
||||
<proxool>
|
||||
<!-- Alias for the data source -->
|
||||
<alias>ds</alias>
|
||||
<!--数据源的别名-->
|
||||
<!-- URL connection string -->
|
||||
<driver-url>jdbc:TAOS-RS://127.0.0.1:6041/log</driver-url>
|
||||
<!--url连接串-->
|
||||
<!-- Driver class -->
|
||||
<driver-class>com.taosdata.jdbc.rs.RestfulDriver</driver-class>
|
||||
<!--驱动类-->
|
||||
|
||||
<driver-properties>
|
||||
<property name="user" value="root"/>
|
||||
<property name="password" value="taosdata"/>
|
||||
</driver-properties>
|
||||
<!--最大连接数(默认5个),超过了这个连接数,再有请求时,就排在队列中等候,最大的等待请求数由maximum-new-connections决定 -->
|
||||
<!-- Maximum connection count (default is 5). If this number is exceeded, new requests will be queued. The maximum number of queued requests is determined by maximum-new-connections -->
|
||||
<maximum-connection-count>100</maximum-connection-count>
|
||||
<!-- 定义连接池中的最大连接数 -->
|
||||
<!-- Defines the maximum number of connections in the connection pool -->
|
||||
<maximum-active-time>100</maximum-active-time>
|
||||
<!--最少保持的空闲连接数(默认2个)-->
|
||||
<!-- Minimum number of idle connections to maintain (default is 2) -->
|
||||
<prototype-count>1</prototype-count>
|
||||
<!--最小连接数(默认2个)-->
|
||||
<!-- Minimum connection count (default is 2) -->
|
||||
<minimum-connection-count>5</minimum-connection-count>
|
||||
<!--proxool自动侦察各个连接状态的时间间隔(毫秒),侦察到空闲的连接就马上回收,超时的销毁 默认30秒-->
|
||||
<!-- Interval (in milliseconds) for Proxool to automatically check the status of each connection. Idle connections are immediately reclaimed, and timed-out connections are destroyed. Default is 30 seconds -->
|
||||
<house-keeping-sleep-time>30000</house-keeping-sleep-time>
|
||||
<!--用于保持连接的测试语句 -->
|
||||
<!-- Test statement used to maintain the connection -->
|
||||
<house-keeping-test-sql>select server_version()</house-keeping-test-sql>
|
||||
</proxool>
|
||||
</something-else-entirely>
|
||||
</something-else-entirely>
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.3</version>
|
||||
<version>3.4.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
|
|
|
@ -35,17 +35,18 @@ public class Worker implements Runnable {
|
|||
public void run() {
|
||||
while (!Thread.interrupted()) {
|
||||
try {
|
||||
// 控制请求频率
|
||||
// Control request rate
|
||||
if (semaphore.tryAcquire()) {
|
||||
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
|
||||
pool.submit(() -> {
|
||||
RateLimiter limiter = RateLimiter.create(rate);
|
||||
try {
|
||||
for (ConsumerRecord<Bean> record : records) {
|
||||
// 流量控制
|
||||
// Traffic control
|
||||
limiter.acquire();
|
||||
// 业务处理数据
|
||||
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
|
||||
// Business data processing
|
||||
System.out.println("[" + LocalDateTime.now() + "] Thread id:"
|
||||
+ Thread.currentThread().getId() + " -> " + record.value());
|
||||
}
|
||||
} finally {
|
||||
semaphore.release();
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
# 使用说明
|
||||
# Instructions
|
||||
|
||||
## 创建使用db
|
||||
## Create and use the database
|
||||
```shell
|
||||
$ taos
|
||||
|
||||
> create database mp_test
|
||||
```
|
||||
|
||||
## 执行测试用例
|
||||
## Execute test cases
|
||||
|
||||
```shell
|
||||
$ mvn clean test
|
||||
```
|
||||
```
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
## TDengine SpringBoot + Mybatis Demo
|
||||
|
||||
## 需要提前创建 test 数据库
|
||||
## Need to create a test database in advance
|
||||
|
||||
```
|
||||
$ taos -s 'create database if not exists test'
|
||||
|
@ -8,7 +8,7 @@ $ taos -s 'create database if not exists test'
|
|||
$ curl http://localhost:8080/weather/init
|
||||
```
|
||||
|
||||
### 配置 application.properties
|
||||
### Configure application.properties
|
||||
```properties
|
||||
# datasource config
|
||||
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
|
||||
|
@ -38,9 +38,9 @@ mybatis.mapper-locations=classpath:mapper/*.xml
|
|||
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||
```
|
||||
|
||||
### 主要功能
|
||||
### Main functions
|
||||
|
||||
* 创建数据库和表
|
||||
* Create databases and tables
|
||||
```xml
|
||||
<!-- weatherMapper.xml -->
|
||||
<update id="createDB" >
|
||||
|
@ -52,14 +52,14 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
|||
</update>
|
||||
```
|
||||
|
||||
* 插入单条记录
|
||||
* Insert a single record
|
||||
```xml
|
||||
<!-- weatherMapper.xml -->
|
||||
<insert id="insert" parameterType="Weather" >
|
||||
insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
|
||||
</insert>
|
||||
```
|
||||
* 插入多条记录
|
||||
* Insert multiple records
|
||||
```xml
|
||||
<!-- weatherMapper.xml -->
|
||||
<insert id="batchInsert" parameterType="java.util.List" >
|
||||
|
@ -69,7 +69,7 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
|||
</foreach>
|
||||
</insert>
|
||||
```
|
||||
* 分页查询
|
||||
* Pagination query
|
||||
```xml
|
||||
<!-- weatherMapper.xml -->
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
|
|
@ -67,7 +67,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.3</version>
|
||||
<version>3.4.0</version>
|
||||
<!-- <scope>system</scope>-->
|
||||
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
||||
</dependency>
|
||||
|
|
|
@ -1,11 +1,14 @@
|
|||
```
|
||||
cd tests/examples/JDBC/taosdemo
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
# 先建表,再插入的
|
||||
# Create tables first, then insert data
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
# 不建表,直接插入的
|
||||
# Insert data directly without creating tables
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
```
|
||||
|
||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
||||
|
||||
|
||||
If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.
|
||||
|
|
|
@ -24,14 +24,14 @@ public class TaosDemoApplication {
|
|||
private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
|
||||
|
||||
public static void main(String[] args) throws IOException {
|
||||
// 读配置参数
|
||||
// Read configuration parameters
|
||||
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
|
||||
boolean isHelp = Arrays.asList(args).contains("--help");
|
||||
if (isHelp || config.host == null || config.host.isEmpty()) {
|
||||
JdbcTaosdemoConfig.printHelp();
|
||||
System.exit(0);
|
||||
}
|
||||
// 初始化
|
||||
// Initialize
|
||||
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user,
|
||||
config.password);
|
||||
if (config.executeSql != null && !config.executeSql.isEmpty()
|
||||
|
@ -50,7 +50,7 @@ public class TaosDemoApplication {
|
|||
final SuperTableService superTableService = new SuperTableService(dataSource);
|
||||
final SubTableService subTableService = new SubTableService(dataSource);
|
||||
|
||||
// 创建数据库
|
||||
// create database
|
||||
long start = System.currentTimeMillis();
|
||||
Map<String, String> databaseParam = new HashMap<>();
|
||||
databaseParam.put("database", config.database);
|
||||
|
@ -81,13 +81,13 @@ public class TaosDemoApplication {
|
|||
config.prefixOfFields, config.numOfTags, config.prefixOfTags);
|
||||
}
|
||||
/**********************************************************************************/
|
||||
// 建表
|
||||
// create table
|
||||
start = System.currentTimeMillis();
|
||||
if (config.doCreateTable) {
|
||||
superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName());
|
||||
superTableService.create(superTableMeta);
|
||||
if (!config.autoCreateTable) {
|
||||
// 批量建子表
|
||||
// create sub tables in batch
|
||||
subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable,
|
||||
config.numOfThreadsForCreate);
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ public class TaosDemoApplication {
|
|||
end = System.currentTimeMillis();
|
||||
logger.info(">>> create table time cost : " + (end - start) + " ms.");
|
||||
/**********************************************************************************/
|
||||
// 插入
|
||||
// insert data
|
||||
long tableSize = config.numOfTables;
|
||||
int threadSize = config.numOfThreadsForInsert;
|
||||
long startTime = getProperStartTime(config.startTime, config.days);
|
||||
|
@ -111,10 +111,9 @@ public class TaosDemoApplication {
|
|||
end = System.currentTimeMillis();
|
||||
logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
|
||||
/**********************************************************************************/
|
||||
// 查询
|
||||
|
||||
/**********************************************************************************/
|
||||
// 删除表
|
||||
// drop table
|
||||
if (config.dropTable) {
|
||||
superTableService.drop(config.database, config.superTable);
|
||||
}
|
||||
|
|
|
@ -9,21 +9,22 @@ import java.util.List;
|
|||
@Repository
|
||||
public interface SubTableMapper {
|
||||
|
||||
// 创建:子表
|
||||
// Create: SubTable
|
||||
void createUsingSuperTable(SubTableMeta subTableMeta);
|
||||
|
||||
// 插入:一张子表多个values
|
||||
// Insert: Multiple records into one SubTable
|
||||
int insertOneTableMultiValues(SubTableValue subTableValue);
|
||||
|
||||
// 插入:一张子表多个values, 自动建表
|
||||
// Insert: Multiple records into one SubTable, auto create SubTables
|
||||
int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue);
|
||||
|
||||
// 插入:多张表多个values
|
||||
// Insert: Multiple records into multiple SubTable
|
||||
int insertMultiTableMultiValues(List<SubTableValue> tables);
|
||||
|
||||
// 插入:多张表多个values,自动建表
|
||||
// Insert: Multiple records into multiple SubTable, auto create SubTables
|
||||
int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables);
|
||||
|
||||
//<!-- TODO:修改子表标签值 alter table ${tablename} set tag tagName=newTagValue-->
|
||||
// <!-- TODO: Modify SubTable tag value: alter table ${tablename} set tag
|
||||
// tagName=newTagValue-->
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,24 +6,26 @@ import org.springframework.stereotype.Repository;
|
|||
@Repository
|
||||
public interface SuperTableMapper {
|
||||
|
||||
// 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...)
|
||||
// Create super table: create table if not exists xxx.xxx (f1 type1, f2 type2,
|
||||
// ... ) tags( t1 type1, t2 type2 ...)
|
||||
void createSuperTable(SuperTableMeta tableMetadata);
|
||||
|
||||
// 删除超级表 drop table if exists xxx;
|
||||
// Drop super table: drop table if exists xxx;
|
||||
void dropSuperTable(String database, String name);
|
||||
|
||||
//<!-- TODO:查询所有超级表信息 show stables -->
|
||||
// <!-- TODO: Query all super table information show stables -->
|
||||
|
||||
//<!-- TODO:查询表结构 describe stable -->
|
||||
// <!-- TODO: Query table structure describe stable -->
|
||||
|
||||
//<!-- TODO:增加列 alter table ${tablename} add column fieldName dataType -->
|
||||
// <!-- TODO: Add column alter table ${tablename} add column fieldName dataType
|
||||
// -->
|
||||
|
||||
//<!-- TODO:删除列 alter table ${tablename} drop column fieldName -->
|
||||
// <!-- TODO: Drop column alter table ${tablename} drop column fieldName -->
|
||||
|
||||
//<!-- TODO:添加标签 alter table ${tablename} add tag new_tagName tag_type -->
|
||||
// <!-- TODO: Add tag alter table ${tablename} add tag new_tagName tag_type -->
|
||||
|
||||
//<!-- TODO:删除标签 alter table ${tablename} drop tag_name -->
|
||||
|
||||
//<!-- TODO:修改标签名 alter table ${tablename} change tag old_tagName new_tagName -->
|
||||
// <!-- TODO: Drop tag alter table ${tablename} drop tag_name -->
|
||||
|
||||
// <!-- TODO: Change tag name alter table ${tablename} change tag old_tagName
|
||||
// new_tagName -->
|
||||
}
|
||||
|
|
|
@ -9,19 +9,18 @@ import java.util.List;
|
|||
@Repository
|
||||
public interface TableMapper {
|
||||
|
||||
// 创建:普通表
|
||||
// Create: Normal table
|
||||
void create(TableMeta tableMeta);
|
||||
|
||||
// 插入:一张表多个value
|
||||
// Insert: Multiple records into one table
|
||||
int insertOneTableMultiValues(TableValue values);
|
||||
|
||||
// 插入: 一张表多个value,指定的列
|
||||
// Insert: Multiple records into one table, specified columns
|
||||
int insertOneTableMultiValuesWithColumns(TableValue values);
|
||||
|
||||
// 插入:多个表多个value
|
||||
// Insert: Multiple records into multiple tables
|
||||
int insertMultiTableMultiValues(List<TableValue> tables);
|
||||
|
||||
// 插入:多个表多个value, 指定的列
|
||||
// Insert: Multiple records into multiple tables, specified columns
|
||||
int insertMultiTableMultiValuesWithColumns(List<TableValue> tables);
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,12 +14,12 @@ public class DatabaseService {
|
|||
this.databaseMapper = new DatabaseMapperImpl(dataSource);
|
||||
}
|
||||
|
||||
// 建库,指定 name
|
||||
// Create database with specified name
|
||||
public void createDatabase(String database) {
|
||||
databaseMapper.createDatabase(database);
|
||||
}
|
||||
|
||||
// 建库,指定参数 keep,days,replica等
|
||||
// Create database with specified parameters such as keep, days, replica, etc.
|
||||
public void createDatabase(Map<String, String> map) {
|
||||
if (map.isEmpty())
|
||||
return;
|
||||
|
|
|
@ -27,7 +27,8 @@ public class SubTableService extends AbstractService {
|
|||
this.mapper = new SubTableMapperImpl(datasource);
|
||||
}
|
||||
|
||||
public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, int numOfThreadsForCreate) {
|
||||
public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable,
|
||||
int numOfThreadsForCreate) {
|
||||
ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate);
|
||||
for (long i = 0; i < numOfTables; i++) {
|
||||
long tableIndex = i;
|
||||
|
@ -35,54 +36,58 @@ public class SubTableService extends AbstractService {
|
|||
}
|
||||
executor.shutdown();
|
||||
try {
|
||||
executor.awaitTermination(Long.MAX_VALUE,TimeUnit.NANOSECONDS);
|
||||
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public void createSubTable(SuperTableMeta superTableMeta, String tableName) {
|
||||
// 构造数据
|
||||
// Construct data
|
||||
SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName);
|
||||
createSubTable(meta);
|
||||
}
|
||||
|
||||
// 创建一张子表,可以指定database,supertable,tablename,tag值
|
||||
// Create a sub-table, specifying database, super table, table name, and tag
|
||||
// values
|
||||
public void createSubTable(SubTableMeta subTableMeta) {
|
||||
mapper.createUsingSuperTable(subTableMeta);
|
||||
}
|
||||
|
||||
/*************************************************************************************************************************/
|
||||
// 插入:多线程,多表
|
||||
// Insert: Multi-threaded, multiple tables
|
||||
public int insert(List<SubTableValue> subTableValues, int threadSize, int frequency) {
|
||||
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
|
||||
Future<Integer> future = executor.submit(() -> insert(subTableValues));
|
||||
executor.shutdown();
|
||||
//TODO:frequency
|
||||
// TODO:frequency
|
||||
return getAffectRows(future);
|
||||
}
|
||||
|
||||
// 插入:单表,insert into xxx values(),()...
|
||||
// Insert: Single table, insert into xxx values(),()...
|
||||
public int insert(SubTableValue subTableValue) {
|
||||
return mapper.insertOneTableMultiValues(subTableValue);
|
||||
}
|
||||
|
||||
// 插入: 多表,insert into xxx values(),()... xxx values(),()...
|
||||
// Insert: Multiple tables, insert into xxx values(),()... xxx values(),()...
|
||||
public int insert(List<SubTableValue> subTableValues) {
|
||||
return mapper.insertMultiTableMultiValues(subTableValues);
|
||||
}
|
||||
|
||||
// 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()...
|
||||
// Insert: Single table, auto-create table, insert into xxx using xxx tags(...)
|
||||
// values(),()...
|
||||
public int insertAutoCreateTable(SubTableValue subTableValue) {
|
||||
return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue);
|
||||
}
|
||||
|
||||
// 插入:多表,自动建表, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()...
|
||||
// Insert: Multiple tables, auto-create tables, insert into xxx using XXX
|
||||
// tags(...) values(),()... xxx using XXX tags(...) values(),()...
|
||||
public int insertAutoCreateTable(List<SubTableValue> subTableValues) {
|
||||
return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
|
||||
}
|
||||
|
||||
public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, long gap, JdbcTaosdemoConfig config) {
|
||||
public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime,
|
||||
long gap, JdbcTaosdemoConfig config) {
|
||||
List<FutureTask> taskList = new ArrayList<>();
|
||||
List<Thread> threads = IntStream.range(0, threadSize)
|
||||
.mapToObj(i -> {
|
||||
|
@ -94,8 +99,7 @@ public class SubTableService extends AbstractService {
|
|||
startTime, config.timeGap,
|
||||
config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL,
|
||||
config.order, config.rate, config.range,
|
||||
config.prefixOfTable, config.autoCreateTable)
|
||||
);
|
||||
config.prefixOfTable, config.autoCreateTable));
|
||||
taskList.add(task);
|
||||
return new Thread(task, "InsertThread-" + i);
|
||||
}).collect(Collectors.toList());
|
||||
|
@ -126,7 +130,7 @@ public class SubTableService extends AbstractService {
|
|||
private class InsertTask implements Callable<Integer> {
|
||||
|
||||
private final long startTableInd; // included
|
||||
private final long endTableInd; // excluded
|
||||
private final long endTableInd; // excluded
|
||||
private final long startTime;
|
||||
private final long timeGap;
|
||||
private final long numOfRowsPerTable;
|
||||
|
@ -140,10 +144,10 @@ public class SubTableService extends AbstractService {
|
|||
private final boolean autoCreateTable;
|
||||
|
||||
public InsertTask(SuperTableMeta superTableMeta, long startTableInd, long endTableInd,
|
||||
long startTime, long timeGap,
|
||||
long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL,
|
||||
int order, int rate, long range,
|
||||
String prefixOfTable, boolean autoCreateTable) {
|
||||
long startTime, long timeGap,
|
||||
long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL,
|
||||
int order, int rate, long range,
|
||||
String prefixOfTable, boolean autoCreateTable) {
|
||||
this.superTableMeta = superTableMeta;
|
||||
this.startTableInd = startTableInd;
|
||||
this.endTableInd = endTableInd;
|
||||
|
@ -159,7 +163,6 @@ public class SubTableService extends AbstractService {
|
|||
this.autoCreateTable = autoCreateTable;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Integer call() {
|
||||
|
||||
|
@ -171,23 +174,27 @@ public class SubTableService extends AbstractService {
|
|||
|
||||
int affectRows = 0;
|
||||
// row
|
||||
for (long rowCnt = 0; rowCnt < numOfRowsPerTable; ) {
|
||||
for (long rowCnt = 0; rowCnt < numOfRowsPerTable;) {
|
||||
long rowSize = numOfValuesPerSQL;
|
||||
if (rowCnt + rowSize > numOfRowsPerTable) {
|
||||
rowSize = numOfRowsPerTable - rowCnt;
|
||||
}
|
||||
//table
|
||||
for (long tableCnt = startTableInd; tableCnt < endTableInd; ) {
|
||||
// table
|
||||
for (long tableCnt = startTableInd; tableCnt < endTableInd;) {
|
||||
long tableSize = numOfTablesPerSQL;
|
||||
if (tableCnt + tableSize > endTableInd) {
|
||||
tableSize = endTableInd - tableCnt;
|
||||
}
|
||||
long startTime = this.startTime + rowCnt * timeGap;
|
||||
// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + timeGap + "");
|
||||
// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " +
|
||||
// rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt +
|
||||
// ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: "
|
||||
// + timeGap + "");
|
||||
/***********************************************/
|
||||
// 生成数据
|
||||
List<SubTableValue> data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, tableSize, rowSize, startTime, timeGap);
|
||||
// 乱序
|
||||
// Construct data
|
||||
List<SubTableValue> data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt,
|
||||
tableSize, rowSize, startTime, timeGap);
|
||||
// disorder
|
||||
if (order != 0)
|
||||
SubTableValueGenerator.disrupt(data, rate, range);
|
||||
// insert
|
||||
|
@ -205,5 +212,4 @@ public class SubTableService extends AbstractService {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ public class SuperTableService {
|
|||
this.superTableMapper = new SuperTableMapperImpl(dataSource);
|
||||
}
|
||||
|
||||
// 创建超级表,指定每个field的名称和类型,每个tag的名称和类型
|
||||
// Create super table, specifying the name and type of each field and each tag
|
||||
public void create(SuperTableMeta superTableMeta) {
|
||||
superTableMapper.createSuperTable(superTableMeta);
|
||||
}
|
||||
|
|
|
@ -11,15 +11,14 @@ public class TableService extends AbstractService {
|
|||
|
||||
private TableMapper tableMapper;
|
||||
|
||||
//创建一张表
|
||||
// Create a table
|
||||
public void create(TableMeta tableMeta) {
|
||||
tableMapper.create(tableMeta);
|
||||
}
|
||||
|
||||
//创建多张表
|
||||
// Create multiple tables
|
||||
public void create(List<TableMeta> tables) {
|
||||
tables.stream().forEach(this::create);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -11,7 +11,8 @@ public class FieldValueGenerator {
|
|||
|
||||
public static Random random = new Random(System.currentTimeMillis());
|
||||
|
||||
// 生成start到end的时间序列,时间戳为顺序,不含有乱序,field的value为随机生成
|
||||
// Generate a time series from start to end, timestamps are in order without
|
||||
// disorder, field values are randomly generated
|
||||
public static List<RowValue> generate(long start, long end, long timeGap, List<FieldMeta> fieldMetaList) {
|
||||
List<RowValue> values = new ArrayList<>();
|
||||
|
||||
|
@ -29,9 +30,12 @@ public class FieldValueGenerator {
|
|||
return values;
|
||||
}
|
||||
|
||||
// 生成start到end的时间序列,时间戳为顺序,含有乱序,rate为乱序的比例,range为乱序前跳范围,field的value为随机生成
|
||||
// Generate a time series from start to end, timestamps are in order but include
|
||||
// disorder, rate is the proportion of disorder, range is the jump range before
|
||||
// disorder, field values are randomly generated
|
||||
public static List<RowValue> disrupt(List<RowValue> values, int rate, long range) {
|
||||
long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue());
|
||||
long timeGap = (long) (values.get(1).getFields().get(0).getValue())
|
||||
- (long) (values.get(0).getFields().get(0).getValue());
|
||||
int bugSize = values.size() * rate / 100;
|
||||
Set<Integer> bugIndSet = new HashSet<>();
|
||||
while (bugIndSet.size() < bugSize) {
|
||||
|
|
|
@ -9,7 +9,7 @@ import java.util.List;
|
|||
|
||||
public class SubTableMetaGenerator {
|
||||
|
||||
// 创建tableSize张子表,使用tablePrefix作为子表名的前缀,使用superTableMeta的元数据
|
||||
// Create tableSize sub-tables, using tablePrefix as the prefix for sub-table names, and using the metadata from superTableMeta
|
||||
// create table xxx using XXX tags(XXX)
|
||||
public static List<SubTableMeta> generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) {
|
||||
List<SubTableMeta> subTableMetaList = new ArrayList<>();
|
||||
|
|
|
@ -10,10 +10,11 @@ import java.util.List;
|
|||
|
||||
public class SuperTableMetaGenerator {
|
||||
|
||||
// 创建超级表,使用指定SQL语句
|
||||
// Create super table using the specified SQL statement
|
||||
public static SuperTableMeta generate(String superTableSQL) {
|
||||
SuperTableMeta tableMeta = new SuperTableMeta();
|
||||
// for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)
|
||||
// for example : create table superTable (ts timestamp, temperature float,
|
||||
// humidity int) tags(location nchar(64), groupId int)
|
||||
superTableSQL = superTableSQL.trim().toLowerCase();
|
||||
if (!superTableSQL.startsWith("create"))
|
||||
throw new RuntimeException("invalid create super table SQL");
|
||||
|
@ -54,8 +55,9 @@ public class SuperTableMetaGenerator {
|
|||
return tableMeta;
|
||||
}
|
||||
|
||||
// 创建超级表,指定field和tag的个数
|
||||
public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) {
|
||||
// Create super table with specified number of fields and tags
|
||||
public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize,
|
||||
String tagPrefix) {
|
||||
if (fieldSize < 2 || tagSize < 1) {
|
||||
throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1");
|
||||
}
|
||||
|
@ -66,7 +68,8 @@ public class SuperTableMetaGenerator {
|
|||
List<FieldMeta> fields = new ArrayList<>();
|
||||
fields.add(new FieldMeta("ts", "timestamp"));
|
||||
for (int i = 1; i <= fieldSize; i++) {
|
||||
fields.add(new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
|
||||
fields.add(
|
||||
new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
|
||||
}
|
||||
tableMetadata.setFields(fields);
|
||||
// tags
|
||||
|
|
|
@ -9,7 +9,7 @@ import java.util.List;
|
|||
|
||||
public class TagValueGenerator {
|
||||
|
||||
// 创建标签值:使用tagMetas
|
||||
// Create tag values using tagMetas
|
||||
public static List<TagValue> generate(List<TagMeta> tagMetas) {
|
||||
List<TagValue> tagValues = new ArrayList<>();
|
||||
for (int i = 0; i < tagMetas.size(); i++) {
|
||||
|
|
|
@ -41,17 +41,17 @@ public class TimeStampUtil {
|
|||
if (start == 0)
|
||||
start = now - size * timeGap;
|
||||
|
||||
// 如果size小于1异常
|
||||
// If size is less than 1, throw an exception
|
||||
if (size < 1)
|
||||
throw new IllegalArgumentException("size less than 1.");
|
||||
// 如果timeGap为1,已经超长,需要前移start
|
||||
// If timeGap is 1 and it exceeds the limit, move start forward
|
||||
if (start + size > now) {
|
||||
start = now - size;
|
||||
return new TimeTuple(start, now, 1);
|
||||
}
|
||||
long end = start + (long) (timeGap * size);
|
||||
if (end > now) {
|
||||
//压缩timeGap
|
||||
// Compress timeGap
|
||||
end = now;
|
||||
double gap = (end - start) / (size * 1.0f);
|
||||
if (gap < 1.0f) {
|
||||
|
|
|
@ -1,21 +1,21 @@
|
|||
### 设置###
|
||||
### Settings ###
|
||||
log4j.rootLogger=info,stdout
|
||||
### 输出信息到控制抬 ###
|
||||
### Output information to the console ###
|
||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.Target=System.out
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
||||
### 输出DEBUG 级别以上的日志到=logs/debug.log
|
||||
### Output logs of DEBUG level and above to logs/debug.log ###
|
||||
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.DebugLog.File=logs/debug.log
|
||||
log4j.appender.DebugLog.Append=true
|
||||
log4j.appender.DebugLog.Threshold=DEBUG
|
||||
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
### 输出ERROR 级别以上的日志到=logs/error.log
|
||||
### Output logs of ERROR level and above to logs/error.log ###
|
||||
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
||||
log4j.appender.ErrorLog.File=logs/error.log
|
||||
log4j.appender.ErrorLog.Append=true
|
||||
log4j.appender.ErrorLog.Threshold=ERROR
|
||||
log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.3</version>
|
||||
<version>3.4.0</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.tmq.*;
|
||||
import com.taosdata.jdbc.utils.JsonUtil;
|
||||
|
||||
import java.sql.*;
|
||||
import java.time.Duration;
|
||||
|
@ -60,7 +61,7 @@ public class ConsumerLoopFull {
|
|||
// ANCHOR_END: create_consumer
|
||||
}
|
||||
|
||||
public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: poll_data_code_piece
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -73,7 +74,7 @@ public class ConsumerLoopFull {
|
|||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
// Add your data processing logic here
|
||||
System.out.println("data: " + JSON.toJSONString(bean));
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
|
||||
}
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
|
@ -91,7 +92,7 @@ public class ConsumerLoopFull {
|
|||
// ANCHOR_END: poll_data_code_piece
|
||||
}
|
||||
|
||||
public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: consumer_seek
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -99,7 +100,7 @@ public class ConsumerLoopFull {
|
|||
consumer.subscribe(topics);
|
||||
System.out.println("Subscribe topics successfully.");
|
||||
Set<TopicPartition> assignment = consumer.assignment();
|
||||
System.out.println("Now assignment: " + JSON.toJSONString(assignment));
|
||||
System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment));
|
||||
|
||||
ConsumerRecords<ResultBean> records = ConsumerRecords.emptyRecord();
|
||||
// make sure we have got some data
|
||||
|
@ -125,7 +126,7 @@ public class ConsumerLoopFull {
|
|||
}
|
||||
|
||||
|
||||
public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: commit_code_piece
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -135,7 +136,7 @@ public class ConsumerLoopFull {
|
|||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
// Add your data processing logic here
|
||||
System.out.println("data: " + JSON.toJSONString(bean));
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
|
||||
}
|
||||
if (!records.isEmpty()) {
|
||||
// after processing the data, commit the offset manually
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.utils.JsonUtil;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
|
@ -31,7 +31,11 @@ public class ConsumerLoopImp {
|
|||
final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() {
|
||||
@Override
|
||||
public void process(ResultBean result) {
|
||||
System.out.println("data: " + JSON.toJSONString(result));
|
||||
try {
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result));
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -17,8 +17,8 @@ public class SchemalessWsTest {
|
|||
private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true";
|
||||
try(Connection connection = DriverManager.getConnection(url)){
|
||||
final String url = "jdbc:TAOS-WS://" + host + ":6041?user=root&password=taosdata";
|
||||
try (Connection connection = DriverManager.getConnection(url)) {
|
||||
init(connection);
|
||||
AbstractConnection conn = connection.unwrap(AbstractConnection.class);
|
||||
|
||||
|
|
|
@ -12,9 +12,9 @@ public class WSConnectExample {
|
|||
public static void main(String[] args) throws Exception {
|
||||
// use
|
||||
// String jdbcUrl =
|
||||
// "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true";
|
||||
// "jdbc:TAOS-WS://localhost:6041/dbName?user=root&password=taosdata";
|
||||
// if you want to connect a specified database named "dbName".
|
||||
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata&batchfetch=true";
|
||||
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
|
|
|
@ -15,7 +15,7 @@ public class WSParameterBindingBasicDemo {
|
|||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||
init(conn);
|
||||
|
||||
|
@ -40,7 +40,7 @@ public class WSParameterBindingBasicDemo {
|
|||
pstmt.setFloat(4, random.nextFloat());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
int [] exeResult = pstmt.executeBatch();
|
||||
int[] exeResult = pstmt.executeBatch();
|
||||
// you can check exeResult here
|
||||
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
|
||||
}
|
||||
|
@ -60,7 +60,8 @@ public class WSParameterBindingBasicDemo {
|
|||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("CREATE DATABASE IF NOT EXISTS power");
|
||||
stmt.execute("USE power");
|
||||
stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
|
||||
stmt.execute(
|
||||
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ public class WSParameterBindingFullDemo {
|
|||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041/";
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||
|
||||
|
@ -51,8 +51,10 @@ public class WSParameterBindingFullDemo {
|
|||
stmtAll(conn);
|
||||
|
||||
} catch (SQLException ex) {
|
||||
// handle any errors, please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
||||
// handle any errors, please refer to the JDBC specifications for detailed
|
||||
// exceptions info
|
||||
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: "
|
||||
+ ex.getMessage());
|
||||
throw ex;
|
||||
} catch (Exception ex) {
|
||||
System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
|
||||
|
@ -104,30 +106,29 @@ public class WSParameterBindingFullDemo {
|
|||
pstmt.setTagBoolean(3, true);
|
||||
pstmt.setTagString(4, "binary_value");
|
||||
pstmt.setTagNString(5, "nchar_value");
|
||||
pstmt.setTagVarbinary(6, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setTagGeometry(7, new byte[]{
|
||||
pstmt.setTagVarbinary(6, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
|
||||
pstmt.setTagGeometry(7, new byte[] {
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
0x00, 0x00, 0x00, 0x59, 0x40 });
|
||||
|
||||
long current = System.currentTimeMillis();
|
||||
|
||||
|
||||
pstmt.setTimestamp(1, new Timestamp(current));
|
||||
pstmt.setInt(2, 1);
|
||||
pstmt.setDouble(3, 1.1);
|
||||
pstmt.setBoolean(4, true);
|
||||
pstmt.setString(5, "binary_value");
|
||||
pstmt.setNString(6, "nchar_value");
|
||||
pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setGeometry(8, new byte[]{
|
||||
pstmt.setVarbinary(7, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
|
||||
pstmt.setGeometry(8, new byte[] {
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
0x00, 0x00, 0x00, 0x59, 0x40 });
|
||||
pstmt.addBatch();
|
||||
pstmt.executeBatch();
|
||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.tmq.*;
|
||||
import com.taosdata.jdbc.utils.JsonUtil;
|
||||
|
||||
import java.sql.*;
|
||||
import java.time.Duration;
|
||||
|
@ -60,7 +61,7 @@ public class WsConsumerLoopFull {
|
|||
// ANCHOR_END: create_consumer
|
||||
}
|
||||
|
||||
public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: poll_data_code_piece
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -73,7 +74,7 @@ public class WsConsumerLoopFull {
|
|||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
// Add your data processing logic here
|
||||
System.out.println("data: " + JSON.toJSONString(bean));
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
|
||||
}
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
|
@ -91,7 +92,7 @@ public class WsConsumerLoopFull {
|
|||
// ANCHOR_END: poll_data_code_piece
|
||||
}
|
||||
|
||||
public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: consumer_seek
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -99,7 +100,7 @@ public class WsConsumerLoopFull {
|
|||
consumer.subscribe(topics);
|
||||
System.out.println("Subscribe topics successfully.");
|
||||
Set<TopicPartition> assignment = consumer.assignment();
|
||||
System.out.println("Now assignment: " + JSON.toJSONString(assignment));
|
||||
System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment));
|
||||
|
||||
ConsumerRecords<ResultBean> records = ConsumerRecords.emptyRecord();
|
||||
// make sure we have got some data
|
||||
|
@ -125,7 +126,7 @@ public class WsConsumerLoopFull {
|
|||
}
|
||||
|
||||
|
||||
public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException {
|
||||
public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
|
||||
// ANCHOR: commit_code_piece
|
||||
List<String> topics = Collections.singletonList("topic_meters");
|
||||
try {
|
||||
|
@ -135,7 +136,7 @@ public class WsConsumerLoopFull {
|
|||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
// Add your data processing logic here
|
||||
System.out.println("data: " + JSON.toJSONString(bean));
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
|
||||
}
|
||||
if (!records.isEmpty()) {
|
||||
// after processing the data, commit the offset manually
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.utils.JsonUtil;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
|
@ -28,7 +28,11 @@ public abstract class WsConsumerLoopImp {
|
|||
final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() {
|
||||
@Override
|
||||
public void process(ResultBean result) {
|
||||
System.out.println("data: " + JSON.toJSONString(result));
|
||||
try {
|
||||
System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result));
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -13,6 +13,9 @@ public class DataBaseMonitor {
|
|||
public DataBaseMonitor init() throws SQLException {
|
||||
if (conn == null) {
|
||||
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
|
||||
if (jdbcURL == null || jdbcURL.isEmpty()) {
|
||||
jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
|
||||
}
|
||||
conn = DriverManager.getConnection(jdbcURL);
|
||||
stmt = conn.createStatement();
|
||||
}
|
||||
|
|
|
@ -69,6 +69,9 @@ public class SQLWriter {
|
|||
*/
|
||||
private static Connection getConnection() throws SQLException {
|
||||
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
|
||||
if (jdbcURL == null || jdbcURL == ""){
|
||||
jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
|
||||
}
|
||||
return DriverManager.getConnection(jdbcURL);
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,37 @@ public class TestAll {
|
|||
stmt.execute("drop database if exists " + dbName);
|
||||
}
|
||||
}
|
||||
waitTransaction();
|
||||
}
|
||||
|
||||
public void dropTopic(String topicName) throws SQLException {
|
||||
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop topic if exists " + topicName);
|
||||
}
|
||||
}
|
||||
waitTransaction();
|
||||
}
|
||||
|
||||
public void waitTransaction() throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
for (int i = 0; i < 10; i++) {
|
||||
stmt.execute("show transactions");
|
||||
try (ResultSet resultSet = stmt.getResultSet()) {
|
||||
if (resultSet.next()) {
|
||||
int count = resultSet.getInt(1);
|
||||
if (count == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void insertData() throws SQLException {
|
||||
|
@ -104,14 +135,20 @@ public class TestAll {
|
|||
SubscribeDemo.main(args);
|
||||
}
|
||||
|
||||
// @Test
|
||||
// public void testSubscribeJni() throws SQLException, InterruptedException {
|
||||
// dropDB("power");
|
||||
// ConsumerLoopFull.main(args);
|
||||
// }
|
||||
// @Test
|
||||
// public void testSubscribeWs() throws SQLException, InterruptedException {
|
||||
// dropDB("power");
|
||||
// WsConsumerLoopFull.main(args);
|
||||
// }
|
||||
@Test
|
||||
public void testSubscribeJni() throws SQLException, InterruptedException {
|
||||
dropTopic("topic_meters");
|
||||
dropDB("power");
|
||||
ConsumerLoopFull.main(args);
|
||||
dropTopic("topic_meters");
|
||||
dropDB("power");
|
||||
}
|
||||
@Test
|
||||
public void testSubscribeWs() throws SQLException, InterruptedException {
|
||||
dropTopic("topic_meters");
|
||||
dropDB("power");
|
||||
WsConsumerLoopFull.main(args);
|
||||
dropTopic("topic_meters");
|
||||
dropDB("power");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,6 +4,6 @@
|
|||
"main": "index.js",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@tdengine/websocket": "^3.1.0"
|
||||
"@tdengine/websocket": "^3.1.1"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,7 +3,6 @@ const taos = require("@tdengine/websocket");
|
|||
|
||||
let dsn = 'ws://localhost:6041';
|
||||
async function createConnect() {
|
||||
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
|
|
|
@ -10,7 +10,6 @@ const groupId = "group1";
|
|||
const clientId = "client1";
|
||||
|
||||
async function createConsumer() {
|
||||
|
||||
let groupId = "group1";
|
||||
let clientId = "client1";
|
||||
let configMap = new Map([
|
||||
|
|
|
@ -4,9 +4,9 @@ sidebar_label: 文档首页
|
|||
slug: /
|
||||
---
|
||||
|
||||
TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的<a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">时序数据库</a>(<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)
|
||||
TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的<a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">时序数据库</a>(<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)。
|
||||
|
||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[快速入门](./basic)一章。
|
||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。
|
||||
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
|
||||
|
||||
|
@ -16,6 +16,8 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
|
|||
|
||||
如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
|
||||
|
||||
如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
Together, we make a difference!
|
||||
|
|
|
@ -17,7 +17,7 @@ docker pull tdengine/tdengine:latest
|
|||
或者指定版本的容器镜像:
|
||||
|
||||
```shell
|
||||
docker pull tdengine/tdengine:3.0.1.4
|
||||
docker pull tdengine/tdengine:3.3.3.0
|
||||
```
|
||||
|
||||
然后只需执行下面的命令:
|
||||
|
@ -121,4 +121,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
|
|||
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
|
||||
```
|
||||
|
||||
在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
|
||||
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
|
||||
|
|
|
@ -14,7 +14,9 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)
|
|||
|
||||
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
|
||||
|
||||
在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。
|
||||
在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统,用户可以根据自己的运行环境自行选择。同时我们也提供了 tar.gz 格式安装包,以及 `apt-get` 工具从线上进行安装。
|
||||
|
||||
此外,TDengine 也提供 macOS x64/m1 平台的 pkg 安装包。
|
||||
|
||||
## 运行环境要求
|
||||
在linux系统中,运行环境最低要求如下:
|
||||
|
@ -317,4 +319,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
|
|||
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
|
||||
```
|
||||
|
||||
在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
|
||||
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
|
|
@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
|
|||
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
|
||||
```
|
||||
|
||||
在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
|
||||
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
|
|
@ -111,7 +111,7 @@ TDengine 还支持直接向超级表写入数据。需要注意的是,超级
|
|||
|
||||
```sql
|
||||
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
|
||||
values( "d1001v, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
||||
values( "d1001, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
||||
```
|
||||
|
||||
### 零代码写入
|
||||
|
|
|
@ -8,7 +8,7 @@ toc_max_heading_level: 4
|
|||
|
||||
## 基本查询
|
||||
|
||||
为了更好的介绍 TDengine 数据查询,使用 如下 taosBenchmark 命令,生成本章内容需要的时序数据。
|
||||
为了更好的介绍 TDengine 数据查询,使用如下 taosBenchmark 命令,生成本章内容需要的时序数据。
|
||||
|
||||
```shell
|
||||
taosBenchmark --start-timestamp=1600000000000 --tables=100 --records=10000000 --time-step=10000
|
||||
|
@ -20,21 +20,22 @@ taosBenchmark --start-timestamp=1600000000000 --tables=100 --records=10000000 --
|
|||
|
||||
```sql
|
||||
SELECT * FROM meters
|
||||
WHERE voltage > 10
|
||||
WHERE voltage > 230
|
||||
ORDER BY ts DESC
|
||||
LIMIT 5
|
||||
LIMIT 5;
|
||||
```
|
||||
|
||||
上面的 SQL,从超级表 `meters` 中查询出电压 `voltage` 大于 10 的记录,按时间降序排列,且仅输出前 5 行。查询结果如下:
|
||||
上面的 SQL,从超级表 `meters` 中查询出电压 `voltage` 大于 230V 的记录,按时间降序排列,且仅输出前 5 行。查询结果如下:
|
||||
|
||||
```text
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
==========================================================================================================
|
||||
2023-11-14 22:13:10.000 | 1.1294620 | 18 | 0.3531540 | 8 | California.MountainView |
|
||||
2023-11-14 22:13:10.000 | 1.0294620 | 12 | 0.3631540 | 2 | California.Campbell |
|
||||
2023-11-14 22:13:10.000 | 1.0294620 | 16 | 0.3531540 | 1 | California.Campbell |
|
||||
2023-11-14 22:13:10.000 | 1.1294620 | 18 | 0.3531540 | 2 | California.Campbell |
|
||||
2023-11-14 22:13:10.000 | 1.1294620 | 16 | 0.3431540 | 7 | California.PaloAlto |
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
===================================================================================================
|
||||
2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 10 | California.Sunnyvale |
|
||||
2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 1 | California.LosAngles |
|
||||
2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 10 | California.Sunnyvale |
|
||||
2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 5 | California.Cupertino |
|
||||
2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 4 | California.SanFrancisco |
|
||||
Query OK, 5 row(s) in set (0.145403s)
|
||||
```
|
||||
|
||||
## 聚合查询
|
||||
|
@ -48,28 +49,28 @@ TDengine 支持通过 GROUP BY 子句,对数据进行聚合查询。SQL 语句
|
|||
group by 子句用于对数据进行分组,并为每个分组返回一行汇总信息。在 group by 子句中,可以使用表或视图中的任何列作为分组依据,这些列不需要出现在 select 列表中。此外,用户可以直接在超级表上执行聚合查询,无须预先创建子表。以智能电表的数据模型为例,使用 group by 子句的 SQL 如下:
|
||||
|
||||
```sql
|
||||
SELECT groupid,avg(voltage)
|
||||
SELECT groupid, avg(voltage)
|
||||
FROM meters
|
||||
WHERE ts >= "2022-01-01T00:00:00+08:00"
|
||||
AND ts < "2023-01-01T00:00:00+08:00"
|
||||
GROUP BY groupid
|
||||
GROUP BY groupid;
|
||||
```
|
||||
|
||||
上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2023-01-01T00:00:00+08:00` 的数据,按照 `groupid` 进行分组,求每组的平均电压。查询结果如下:
|
||||
|
||||
```text
|
||||
groupid | avg(voltage) |
|
||||
==========================================
|
||||
8 | 9.104040404040404 |
|
||||
5 | 9.078333333333333 |
|
||||
1 | 9.087037037037037 |
|
||||
7 | 8.991414141414142 |
|
||||
9 | 8.789814814814815 |
|
||||
6 | 9.051010101010101 |
|
||||
4 | 9.135353535353536 |
|
||||
10 | 9.213131313131314 |
|
||||
2 | 9.008888888888889 |
|
||||
3 | 8.783888888888889 |
|
||||
groupid | avg(voltage) |
|
||||
======================================
|
||||
8 | 243.961981544901079 |
|
||||
5 | 243.961981544901079 |
|
||||
1 | 243.961981544901079 |
|
||||
7 | 243.961981544901079 |
|
||||
9 | 243.961981544901079 |
|
||||
6 | 243.961981544901079 |
|
||||
4 | 243.961981544901079 |
|
||||
10 | 243.961981544901079 |
|
||||
2 | 243.961981544901079 |
|
||||
3 | 243.961981544901079 |
|
||||
Query OK, 10 row(s) in set (0.042446s)
|
||||
```
|
||||
|
||||
|
@ -110,24 +111,24 @@ TDengine 按如下方式处理数据切分子句。
|
|||
```sql
|
||||
SELECT location, avg(voltage)
|
||||
FROM meters
|
||||
PARTITION BY location
|
||||
PARTITION BY location;
|
||||
```
|
||||
|
||||
上面的示例 SQL 查询超级表 `meters`,将数据按标签 `location` 进行分组,每个分组计算电压的平均值。查询结果如下:
|
||||
|
||||
```text
|
||||
location | avg(voltage) |
|
||||
=========================================================
|
||||
California.SantaClara | 8.793334320000000 |
|
||||
California.SanFrancisco | 9.017645882352941 |
|
||||
California.SanJose | 9.156112940000000 |
|
||||
California.LosAngles | 9.036753507692307 |
|
||||
California.SanDiego | 8.967037053333334 |
|
||||
California.Sunnyvale | 8.978572085714285 |
|
||||
California.PaloAlto | 8.936665800000000 |
|
||||
California.Cupertino | 8.987654066666666 |
|
||||
California.MountainView | 9.046297266666667 |
|
||||
California.Campbell | 9.149999028571429 |
|
||||
location | avg(voltage) |
|
||||
======================================================
|
||||
California.SantaClara | 243.962050000000005 |
|
||||
California.SanFrancisco | 243.962050000000005 |
|
||||
California.SanJose | 243.962050000000005 |
|
||||
California.LosAngles | 243.962050000000005 |
|
||||
California.SanDiego | 243.962050000000005 |
|
||||
California.Sunnyvale | 243.962050000000005 |
|
||||
California.PaloAlto | 243.962050000000005 |
|
||||
California.Cupertino | 243.962050000000005 |
|
||||
California.MountainView | 243.962050000000005 |
|
||||
California.Campbell | 243.962050000000005 |
|
||||
Query OK, 10 row(s) in set (2.415961s)
|
||||
```
|
||||
|
||||
|
@ -200,20 +201,20 @@ SLIMIT 2;
|
|||
上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据;数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且每个时间窗口向后偏移 5 秒;最后,仅取前 2 个分片的数据作为结果。查询结果如下:
|
||||
|
||||
```text
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
==========================================================================================
|
||||
d40 | 2021-12-31 15:59:05.000 | 2021-12-31 16:00:05.000 | 4.000000000000000 |
|
||||
d40 | 2021-12-31 16:00:05.000 | 2021-12-31 16:01:05.000 | 5.000000000000000 |
|
||||
d40 | 2021-12-31 16:01:05.000 | 2021-12-31 16:02:05.000 | 8.000000000000000 |
|
||||
d40 | 2021-12-31 16:02:05.000 | 2021-12-31 16:03:05.000 | 7.666666666666667 |
|
||||
d40 | 2021-12-31 16:03:05.000 | 2021-12-31 16:04:05.000 | 9.666666666666666 |
|
||||
d40 | 2021-12-31 16:04:05.000 | 2021-12-31 16:05:05.000 | 15.199999999999999 |
|
||||
d41 | 2021-12-31 15:59:05.000 | 2021-12-31 16:00:05.000 | 4.000000000000000 |
|
||||
d41 | 2021-12-31 16:00:05.000 | 2021-12-31 16:01:05.000 | 7.000000000000000 |
|
||||
d41 | 2021-12-31 16:01:05.000 | 2021-12-31 16:02:05.000 | 9.000000000000000 |
|
||||
d41 | 2021-12-31 16:02:05.000 | 2021-12-31 16:03:05.000 | 10.666666666666666 |
|
||||
d41 | 2021-12-31 16:03:05.000 | 2021-12-31 16:04:05.000 | 8.333333333333334 |
|
||||
d41 | 2021-12-31 16:04:05.000 | 2021-12-31 16:05:05.000 | 9.600000000000000 |
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
======================================================================================
|
||||
d2 | 2021-12-31 23:59:05.000 | 2022-01-01 00:00:05.000 | 253.000000000000000 |
|
||||
d2 | 2022-01-01 00:00:05.000 | 2022-01-01 00:01:05.000 | 244.166666666666657 |
|
||||
d2 | 2022-01-01 00:01:05.000 | 2022-01-01 00:02:05.000 | 241.833333333333343 |
|
||||
d2 | 2022-01-01 00:02:05.000 | 2022-01-01 00:03:05.000 | 243.166666666666657 |
|
||||
d2 | 2022-01-01 00:03:05.000 | 2022-01-01 00:04:05.000 | 240.833333333333343 |
|
||||
d2 | 2022-01-01 00:04:05.000 | 2022-01-01 00:05:05.000 | 244.800000000000011 |
|
||||
d26 | 2021-12-31 23:59:05.000 | 2022-01-01 00:00:05.000 | 253.000000000000000 |
|
||||
d26 | 2022-01-01 00:00:05.000 | 2022-01-01 00:01:05.000 | 244.166666666666657 |
|
||||
d26 | 2022-01-01 00:01:05.000 | 2022-01-01 00:02:05.000 | 241.833333333333343 |
|
||||
d26 | 2022-01-01 00:02:05.000 | 2022-01-01 00:03:05.000 | 243.166666666666657 |
|
||||
d26 | 2022-01-01 00:03:05.000 | 2022-01-01 00:04:05.000 | 240.833333333333343 |
|
||||
d26 | 2022-01-01 00:04:05.000 | 2022-01-01 00:05:05.000 | 244.800000000000011 |
|
||||
Query OK, 12 row(s) in set (0.021265s)
|
||||
```
|
||||
|
||||
|
@ -255,19 +256,19 @@ SLIMIT 1;
|
|||
上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据,数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且时间窗口按照 30 秒进行滑动;最后,仅取前 1 个分片的数据作为结果。查询结果如下:
|
||||
|
||||
```text
|
||||
tbname | _wstart | avg(voltage) |
|
||||
================================================================
|
||||
d40 | 2021-12-31 15:59:30.000 | 4.000000000000000 |
|
||||
d40 | 2021-12-31 16:00:00.000 | 5.666666666666667 |
|
||||
d40 | 2021-12-31 16:00:30.000 | 4.333333333333333 |
|
||||
d40 | 2021-12-31 16:01:00.000 | 5.000000000000000 |
|
||||
d40 | 2021-12-31 16:01:30.000 | 9.333333333333334 |
|
||||
d40 | 2021-12-31 16:02:00.000 | 9.666666666666666 |
|
||||
d40 | 2021-12-31 16:02:30.000 | 10.000000000000000 |
|
||||
d40 | 2021-12-31 16:03:00.000 | 10.333333333333334 |
|
||||
d40 | 2021-12-31 16:03:30.000 | 10.333333333333334 |
|
||||
d40 | 2021-12-31 16:04:00.000 | 13.000000000000000 |
|
||||
d40 | 2021-12-31 16:04:30.000 | 15.333333333333334 |
|
||||
tbname | _wstart | avg(voltage) |
|
||||
=============================================================
|
||||
d2 | 2021-12-31 23:59:30.000 | 248.333333333333343 |
|
||||
d2 | 2022-01-01 00:00:00.000 | 246.000000000000000 |
|
||||
d2 | 2022-01-01 00:00:30.000 | 244.666666666666657 |
|
||||
d2 | 2022-01-01 00:01:00.000 | 240.833333333333343 |
|
||||
d2 | 2022-01-01 00:01:30.000 | 239.500000000000000 |
|
||||
d2 | 2022-01-01 00:02:00.000 | 243.833333333333343 |
|
||||
d2 | 2022-01-01 00:02:30.000 | 243.833333333333343 |
|
||||
d2 | 2022-01-01 00:03:00.000 | 241.333333333333343 |
|
||||
d2 | 2022-01-01 00:03:30.000 | 241.666666666666657 |
|
||||
d2 | 2022-01-01 00:04:00.000 | 244.166666666666657 |
|
||||
d2 | 2022-01-01 00:04:30.000 | 244.666666666666657 |
|
||||
Query OK, 11 row(s) in set (0.013153s)
|
||||
```
|
||||
|
||||
|
@ -290,13 +291,13 @@ SLIMIT 1;
|
|||
上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据,数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且时间窗口按照 1 分钟进行切分;最后,仅取前 1 个分片的数据作为结果。查询结果如下:
|
||||
|
||||
```text
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
=================================================================================================================
|
||||
d28 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 7.333333333333333 |
|
||||
d28 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 8.000000000000000 |
|
||||
d28 | 2021-12-31 16:02:00.000 | 2021-12-31 16:03:00.000 | 11.000000000000000 |
|
||||
d28 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 6.666666666666667 |
|
||||
d28 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 10.000000000000000 |
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
======================================================================================
|
||||
d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 |
|
||||
d2 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 |
|
||||
d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 |
|
||||
d2 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 |
|
||||
d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 |
|
||||
Query OK, 5 row(s) in set (0.016812s)
|
||||
```
|
||||
|
||||
|
@ -342,53 +343,65 @@ SLIMIT 2;
|
|||
上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据;数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,如果窗口内的数据出现缺失,则使用使用前一个非 NULL 值填充数据;最后,仅取前 2 个分片的数据作为结果。查询结果如下:
|
||||
|
||||
```text
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
=================================================================================================================
|
||||
d40 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 5.666666666666667 |
|
||||
d40 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 5.000000000000000 |
|
||||
d40 | 2021-12-31 16:02:00.000 | 2021-12-31 16:03:00.000 | 9.666666666666666 |
|
||||
d40 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 10.333333333333334 |
|
||||
d40 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 13.000000000000000 |
|
||||
d41 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 5.666666666666667 |
|
||||
d41 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 9.333333333333334 |
|
||||
d41 | 2021-12-31 16:02:00.000 | 2021-12-31 16:03:00.000 | 11.000000000000000 |
|
||||
d41 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 7.666666666666667 |
|
||||
d41 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 10.000000000000000 |
|
||||
tbname | _wstart | _wend | avg(voltage) |
|
||||
=======================================================================================
|
||||
d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 |
|
||||
d2 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 |
|
||||
d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 |
|
||||
d2 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 |
|
||||
d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 |
|
||||
d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 |
|
||||
d26 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 |
|
||||
d26 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 |
|
||||
d26 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 |
|
||||
d26 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 |
|
||||
Query OK, 10 row(s) in set (0.022866s)
|
||||
```
|
||||
|
||||
### 状态窗口
|
||||
|
||||
使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。TDengine 还支持将 CASE 表达式用在状态量,可以表达某个状态的开始是由满足某个条件而触发,这个状态的结束是由另外一个条件满足而触发的语义。以智能电表为例,电压正常范围是 205V 到 235V,那么可以通过监控电压来判断电路是否正常。
|
||||
使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。TDengine 还支持将 CASE 表达式用在状态量,可以表达某个状态的开始是由满足某个条件而触发,这个状态的结束是由另外一个条件满足而触发的语义。以智能电表为例,电压正常范围是 225V 到 235V,那么可以通过监控电压来判断电路是否正常。
|
||||
|
||||
```sql
|
||||
SELECT tbname, _wstart, _wend,_wduration, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END status
|
||||
SELECT tbname, _wstart, _wend,_wduration, CASE WHEN voltage >= 225 and voltage <= 235 THEN 1 ELSE 0 END status
|
||||
FROM meters
|
||||
WHERE ts >= "2022-01-01T00:00:00+08:00"
|
||||
AND ts < "2022-01-01T00:05:00+08:00"
|
||||
PARTITION BY tbname
|
||||
STATE_WINDOW(
|
||||
CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END
|
||||
CASE WHEN voltage >= 225 and voltage <= 235 THEN 1 ELSE 0 END
|
||||
)
|
||||
SLIMIT 10;
|
||||
SLIMIT 2;
|
||||
```
|
||||
|
||||
以上 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:05:00+08:00的数据;数据首先按照子表名 tbname 进行数据切分;根据电压是否在正常范围内进行状态窗口的划分;最后,取前 10 个分片的数据作为结果。查询结果如下:
|
||||
以上 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:05:00+08:00的数据;数据首先按照子表名 tbname 进行数据切分;根据电压是否在正常范围内进行状态窗口的划分;最后,取前 2 个分片的数据作为结果。查询结果如下:(由于数据是随机生成,结果集包含的数据条数会有不同)
|
||||
|
||||
```text
|
||||
tbname | _wstart | _wend | _wduration | status |
|
||||
=====================================================================================================================================
|
||||
d76 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d47 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d37 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d87 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d64 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d35 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d83 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d51 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d63 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
d0 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 |
|
||||
Query OK, 10 row(s) in set (0.040495s)
|
||||
tbname | _wstart | _wend | _wduration | status |
|
||||
===============================================================================================
|
||||
d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:20.000 | 80000 | 0 |
|
||||
d2 | 2022-01-01 00:01:30.000 | 2022-01-01 00:01:30.000 | 0 | 1 |
|
||||
d2 | 2022-01-01 00:01:40.000 | 2022-01-01 00:01:40.000 | 0 | 0 |
|
||||
d2 | 2022-01-01 00:01:50.000 | 2022-01-01 00:01:50.000 | 0 | 1 |
|
||||
d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:02:20.000 | 20000 | 0 |
|
||||
d2 | 2022-01-01 00:02:30.000 | 2022-01-01 00:02:30.000 | 0 | 1 |
|
||||
d2 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:00.000 | 20000 | 0 |
|
||||
d2 | 2022-01-01 00:03:10.000 | 2022-01-01 00:03:10.000 | 0 | 1 |
|
||||
d2 | 2022-01-01 00:03:20.000 | 2022-01-01 00:03:40.000 | 20000 | 0 |
|
||||
d2 | 2022-01-01 00:03:50.000 | 2022-01-01 00:03:50.000 | 0 | 1 |
|
||||
d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:04:50.000 | 50000 | 0 |
|
||||
d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:20.000 | 80000 | 0 |
|
||||
d26 | 2022-01-01 00:01:30.000 | 2022-01-01 00:01:30.000 | 0 | 1 |
|
||||
d26 | 2022-01-01 00:01:40.000 | 2022-01-01 00:01:40.000 | 0 | 0 |
|
||||
d26 | 2022-01-01 00:01:50.000 | 2022-01-01 00:01:50.000 | 0 | 1 |
|
||||
d26 | 2022-01-01 00:02:00.000 | 2022-01-01 00:02:20.000 | 20000 | 0 |
|
||||
d26 | 2022-01-01 00:02:30.000 | 2022-01-01 00:02:30.000 | 0 | 1 |
|
||||
d26 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:00.000 | 20000 | 0 |
|
||||
d26 | 2022-01-01 00:03:10.000 | 2022-01-01 00:03:10.000 | 0 | 1 |
|
||||
d26 | 2022-01-01 00:03:20.000 | 2022-01-01 00:03:40.000 | 20000 | 0 |
|
||||
d26 | 2022-01-01 00:03:50.000 | 2022-01-01 00:03:50.000 | 0 | 1 |
|
||||
d26 | 2022-01-01 00:04:00.000 | 2022-01-01 00:04:50.000 | 50000 | 0 |
|
||||
Query OK, 22 row(s) in set (0.153403s)
|
||||
```
|
||||
|
||||
### 会话窗口
|
||||
|
@ -417,18 +430,18 @@ SLIMIT 10;
|
|||
|
||||
上面的 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:10:00+08:00的数据;数据先按照子表名 tbname 进行数据切分,再根据 10 分钟的会话窗口进行切分;最后,取前 10 个分片的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下:
|
||||
```text
|
||||
tbname | _wstart | _wend | _wduration | count(*) |
|
||||
=====================================================================================================================================
|
||||
d76 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d47 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d37 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d87 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d64 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d35 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d83 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d51 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d63 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
d0 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 |
|
||||
tbname | _wstart | _wend | _wduration | count(*) |
|
||||
===============================================================================================
|
||||
d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d52 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d64 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d76 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d28 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d4 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d88 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d77 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
d54 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 |
|
||||
Query OK, 10 row(s) in set (0.043489s)
|
||||
```
|
||||
|
||||
|
@ -458,26 +471,26 @@ FROM meters
|
|||
WHERE ts >= "2022-01-01T00:00:00+08:00"
|
||||
AND ts < "2022-01-01T00:10:00+08:00"
|
||||
PARTITION BY tbname
|
||||
EVENT_WINDOW START WITH voltage >= 10 END WITH voltage < 20
|
||||
LIMIT 10;
|
||||
EVENT_WINDOW START WITH voltage >= 225 END WITH voltage < 235
|
||||
LIMIT 5;
|
||||
```
|
||||
|
||||
上面的 SQL,查询超级表meters中,时间戳大于等于2022-01-01T00:00:00+08:00,且时间戳小于2022-01-01T00:10:00+08:00的数据;数据先按照子表名tbname进行数据切分,再根据事件窗口条件:电压大于等于 10V,且小于 20V 进行切分;最后,取前 10 行的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下:
|
||||
上面的 SQL,查询超级表meters中,时间戳大于等于2022-01-01T00:00:00+08:00,且时间戳小于2022-01-01T00:10:00+08:00的数据;数据先按照子表名tbname进行数据切分,再根据事件窗口条件:电压大于等于 225V,且小于 235V 进行切分;最后,取每个分片的前 5 行的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下:
|
||||
|
||||
```text
|
||||
tbname | _wstart | _wend | _wduration | count(*) |
|
||||
=====================================================================================================================================
|
||||
d0 | 2021-12-31 16:00:00.000 | 2021-12-31 16:00:00.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:00:30.000 | 2021-12-31 16:00:30.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:00:40.000 | 2021-12-31 16:00:40.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:01:20.000 | 2021-12-31 16:01:20.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:02:20.000 | 2021-12-31 16:02:20.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:02:30.000 | 2021-12-31 16:02:30.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:03:10.000 | 2021-12-31 16:03:10.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:03:30.000 | 2021-12-31 16:03:30.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:03:40.000 | 2021-12-31 16:03:40.000 | 0 | 1 |
|
||||
d0 | 2021-12-31 16:03:50.000 | 2021-12-31 16:03:50.000 | 0 | 1 |
|
||||
Query OK, 10 row(s) in set (0.034127s)
|
||||
tbname | _wstart | _wend | _wduration | count(*) |
|
||||
==============================================================================================
|
||||
d0 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 90000 | 10 |
|
||||
d0 | 2022-01-01 00:01:40.000 | 2022-01-01 00:02:30.000 | 50000 | 6 |
|
||||
d0 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:10.000 | 30000 | 4 |
|
||||
d0 | 2022-01-01 00:03:20.000 | 2022-01-01 00:07:10.000 | 230000 | 24 |
|
||||
d0 | 2022-01-01 00:07:20.000 | 2022-01-01 00:07:50.000 | 30000 | 4 |
|
||||
d1 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 90000 | 10 |
|
||||
d1 | 2022-01-01 00:01:40.000 | 2022-01-01 00:02:30.000 | 50000 | 6 |
|
||||
d1 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:10.000 | 30000 | 4 |
|
||||
d1 | 2022-01-01 00:03:20.000 | 2022-01-01 00:07:10.000 | 230000 | 24 |
|
||||
……
|
||||
Query OK, 500 row(s) in set (0.328557s)
|
||||
```
|
||||
|
||||
### 计数窗口
|
||||
|
@ -492,17 +505,25 @@ sliding_val 是一个常量,表示窗口滑动的数量,类似于 interval
|
|||
select _wstart, _wend, count(*)
|
||||
from meters
|
||||
where ts >= "2022-01-01T00:00:00+08:00" and ts < "2022-01-01T00:30:00+08:00"
|
||||
count_window(10);
|
||||
count_window(1000);
|
||||
```
|
||||
|
||||
上面的 SQL 查询超级表 meters 中时间戳大于等于 2022-01-01T00:00:00+08:00 且时间戳小于 2022-01-01T00:10:00+08:00 的数据。以每 10 条数据为一组,返回每组的开始时间、结束时间和分组条数。查询结果如下。
|
||||
上面的 SQL 查询超级表 meters 中时间戳大于等于 2022-01-01T00:00:00+08:00 且时间戳小于 2022-01-01T00:10:00+08:00 的数据。以每 1000 条数据为一组,返回每组的开始时间、结束时间和分组条数。查询结果如下:
|
||||
|
||||
```text
|
||||
_wstart | _wend |count(*)|
|
||||
===========================================================
|
||||
2021-12-31 16:00:00.000 | 2021-12-31 16:10:00.000 | 10 |
|
||||
2021-12-31 16:10:00.000 | 2021-12-31 16:20:00.000 | 10 |
|
||||
2021-12-31 16:20:00.000 | 2021-12-31 16:30:00.000 | 10 |
|
||||
_wstart | _wend | count(*) |
|
||||
=====================================================================
|
||||
2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 1000 |
|
||||
2022-01-01 00:01:40.000 | 2022-01-01 00:03:10.000 | 1000 |
|
||||
2022-01-01 00:03:20.000 | 2022-01-01 00:04:50.000 | 1000 |
|
||||
2022-01-01 00:05:00.000 | 2022-01-01 00:06:30.000 | 1000 |
|
||||
2022-01-01 00:06:40.000 | 2022-01-01 00:08:10.000 | 1000 |
|
||||
2022-01-01 00:08:20.000 | 2022-01-01 00:09:50.000 | 1000 |
|
||||
2022-01-01 00:10:00.000 | 2022-01-01 00:11:30.000 | 1000 |
|
||||
2022-01-01 00:11:40.000 | 2022-01-01 00:13:10.000 | 1000 |
|
||||
2022-01-01 00:13:20.000 | 2022-01-01 00:14:50.000 | 1000 |
|
||||
2022-01-01 00:15:00.000 | 2022-01-01 00:16:30.000 | 1000 |
|
||||
Query OK, 10 row(s) in set (0.062794s)
|
||||
```
|
||||
|
||||
## 时序数据特有函数
|
||||
|
@ -563,14 +584,14 @@ UNION ALL
|
|||
上面的 SQL,分别查询:子表 d1 的 1 条数据,子表 d11 的 2 条数据,子表 d21 的 3 条数据,并将结果合并。返回的结果如下:
|
||||
|
||||
```text
|
||||
tbname | ts | current | voltage | phase |
|
||||
=================================================================================================
|
||||
d11 | 2020-09-13 12:26:40.000 | 1.0260611 | 6 | 0.3620200 |
|
||||
d11 | 2020-09-13 12:26:50.000 | 2.9544230 | 8 | 1.0048079 |
|
||||
d21 | 2020-09-13 12:26:40.000 | 1.0260611 | 2 | 0.3520200 |
|
||||
d21 | 2020-09-13 12:26:50.000 | 2.9544230 | 2 | 0.9948080 |
|
||||
d21 | 2020-09-13 12:27:00.000 | -0.0000430 | 12 | 0.0099860 |
|
||||
d1 | 2020-09-13 12:26:40.000 | 1.0260611 | 10 | 0.3520200 |
|
||||
tbname | ts | current | voltage | phase |
|
||||
====================================================================================
|
||||
d11 | 2020-09-13 20:26:40.000 | 11.5680809 | 247 | 146.5000000 |
|
||||
d11 | 2020-09-13 20:26:50.000 | 14.2392311 | 234 | 148.0000000 |
|
||||
d1 | 2020-09-13 20:26:40.000 | 11.5680809 | 247 | 146.5000000 |
|
||||
d21 | 2020-09-13 20:26:40.000 | 11.5680809 | 247 | 146.5000000 |
|
||||
d21 | 2020-09-13 20:26:50.000 | 14.2392311 | 234 | 148.0000000 |
|
||||
d21 | 2020-09-13 20:27:00.000 | 10.0999422 | 251 | 146.0000000 |
|
||||
Query OK, 6 row(s) in set (0.006438s)
|
||||
```
|
||||
|
||||
|
|
|
@ -57,7 +57,7 @@ TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL
|
|||
- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
|
||||
|
||||
```sql
|
||||
CREATE DATABASE POWER WAL_LEVEL 1 WAL_FSYNC_PERIOD 3000;
|
||||
CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
|
||||
```
|
||||
|
||||
在创建数据库时可以选择不同的参数类型,来选择性能优先或者可靠性优先。
|
||||
|
@ -119,4 +119,4 @@ taos> select last_row(ts,current) from meters;
|
|||
Query OK, 1 row(s) in set (0.046682s)
|
||||
```
|
||||
|
||||
可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。
|
||||
可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。
|
||||
|
|
|
@ -14,18 +14,26 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx
|
|||
|
||||
## 支持的数据源
|
||||
|
||||
目前 TDengine 支持的数据源如下:
|
||||
目前 TDengine 支持的数据源如下表:
|
||||
|
||||
1. Aveva PI System:一个工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理
|
||||
2. Aveva Historian:一个工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据。
|
||||
3. OPC DA/UA:OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题。OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制;2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本。
|
||||
4. MQTT:Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。
|
||||
5. Kafka:由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。
|
||||
6. OpenTSDB:基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。
|
||||
7. CSV:Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。
|
||||
8. TDengine 2:泛指运行 TDengine 2.x 版本的 TDengine 实例。
|
||||
9. TDengine 3:泛指运行 TDengine 3.x 版本的 TDengine 实例。
|
||||
10. MySQL, PostgreSQL, Oracle 等关系型数据库。
|
||||
| 数据源 | 支持版本 | 描述 |
|
||||
| --- | --- | --- |
|
||||
| Aveva PI System | PI AF Server Version 2.10.9.593 或以上 | 工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 |
|
||||
| Aveva Historian | AVEVA Historian 2020 RS SP1 | 工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据 |
|
||||
| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题;OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制。 |
|
||||
| OPC UA | KeepWare KEPServerEx 6.5 | 2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本 |
|
||||
| MQTT | emqx: 3.0.0 到 5.7.1<br/> hivemq: 4.0.0 到 4.31.0<br/> mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 |
|
||||
| Kafka | 2.11 ~ 3.8.0 | 由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 |
|
||||
| InfluxDB | 1.7、1.8、2.0-2.7 | InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量时间序列数据进行了优化。|
|
||||
| OpenTSDB | 2.4.1 | 基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 |
|
||||
| MySQL | 5.6,5.7,8.0+ | MySQL是最流行的关系型数据库管理系统之一,由于其体积小、速度快、总体拥有成本低,尤其是开放源码这一特点,一般中小型和大型网站的开发都选择 MySQL 作为网站数据库。 |
|
||||
| Oracle | 11G/12c/19c | Oracle 数据库系统是世界上流行的关系数据库管理系统,系统可移植性好、使用方便、功能强,适用于各类大、中、小微机环境。它是一种高效率的、可靠性好的、适应高吞吐量的数据库方案。 |
|
||||
| PostgreSQL | v15.0+ | PostgreSQL 是一个功能非常强大的、源代码开放的客户/服务器关系型数据库管理系统, 有很多在大型商业RDBMS中所具有的特性,包括事务、子选择、触发器、视图、外键引用完整性和复杂锁定功能。|
|
||||
| SQL Server | 2012/2022 | Microsoft SQL Server 是一种关系型数据库管理系统,由 Microsoft 公司开发,具有使用方便可伸缩性好与相关软件集成程度高等优点。 |
|
||||
| MongoDB | 3.6+ | MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。 |
|
||||
| CSV | - | Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 |
|
||||
| TDengine 2.x | 2.4 或 2.6+ | TDengine 旧版本,已不再维护,推荐升级到 3.0 最新版本。 |
|
||||
| TDengine 3.x | 源端版本+ | 使用 TMQ 进行 TDengine 指定从数据库或超级表的订阅。 |
|
||||
|
||||
## 数据提取、过滤和转换
|
||||
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
---
|
||||
title: "ARIMA"
|
||||
sidebar_label: "ARIMA"
|
||||
---
|
||||
|
||||
本节讲述 ARIMA 算法模型的使用方法。
|
||||
|
||||
## 功能概述
|
||||
|
||||
ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
|
||||
ARIMA模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA模型要求时序数据是**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
|
||||
|
||||
>平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。
|
||||
|
||||
以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 的模型。
|
||||
|
||||
- p= 自回归模型阶数
|
||||
- d= 差分阶数
|
||||
- q= 移动平均模型阶数
|
||||
|
||||
|
||||
### 参数
|
||||
分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|
||||
|参数名称|说明|必填项|
|
||||
|---|---|---|
|
||||
|period|输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用非季节性/周期性的 ARIMA 模型预测。|选填|
|
||||
|start_p| 自回归模型阶数的 起始值,0 开始的整数,不推荐大于 10 |选填|
|
||||
|max_p| 自回归模型阶数的 结束值,0 开始的整数,不推荐大于 10 |选填|
|
||||
|start_q| 移动平均模型阶数的起始值, 0 开始的整数,不推荐大于 10 |选填|
|
||||
|max_q| 移动平均模型阶数的结束值, 0 开始的整数,不推荐大于 10 |选填|
|
||||
|d| 差分阶数|选填|
|
||||
|
||||
`start_p`、`max_p` `start_q` `max_q` 四个参数约束了模型在多大的范围内去搜寻合适的最优解。相同输入数据的条件下,参数范围越大,消耗的资源越多,系统响应的时间越长。
|
||||
|
||||
### 示例及结果
|
||||
针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q是1,最大值是5,预测结果中返回 95% 置信区间范围边界。
|
||||
```
|
||||
FORECAST(i32, "algo=arima,alpha=95,period=10, start_p=1, max_p=5, start_q=1, max_q=5")
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"rows": fc_rows, // 预测结果的行数
|
||||
"period": period, // 返回结果的周期性,同输入
|
||||
"alpha": alpha, // 返回结果的置信区间,同输入
|
||||
"algo": "arima", // 返回结果使用的算法
|
||||
"mse":mse, // 拟合输入时序数据时候生成模型的最小均方误差(MSE)
|
||||
"res": res // 列模式的结果
|
||||
}
|
||||
```
|
||||
|
||||
### 参考文献
|
||||
- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model
|
||||
- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415
|
|
@ -0,0 +1,43 @@
|
|||
---
|
||||
title: "HoltWinters"
|
||||
sidebar_label: "HoltWinters"
|
||||
---
|
||||
|
||||
本节讲述 HoltWinters 算法模型的使用方法。
|
||||
|
||||
## 功能概述
|
||||
HoltWinters模型又称为多次指数平滑模型(EMA)。对含有线性趋势和周期波动的非平稳序列适用,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
|
||||
HoltWinters有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。
|
||||
该模型对于返回数据也不提供计算的置信区间范围结果。在 95% 置信区间的上下界结果与预测结果相同。
|
||||
|
||||
|
||||
### 参数
|
||||
|
||||
分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
|
||||
|参数名称|说明|必填项|
|
||||
|---|---|---|
|
||||
|period| 输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
|
||||
|trend| 趋势模型使用加法模型还是乘法模型|选填|
|
||||
|seasonal| 季节性采用加法模型还是乘法模型|选填|
|
||||
|
||||
参数 `trend` 和 `seasonal`的均可以选择 `add` (加法模型)或 `mul`(乘法模型)。
|
||||
|
||||
### 示例及结果
|
||||
针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势采用乘法模型,季节采用乘法模型
|
||||
```
|
||||
FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"rows": rows, // 结果的行数
|
||||
"period": period, // 返回结果的周期性, 该结果与输入的周期性相同,如果没有周期性,该值为 0
|
||||
"algo": 'holtwinters' // 返回结果使用的计算模型
|
||||
"mse":mse, // 最小均方误差(minmum square error)
|
||||
"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了 两列[timestamp][fc_results]。
|
||||
}
|
||||
```
|
||||
|
||||
### 参考文献
|
||||
- https://en.wikipedia.org/wiki/Exponential_smoothing
|
||||
- https://orangematter.solarwinds.com/2019/12/15/holt-winters-forecasting-simplified/
|
|
@ -0,0 +1,46 @@
|
|||
---
|
||||
title: "Anomaly-detection"
|
||||
sidebar_label: "Anomaly-detection"
|
||||
---
|
||||
|
||||
本节讲述 异常检测 算法模型的使用方法。
|
||||
|
||||
## 概述
|
||||
分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。
|
||||
|
||||
|
||||
### 统计学异常检测方法
|
||||
|
||||
- k-sigma<sup>[1]</sup>: 即 ***68–95–99.7 rule*** 。***k***值默认为3, 即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值K倍标准差,则该点被视为异常点.
|
||||
|
||||
|参数名称|说明|是否必选|默认值|
|
||||
|---|---|---|---|
|
||||
|k|标准差倍数|选填|3|
|
||||
|
||||
|
||||
- IQR<sup>[2]</sup>:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5 。无输入参数。
|
||||
|
||||
- Grubbs<sup>[3]</sup>: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。
|
||||
|
||||
- SHESD<sup>[4]</sup>: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多49.9%。数据集的异常比例一般不超过5%
|
||||
|
||||
|参数名称|说明|是否必选|默认值|
|
||||
|---|---|---|---|
|
||||
|k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5|
|
||||
|
||||
|
||||
### 基于数据密度的检测方法
|
||||
LOF<sup>[5]</sup>: 局部离群因子(LOF,又叫局部异常因子)算法是Breunig于2000年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的top(n)个点。
|
||||
|
||||
|
||||
### 基于深度学习的检测方法
|
||||
使用自动编码器的异常检测模型。可以对具有周期性的数据具有较好的检测结果。但是使用该模型需要针对输入的时序数据进行训练,同时将训练完成的模型部署到服务目录中,才能够运行与使用。
|
||||
|
||||
|
||||
### 参考文献
|
||||
1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
|
||||
2. https://en.wikipedia.org/wiki/Interquartile_range
|
||||
3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948.
|
||||
4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
|
||||
5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.
|
||||
|
|
@ -0,0 +1,167 @@
|
|||
---
|
||||
title: "addins"
|
||||
sidebar_label: "addins"
|
||||
---
|
||||
|
||||
本节说明如何将自己开发的新预测算法和异常检测算法整合到 TDengine 分析平台, 并能够通过 SQL 语句进行调用。
|
||||
|
||||
## 目录结构
|
||||
|
||||

|
||||
|
||||
|目录|说明|
|
||||
|---|---|
|
||||
|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc, 单元测试和集成测试目录 test。 algo目录下 ad 放置异常检测算法代码, fc 放置预测算法代码|
|
||||
|script|是安装脚本和发布脚本放置目录|
|
||||
|model|放置针对数据集完成的训练模型|
|
||||
|cfg| 配置文件目录|
|
||||
|
||||
## 约定与限制
|
||||
|
||||
定义异常检测算法的 Python 代码文件 需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。
|
||||
|
||||
|
||||
### 类命名规范
|
||||
|
||||
算法类的名称需要 以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。
|
||||
|
||||
### 类继承约定
|
||||
|
||||
异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`.
|
||||
预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`。
|
||||
|
||||
### 类属性初始化
|
||||
每个算法实现的类需要静态初始化两个类属性,分别是
|
||||
|
||||
`name`: 的触发调用关键词,全小写英文字母。
|
||||
`desc`:该算法的描述信息。
|
||||
|
||||
### 核心方法输入与输出约定
|
||||
|
||||
`execute` 是算法处理的核心方法。调用该方法的时候, `self.list` 已经设置好输入数组。
|
||||
异常检测输出结果
|
||||
|
||||
`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
|
||||
预测输出结果
|
||||
|
||||
对于预测算法, `AbstractForecastService` 的对象属性说明如下:
|
||||
|
||||
|属性名称|说明|默认值|
|
||||
|---|---|---|
|
||||
|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可。| 0|
|
||||
|start_ts|预测数据的开始时间| 0|
|
||||
|time_step|预测结果的两个数据点之间时间间隔|0 |
|
||||
|fc_rows|预测结果数量| 0 |
|
||||
|return_conf|返回结果中是否包含执行区间范围,如果算法计算结果不包含置信区间,那么上界和下界与自身相同| 1|
|
||||
|conf|执行区间分位数 0.05|
|
||||
|
||||
|
||||
预测返回结果如下:
|
||||
```python
|
||||
return {
|
||||
"rows": self.fc_rows, # 预测数据行数
|
||||
"period": self.period, # 数据周期性,同输入
|
||||
"algo": "holtwinters", # 预测使用的算法
|
||||
"mse": mse, # 预测算法的 mse
|
||||
"res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## 示例代码
|
||||
|
||||
```python
|
||||
import numpy as np
|
||||
from service import AbstractAnomalyDetectionService
|
||||
|
||||
# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束,如下 _IqrService 是 IQR 异常检测算法的实现类。
|
||||
class _IqrService(AbstractAnomalyDetectionService):
|
||||
""" IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService类的抽象函数 """
|
||||
|
||||
# 定义算法调用关键词,全小写ASCII码(必须添加)
|
||||
name = 'iqr'
|
||||
|
||||
# 该算法的描述信息(建议添加)
|
||||
desc = """found the anomaly data according to the inter-quartile range"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def execute(self):
|
||||
""" execute 是算法实现逻辑的核心实现,直接修改该实现即可 """
|
||||
|
||||
# self.list 是输入数值列,list 类型,例如:[1,2,3,4,5]。设置 self.list 的方法在父类中已经进行了定义。实现自己的算法,修改该文件即可,以下代码使用自己的实现替换即可。
|
||||
#lower = np.quantile(self.list, 0.25)
|
||||
#upper = np.quantile(self.list, 0.75)
|
||||
|
||||
#min_val = lower - 1.5 * (upper - lower)
|
||||
#max_val = upper + 1.5 * (upper - lower)
|
||||
#threshold = [min_val, max_val]
|
||||
|
||||
# 返回值是与输入数值列长度相同的数据列,异常值对应位置是 -1。例如上述输入数据列,返回数值列是 [1, 1, 1, 1, -1],表示 [5] 是异常值。
|
||||
return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list]
|
||||
|
||||
|
||||
def set_params(self, params):
|
||||
"""该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
|
||||
pass
|
||||
```
|
||||
|
||||
|
||||
## 单元测试
|
||||
|
||||
在测试文件目录中的 anomaly_test.py 中增加单元测试用例。
|
||||
```python
|
||||
def test_iqr(self):
|
||||
""" 测试 _IqrService 类 """
|
||||
s = loader.get_service("iqr")
|
||||
|
||||
# 设置需要进行检测的输入数据
|
||||
s.set_input_list(AnomalyDetectionTest.input_list)
|
||||
|
||||
# 测试 set_params 的处理逻辑
|
||||
try:
|
||||
s.set_params({"k": 2})
|
||||
except ValueError as e:
|
||||
self.assertEqual(1, 0)
|
||||
|
||||
r = s.execute()
|
||||
|
||||
# 绘制异常检测结果
|
||||
draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr")
|
||||
|
||||
# 检查结果
|
||||
self.assertEqual(r[-1], -1)
|
||||
self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
|
||||
```
|
||||
|
||||
## 需要模型的算法
|
||||
|
||||
针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立了, autoencoder的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。
|
||||
|
||||
训练完成后的模型,使用 joblib 进行保存。
|
||||
|
||||
并在 model 目录下建立对应的文件夹存放该模型。
|
||||
|
||||
保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 `{"model": "ad_encoder_keras"}` 的方式,可以调用该模型进行计算。
|
||||
|
||||
具体的调用方式如下:
|
||||
|
||||
```python
|
||||
def test_autoencoder_ad(self):
|
||||
# 获取特定的算法服务
|
||||
s = loader.get_service("ac")
|
||||
data = self.__load_remote_data_for_ad()
|
||||
|
||||
# 设置异常检查的输入数据
|
||||
s.set_input_list(data)
|
||||
|
||||
# 指定调用的模型,该模型是之前针对该数据集进行训练获得
|
||||
s.set_params({"model": "ad_encoder_keras"})
|
||||
# 执行检查动作,并返回结果
|
||||
r = s.execute()
|
||||
|
||||
num_of_error = -(sum(filter(lambda x: x == -1, r)))
|
||||
self.assertEqual(num_of_error, 109)
|
||||
```
|
||||
|
|
@ -0,0 +1,318 @@
|
|||
---
|
||||
sidebar_label: 数据分析
|
||||
title: 数据分析功能
|
||||
---
|
||||
|
||||
## 概述
|
||||
|
||||
TDengine 通过 ANode(AnalysisNode) 是提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,从而拓展 TDengine 的功能,支持时间序列高级分析功能。
|
||||
ANode 是无状态的数据分析节点,集群中可以存在多个 ANode节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。
|
||||
下图是数据分析的技术架构示意图。
|
||||
|
||||

|
||||
|
||||
## 安装部署
|
||||
### 环境准备
|
||||
ANode 的要求节点上准备有 Python 3.10 及以上版本以及相应的Python包自动安装组件 Pip ,同时请确保能够正常连接互联网。
|
||||
|
||||
### 安装及卸载
|
||||
使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengien 的安装流程一致。
|
||||
|
||||
```bash
|
||||
tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
|
||||
cd TDengine-enterprise-anode-1.0.0
|
||||
sudo ./install.sh
|
||||
```
|
||||
|
||||
卸载 ANode,执行命令 `rmtaosanode` 即可。
|
||||
|
||||
### 其他
|
||||
为了避免 ANode 安装后影响目标节点现有的 Python 库。 ANode 使用 Python 虚拟环境运行,安装后的默认 Python 目录处于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 ANode 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,可以手动删除。
|
||||
|
||||
## 启动及停止服务
|
||||
安装 ANode 以后,可以使用`systemctl`来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。
|
||||
|
||||
```bash
|
||||
systemctl start taosanoded
|
||||
systemctl stop taosanoded
|
||||
systemctl status taosanoded
|
||||
```
|
||||
|
||||
## 目录及配置说明
|
||||
|目录/文件|说明|
|
||||
|---------------|------|
|
||||
|/usr/local/taos/taosanode/bin|可执行文件目录|
|
||||
|/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/|
|
||||
|/usr/local/taos/taosanode/lib|库文件目录|
|
||||
|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model|
|
||||
|/var/log/taos/taosanode/|日志文件目录|
|
||||
|/etc/taos/taosanode.ini|配置文件|
|
||||
|
||||
### 配置说明
|
||||
|
||||
Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 的配置和 uWSGI 的配置在同一个配置文件中,具体如下:
|
||||
|
||||
```ini
|
||||
[uwsgi]
|
||||
# charset
|
||||
env=LC_ALL=en_US.UTF-8
|
||||
|
||||
# ip:port
|
||||
http = 127.0.0.1:6050
|
||||
|
||||
# the local unix socket file than communicate to Nginx
|
||||
#socket = 127.0.0.1:8001
|
||||
#socket-timeout=10
|
||||
|
||||
# base directory
|
||||
chdir = /usr/local/taos/taosanode/lib
|
||||
|
||||
# initialize python file
|
||||
wsgi-file = /usr/local/taos/taosanode/lib/app.py
|
||||
|
||||
# invoke app model
|
||||
callable = app
|
||||
|
||||
# auto remove unix Socket and pid file when stopping
|
||||
vacuum = true
|
||||
|
||||
# socket exec model
|
||||
#chmod-socket = 664
|
||||
|
||||
# uWSGI pid
|
||||
uid=root
|
||||
|
||||
# uWSGI gid
|
||||
gid=root
|
||||
|
||||
# main process
|
||||
master = true
|
||||
|
||||
# the number of worker processes
|
||||
processes = 2
|
||||
|
||||
# pid file
|
||||
pidfile = /usr/local/taos/taosanode/uwsgi.pid
|
||||
|
||||
# enable threads
|
||||
enable-threads=true
|
||||
|
||||
# the number of threads for each process
|
||||
threads=2
|
||||
|
||||
# memory useage report
|
||||
memory-report = true
|
||||
reload-mercy = 10
|
||||
|
||||
# conflict with systemctl, so do NOT uncomment this
|
||||
# daemonize = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# set log
|
||||
logto = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# monitor server
|
||||
stats = 127.0.0.1:8387
|
||||
|
||||
# python virtual environment directory
|
||||
virtualenv = /usr/local/taos/taosanode/venv/
|
||||
|
||||
[taosanode]
|
||||
# default app log file
|
||||
app-log = /var/log/taos/taosanode/taosanode.app.log
|
||||
|
||||
# model storage directory
|
||||
model-dir=/usr/local/taos/taosanode/model/
|
||||
|
||||
# default log level
|
||||
log-level = DEBUG
|
||||
```
|
||||
|
||||
**提示**
|
||||
请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而无法正常启动。
|
||||
|
||||
|
||||
## ANode 基本操作
|
||||
### 管理 ANode
|
||||
#### 创建 ANode
|
||||
```sql
|
||||
CREATE ANODE {node_url}
|
||||
```
|
||||
node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。
|
||||
|
||||
#### 查看 ANode
|
||||
列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。
|
||||
```sql
|
||||
SHOW ANODES;
|
||||
```
|
||||
|
||||
#### 查看提供的时序数据分析服务
|
||||
|
||||
```SQL
|
||||
SHOW ANODES FULL;
|
||||
```
|
||||
|
||||
#### 强制刷新 TDengine 集群中分析算法缓存
|
||||
```SQL
|
||||
UPDATE ANODE {node_id}
|
||||
UPDATE ALL ANODES
|
||||
```
|
||||
|
||||
#### 删除 ANode
|
||||
```sql
|
||||
DROP ANODE {anode_id}
|
||||
```
|
||||
删除 ANode 只是将 ANode 从 TDengine 集群中删除,管理 ANode 的启停仍然需要使用`systemctl`命令。
|
||||
|
||||
### 时序数据分析功能
|
||||
|
||||
#### 白噪声检查
|
||||
|
||||
平台提供 Restful的服务检测输入时间序列是否是白噪声时间序列(White Noise Data, WND),白噪声时间序列及随机数序列。
|
||||
此外,分析平台要求输入的数据不能是 , 因此针对的所有数据均默认进行 白噪声检查。当前白噪声检查采用通行的 `Ljung-Box`检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。
|
||||
如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。
|
||||
同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。
|
||||
|
||||
|
||||
#### 数据重采样和时间戳对齐
|
||||
|
||||
数据分析平台支持将输入的数据进行重采样的预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
|
||||
|
||||
- 数据时间戳对齐。由于真实数据时间可能并非严格按照查询指定的时间戳输入。此时数据平台将自动将数据的时间间隔按照指定的时间间隔进行对齐。例如有输入时间序列:[11, 22, 29, 41],用户指定时间间隔为 10,那么该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
|
||||
- 数据时间重采样。用户输入的时间序列其采样频率超过了指定的查询需要获得结果的时间间隔,例如输入原始数据是 5, 但是输出结果的频率是 10. [0, 5, 10, 15, 20, 25, 30],那么该输入数据列将重采用为间隔 为 10 的输入序列,其结果如下 [0, 10, 20,30]。[5, 15, 25] 处的数据将被丢弃。
|
||||
|
||||
需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据[11, 22, 29, 49],并且用户要求的时间间隔为 10, 重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。
|
||||
|
||||
|
||||
#### 时序数据异常检测
|
||||
异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
|
||||
异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。
|
||||
对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。
|
||||
|
||||
|
||||
##### 语法
|
||||
|
||||
```SQL
|
||||
ANOMALY_WINDOW(column_name, option_expr)
|
||||
|
||||
option_expr: {"
|
||||
algo=expr1
|
||||
[,wncheck=1|0]
|
||||
[,expr2]
|
||||
"}
|
||||
```
|
||||
|
||||
1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列输入,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
|
||||
2. `options`:字符串。其中使用 K/V 调用异常检测的算法,及与算法相关的参数。采用 逗号分隔的K/V字符串表示,其中的字符串不需要使用单引号、双引号、或转意号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma, k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
|
||||
3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
|
||||
4. 输入数据默认进行白噪声检查,如果检查结果是输入数据是白噪声,将不会有任何(异常)窗口信息返回。
|
||||
|
||||
**参数说明**
|
||||
|参数|含义|默认值|
|
||||
|---|---|---|
|
||||
|algo|异常检测调用的算法|iqr|
|
||||
|wncheck|对输入数据列是否进行白噪声检查|取值为0或者1,默认值为 1,表示进行白噪声检查|
|
||||
|
||||
异常检测的返回结果以窗口的形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
|
||||
1. `_WSTART`: 异常窗口开始时间戳
|
||||
2. `_WEND`:异常窗口结束时间戳
|
||||
3. `_WDURATION`:异常窗口持续时间
|
||||
|
||||
**Examples**
```SQL
--- Detect anomalies on column i32 with the iqr algorithm.
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=iqr");

--- Detect anomalies on column i32 with the ksigma algorithm, input parameter k set to 2.
SELECT _wstart, _wend, SUM(i32)
FROM ai.atb
ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
```

```
taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
         _wstart          |          _wend           |   count(*)    |
============================================================================
 2020-01-01 00:00:16.000  | 2020-01-01 00:00:16.001  |             1 |
Query OK, 1 row(s) in set (0.028946s)
```

**Available anomaly-detection algorithms**
- iqr
- ksigma
- grubbs
- lof
- shesd
- tac

#### Time-series forecasting

Forecasting takes a span of training data as input and predicts the trend of the time series over the next contiguous time range.

##### Syntax
```SQL
FORECAST(column_expr, option_expr)

option_expr: {"
algo=expr1
[,wncheck=1|0]
[,conf=conf_val]
[,every=every_val]
[,rows=rows_val]
[,start=start_ts_val]
[,expr2]
"}

```
1. `column_expr`: the time-series column to forecast. As with anomaly detection, only numeric input is supported.
2. `options`: the parameters of the forecast function; the usage rules are the same as for anomaly_window. Forecasting additionally supports the `conf`, `every`, `rows`, and `start` parameters, described below:

**Parameters**

|Parameter|Meaning|Default|
|---|---|---|
|algo|Forecasting algorithm to use|holtwinters|
|wncheck|White-noise data check|1 by default; 0 disables the check|
|conf|Confidence interval of the forecast, in the range [0, 100]|95|
|every|Sampling interval of the forecast output|Sampling interval of the input data|
|start|Start timestamp of the forecast output|Last input timestamp plus one sampling interval|
|rows|Number of forecast rows|10|

1. The forecast result adds three pseudo-columns: `_FROWTS` (timestamp of the forecast row), `_FLOW` (lower confidence bound), and `_FHIGH` (upper confidence bound). For algorithms without confidence intervals, the bounds equal the forecast value.
2. Changing the `START` parameter shifts the start timestamp of the returned results; it does not change the forecast values, only their timestamps.
3. `EVERY` may differ from the sampling interval of the input data, but it can only be lower than or equal to the input sampling frequency, never **higher**.
4. For algorithms that do not compute confidence intervals, the returned upper and lower bounds degenerate to the forecast point even when a confidence level is specified.
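
A sketch combining several of these options; the particular combination is an assumption based on the parameter table above:

```SQL
--- Forecast 20 rows with holtwinters, skipping the white-noise check.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=holtwinters,rows=20,wncheck=0")
FROM ai.ftb;
```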

**Examples**

```SQL
--- Forecast with the arima algorithm; 10 rows returned (the default), white-noise check enabled, default 95% confidence interval.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
FROM ai.ftb;

--- Forecast with the arima algorithm on periodic input where every 10 samples form one period; 95% confidence interval.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
FROM ai.ftb;
```
```
taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
        _flow |       _fhigh |         _frowts         | forecast(i32) |
========================================================================================
   10.5286684 |   41.8038254 | 2020-01-01 00:01:35.001 |            26 |
  -21.9861946 |   83.3938904 | 2020-01-01 00:01:36.001 |            30 |
  -78.5686035 |  144.6729126 | 2020-01-01 00:01:37.001 |            33 |
 -154.9797363 |  230.3057709 | 2020-01-01 00:01:38.001 |            37 |
 -253.9852905 |  337.6083984 | 2020-01-01 00:01:39.001 |            41 |
 -375.7857971 |  466.4594727 | 2020-01-01 00:01:40.001 |            45 |
 -514.8043823 |  622.4426270 | 2020-01-01 00:01:41.001 |            53 |
 -680.6343994 |  796.2861328 | 2020-01-01 00:01:42.001 |            57 |
 -868.4956665 |  992.8603516 | 2020-01-01 00:01:43.001 |            62 |
-1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 |            69 |
```

**Available forecasting algorithms**
- arima
- holtwinters
Binary file not shown.
After Width: | Height: | Size: 49 KiB |
Binary file not shown.
After Width: | Height: | Size: 7.1 KiB |
@ -3,6 +3,6 @@
```

:::note
For the Rust connector, the connection method differs only in which features are used. If the "ws" feature is enabled, only the Websocket implementation is compiled in.
For the Rust connector, the connection method differs only in which features are used. If the "ws" feature is enabled, only the WebSocket implementation is compiled in.

:::
@ -28,7 +28,7 @@ TDengine provides a rich set of application development interfaces; to help users get started quickly

1. Connect directly to the server process taosd through the client driver taosc; below this is called the "native connection".
2. Connect to taosd through the REST API provided by the taosAdapter component; below this is called the "REST connection".
3. Connect to taosd through the Websocket API provided by the taosAdapter component; below this is called the "Websocket connection".
3. Connect to taosd through the WebSocket API provided by the taosAdapter component; below this is called the "WebSocket connection".


@ -38,9 +38,9 @@ TDengine provides a rich set of application development interfaces; to help users get started quickly
The key differences are:

1. With the native connection, the client driver taosc must match the server's TDengine version.
2. With the REST connection, users do not need to install the client driver taosc, which makes it easy to use across platforms, but features such as data subscription and binary data types are unavailable. Moreover, compared with the native and Websocket connections, the REST connection has the lowest performance. The REST interface is stateless; when using REST connections, database names must be specified for tables and supertables in SQL.
3. With the Websocket connection, users also do not need to install the client driver taosc.
4. To connect to a cloud service instance, the REST connection or the Websocket connection must be used.
2. With the REST connection, users do not need to install the client driver taosc, which makes it easy to use across platforms, but features such as data subscription and binary data types are unavailable. Moreover, compared with the native and WebSocket connections, the REST connection has the lowest performance. The REST interface is stateless; when using REST connections, database names must be specified for tables and supertables in SQL.
3. With the WebSocket connection, users also do not need to install the client driver taosc.
4. To connect to a cloud service instance, the REST connection or the WebSocket connection must be used.

**The WebSocket connection is recommended**

@ -126,7 +126,7 @@ TDengine provides a rich set of application development interfaces; to help users get started quickly
```bash
pip3 install taos-ws-py
```
:::note This package is the Websocket connector
:::note This package is the WebSocket connector
- Install both `taospy` and `taos-ws-py`
```bash
pip3 install taospy[ws]
```
@ -182,7 +182,7 @@ taos = { version = "*"}
```

:::info
The Rust connector distinguishes connection methods through different features. By default both the native and Websocket connections are supported; to build only the Websocket connection, set the `ws` feature:
The Rust connector distinguishes connection methods through different features. By default both the native and WebSocket connections are supported; to build only the WebSocket connection, set the `ws` feature:

```toml
taos = { version = "*", default-features = false, features = ["ws"] }
```
@ -201,7 +201,7 @@ taos = { version = "*", default-features = false, features = ["ws"] }
```
npm install @tdengine/websocket
```
:::note Node.js currently supports only the Websocket connection
:::note Node.js currently supports only the WebSocket connection
- **Verify the installation**
- Create a verification directory, for example `~/tdengine-test`, and download the [nodejsChecker.js source](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/nodejsChecker.js) from GitHub.
- Run the following command in a terminal.
@ -271,12 +271,10 @@ dotnet add package TDengine.Connector
<TabItem label="Java" value="java">
The Java connector takes URL and Properties as its connection parameters.
The TDengine JDBC URL specification is:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
`jdbc:[TAOS|TAOS-WS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`

For detailed URL and Properties parameters and usage, see the [URL specification](../../reference/connector/java/#url-规范)

**Note**: adding the `batchfetch` parameter to a REST connection and setting it to true enables the WebSocket connection.

</TabItem>
<TabItem label="Python" value="python">
The Python connector uses the `connect()` method to establish a connection; the parameters are described below:
@ -387,8 +385,8 @@ For detailed description and usage of the DSN, see [connection features](../../reference/connecto
- `reconnectIntervalMs`: reconnect interval in milliseconds, default 2000.
</TabItem>
<TabItem label="C" value="c">
**Websocket connection**
The C/C++ connector's Websocket mode uses the `ws_connect()` function to establish a connection to the TDengine database. Its parameter is a DSN string with the following basic structure:
**WebSocket connection**
The C/C++ connector's WebSocket mode uses the `ws_connect()` function to establish a connection to the TDengine database. Its parameter is a DSN string with the following basic structure:

```text
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
```
@ -417,8 +415,8 @@ The C/C++ connector's native mode uses the `taos_connect()` function to estab
</TabItem>
</Tabs>

### Websocket connection
The following are code samples for establishing a Websocket connection in each language connector. They show how to connect to the TDengine database over Websocket and set some connection parameters. The whole process mainly involves establishing the database connection and handling exceptions.
### WebSocket connection
The following are code samples for establishing a WebSocket connection in each language connector. They show how to connect to the TDengine database over WebSocket and set some connection parameters. The whole process mainly involves establishing the database connection and handling exceptions.

<Tabs defaultValue="java" groupId="lang">
<TabItem label="Java" value="java">
@ -33,7 +33,7 @@ REST API: directly call the REST API provided by `taosadapter` to perform data
</TabItem>
<TabItem label="Python" value="python">

```python title="Websocket connection"
```python title="WebSocket connection"
{{#include docs/examples/python/create_db_ws.py}}
```
@ -69,7 +69,7 @@ REST API: directly call the REST API provided by `taosadapter` to perform data
</TabItem>
<TabItem label="C" value="c">

```c title="Websocket connection"
```c title="WebSocket connection"
{{#include docs/examples/c-ws/create_db_demo.c:create_db_and_table}}
```
@ -114,7 +114,7 @@ NOW is an internal system function that defaults to the current time of the client's machine. NOW
</TabItem>
<TabItem label="Python" value="python">

```python title="Websocket connection"
```python title="WebSocket connection"
{{#include docs/examples/python/insert_ws.py}}
```
@ -151,7 +151,7 @@ NOW is an internal system function that defaults to the current time of the client's machine. NOW
</TabItem>
<TabItem label="C" value="c">

```c title="Websocket connection"
```c title="WebSocket connection"
{{#include docs/examples/c-ws/insert_data_demo.c:insert_data}}
```
@ -189,7 +189,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
</TabItem>
<TabItem label="Python" value="python">

```python title="Websocket connection"
```python title="WebSocket connection"
{{#include docs/examples/python/query_ws.py}}
```
@ -230,7 +230,7 @@ The Rust connector also supports using **serde** to deserialize rows into struc
</TabItem>
<TabItem label="C" value="c">

```c title="Websocket connection"
```c title="WebSocket connection"
{{#include docs/examples/c-ws/query_data_demo.c:query_data}}
```
@ -273,7 +273,7 @@ reqId can be used for request tracing; it acts like a traceId in distributed systems
</TabItem>
<TabItem label="Python" value="python">

```python title="Websocket connection"
```python title="WebSocket connection"
{{#include docs/examples/python/reqid_ws.py}}
```
@ -310,7 +310,7 @@ reqId can be used for request tracing; it acts like a traceId in distributed systems
</TabItem>
<TabItem label="C" value="c">

```c "Websocket connection"
```c "WebSocket connection"
{{#include docs/examples/c-ws/with_reqid_demo.c:with_reqid}}
```
@ -191,7 +191,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000

:::

### Websocket connection
### WebSocket connection

<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
@ -23,7 +23,7 @@ import TabItem from "@theme/TabItem";
- Perform a batch insert to write these rows into the corresponding child tables.
3. Finally, print the number of rows actually inserted into the tables.

## Websocket connection
## WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
```java
@ -94,7 +94,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see

</TabItem>
<TabItem label="C" value="c">
- Websocket connection: because a DSN is used, the four options `td.connect.ip`, `td.connect.port`, `td.connect.user`, and `td.connect.pass` are not required; the rest are the same as the common configuration options.
- WebSocket connection: because a DSN is used, the four options `td.connect.ip`, `td.connect.port`, `td.connect.user`, and `td.connect.pass` are not required; the rest are the same as the common configuration options.
- Native connection: same as the common basic configuration options.

</TabItem>
@ -103,8 +103,8 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
</TabItem>
</Tabs>

### Websocket connection
This section shows how each language connector creates a consumer over a Websocket connection: specify the server address to connect to, enable auto-commit, start consuming from the latest message, and set `group.id`, `client.id`, and so on. Some language connectors also support deserialization parameters.
### WebSocket connection
This section shows how each language connector creates a consumer over a WebSocket connection: specify the server address to connect to, enable auto-commit, start consuming from the latest message, and set `group.id`, `client.id`, and so on. Some language connectors also support deserialization parameters.

<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
@ -234,7 +234,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see

## Subscribing to and consuming data
After subscribing to a topic, consumers can start receiving and processing the messages in it. Sample code for subscribing and consuming data:
### Websocket connection
### WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">

@ -403,7 +403,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
## Specifying the subscription offset
A consumer can start reading a partition's messages from a specific offset, which allows it to re-read messages or skip already processed ones. The following shows how each language connector specifies the subscription offset.

### Websocket connection
### WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">

@ -549,7 +549,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see

**Note**: make sure a message has been fully processed before committing its progress manually; otherwise a message that failed during processing will not be consumed again. With auto-commit, the current `poll` may commit the consumption progress of the previous batch, so make sure processing is complete before the next `poll` or message fetch.

### Websocket connection
### WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">

@ -663,7 +663,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see
## Unsubscribing and closing the consumer
A consumer can unsubscribe from topics to stop receiving messages. When a consumer is no longer needed, its instance should be closed to release resources and disconnect from the TDengine server.

### Websocket connection
### WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">

@ -766,7 +766,7 @@ The Rust connector creates a consumer from a DSN; for the list of configurable parameters see


## Complete examples
### Websocket connection
### WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
<details>
@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100)

Compared with the WebSocket connection, the RESTful connection uses more memory: besides the buffer, the memory overhead of each connection's response must also be considered. This overhead is closely tied to the size of the response's JSON data and can be significant when the queried data volume is large.

Because the RESTful connection does not support fetching query results in batches, queries that return very large result sets may use a great deal of memory and cause an out-of-memory condition; therefore, in large projects it is recommended to enable the batchfetch=true option, which turns on the WebSocket connection and streams the result set, avoiding the risk of memory overflow.
Because the RESTful connection does not support fetching query results in batches, queries that return very large result sets may use a great deal of memory and cause an out-of-memory condition; therefore, in large projects it is recommended to use the WebSocket connection and stream the result set, avoiding the risk of memory overflow.

**Note**
- It is recommended to access the TDengine cluster through RESTful/WebSocket connections rather than the native taosc connection.
@ -146,11 +146,11 @@ TDengine's multi-level storage also offers the following advantages.

The table below lists the common ports of some TDengine interfaces and components; all of them can be changed via parameters in the configuration file.

|Interface or component | Port |
|:---------------------------:|:---------:|
|Native interface (taosc) | 6030 |
|RESTful interface | 6041 |
|WebSocket interface |6041 |
|taosKeeper | 6043 |
|taosX | 6050, 6055 |
|taosExplorer | 6060 |
| Interface or component | Port |
| :---------------: | :--------: |
| Native interface (taosc) | 6030 |
| RESTful interface | 6041 |
| WebSocket interface | 6041 |
| taosKeeper | 6043 |
| taosX | 6050, 6055 |
| taosExplorer | 6060 |

@ -368,6 +368,18 @@ spec:
      labels:
        app: "tdengine"
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - tdengine
              topologyKey: kubernetes.io/hostname
      containers:
      - name: "tdengine"
        image: "tdengine/tdengine:3.2.3.0"
@ -837,4 +849,4 @@ Under Helm management, cleanup is also simple:
helm uninstall tdengine
```

However, Helm does not automatically remove the PVCs either; you need to list the PVCs manually and then delete them.
However, Helm does not automatically remove the PVCs either; you need to list the PVCs manually and then delete them.
@ -76,7 +76,7 @@ dataDir /mnt/data6 2 0
|s3UploadDelaySec | How long a data file must remain unmodified before being uploaded to S3, in seconds. Minimum: 1; maximum: 2592000 (30 days); default 60 seconds |
|s3PageCacheSize | Number of S3 page-cache pages. Minimum: 4; maximum: 1024*1024\*1024; default 4096 |
|s3MigrateIntervalSec | Trigger period for automatically uploading local data files to S3, in seconds. Minimum: 600; maximum: 100000; default 3600 |
|s3MigrateEnabled | Whether to perform S3 migration automatically; default 1 (automatic S3 migration enabled), can be set to 0. |
|s3MigrateEnabled | Whether to perform S3 migration automatically; default 0 (automatic S3 migration disabled), can be set to 1. |

### Checking configuration parameter availability

@ -108,9 +108,37 @@ s3migrate database <db_name>;

| # | Parameter | Default | Min | Max | Description |
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
| 1 | s3_keeplocal | 3650 | 1 | 365000 | Number of days data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3. Default unit: days; the m (minutes), h (hours), and d (days) units are supported |
| 2 | s3_chunksize | 262144 | 131072 | 1048576 | Size threshold for uploaded objects; like the TSDB_PAGESIZE parameter it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 0 | 0 | 1 | Whether to run compact automatically when a TSDB file group is first uploaded to S3. |
| 1 | s3_keeplocal | 365 | 1 | 365000 | Number of days data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3. Default unit: days; the m (minutes), h (hours), and d (days) units are supported |
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | Size threshold for uploaded objects; like the tsdb_pagesize parameter it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 1 | 0 | 1 | Whether to run compact automatically when a TSDB file group is first uploaded to S3. |

### Estimating object-storage read/write counts

The cost of using an object-storage service depends on both the volume of data stored and the number of requests; the upload and download paths are described below.

#### Data upload

When TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files are split into chunks, each 512 MB by default (`s3_chunkpages * tsdb_pagesize`). Except for the last chunk, which remains on the local file system, the chunks are uploaded to the object-storage service.

```math
uploads = data file size / (s3_chunkpages * tsdb_pagesize) - 1
```

When creating a database, the chunk size can be adjusted via the `s3_chunkpages` parameter, which controls the number of uploads per data file.
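
As a worked example under the defaults stated above (131072 pages × 4 KB = 512 MB chunks), a hypothetical 2 GB data file is split into four chunks, of which three are uploaded:

```math
uploads = 2048 MB / 512 MB - 1 = 3
```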

Other file types, such as head, stt, and sma files, remain on the local file system to speed up queries that rely on precomputed results.

#### Data download

When a query needs data residing in object storage, TSDB does not download the whole data file. It computes the position of the required data within the file and downloads only that data into the TSDB page cache before returning it to the query execution engine. Subsequent queries check the page cache first; if the data is already cached it is used directly, without another download from object storage, which effectively reduces the number of downloads.

Multiple adjacent data pages are downloaded from object storage as a single data block to further reduce the number of downloads. The size of each data page is set at database creation via the `tsdb_pagesize` parameter and defaults to 4 KB.

```math
downloads = number of data blocks the query needs - number of data blocks already cached
```
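
For instance, if a query touches 100 data blocks and 40 of them are already in the page cache (hypothetical numbers), only the remaining blocks are fetched:

```math
downloads = 100 - 40 = 60
```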

The page cache is an in-memory cache; after a node restarts, the data must be downloaded again for queries. The cache uses an LRU (Least Recently Used) policy: when cache space runs out, the least recently used data is evicted. The cache size can be adjusted via the `s3PageCacheSize` parameter; generally speaking, the larger the cache, the fewer the downloads.

## Azure Blob storage
This section describes how to use Microsoft Azure Blob object storage with TDengine Enterprise. This feature is an extension of the 'object storage' feature in the previous subsection and additionally depends on the S3 gateway provided by the Flexify service. With proper parameter configuration, most of the colder time-series data can be stored in the Azure Blob service.

@ -30,9 +30,8 @@ toc_max_heading_level: 4
Currently only the Java connector supports dual-active in WebSocket connection mode; its configuration example is as follows

```java
url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
url = "jdbc:TAOS-WS://" + host + ":6041/?user=root&password=taosdata";
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_HOST, "192.168.1.11");
properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_PORT, "6041");
properties.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
@ -43,13 +42,13 @@ connection = DriverManager.getConnection(url, properties);

The configuration properties and their meanings are listed in the table below

| Property | Meaning |
| ----------------- | ------------------ |
| PROPERTY_KEY_SLAVE_CLUSTER_HOST | Hostname or IP of the second node; empty by default |
| PROPERTY_KEY_SLAVE_CLUSTER_PORT | Port of the second node; empty by default |
| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | Whether to enable automatic reconnection. Effective only with Websocket connections. true: enabled, false: disabled. Default false. Set it to true in dual-active scenarios |
| PROPERTY_KEY_RECONNECT_INTERVAL_MS | Reconnection interval in milliseconds: default 2000 (2 seconds); minimum 0 (retry immediately); no upper limit |
| PROPERTY_KEY_RECONNECT_RETRY_COUNT | Maximum retries per node: default 3; minimum 0 (no retries); no upper limit |
| Property | Meaning |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------- |
| PROPERTY_KEY_SLAVE_CLUSTER_HOST | Hostname or IP of the second node; empty by default |
| PROPERTY_KEY_SLAVE_CLUSTER_PORT | Port of the second node; empty by default |
| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | Whether to enable automatic reconnection. Effective only with WebSocket connections. true: enabled, false: disabled. Default false. Set it to true in dual-active scenarios |
| PROPERTY_KEY_RECONNECT_INTERVAL_MS | Reconnection interval in milliseconds: default 2000 (2 seconds); minimum 0 (retry immediately); no upper limit |
| PROPERTY_KEY_RECONNECT_RETRY_COUNT | Maximum retries per node: default 3; minimum 0 (no retries); no upper limit |

### Constraints

@ -33,7 +33,7 @@ The taosd command-line parameters are as follows
| secondEp | When taosd starts, if firstEp cannot be reached, it tries to connect to the endpoint of the second dnode in the cluster; default: none |
| fqdn | The service address taosd listens on after startup; default: the first hostname configured on the server |
| serverPort | The port taosd listens on after startup; default: 6030 |
| numOfRpcSessions | Maximum number of connections a client may create; range 100-100000; default: 30000 |
| numOfRpcSessions | Maximum number of connections a dnode may initiate; range 100-100000; default: 30000 |
| timeToGetAvailableConn | Maximum wait time to obtain an available connection; range 10-50000000, in milliseconds; default: 500000 |

### Monitoring
@ -156,14 +156,14 @@ The valid value of charset is UTF-8.
### Memory
| Parameter | Description |
| :----------------: | :---------------------------------------------: |
| rpcQueueMemoryAllowed | Maximum memory rpc messages may occupy on one dnode, in bytes; range 10485760-INT64_MAX; default: 1/10 of server memory |
| syncLogBufferMemoryAllowed | Maximum memory sync log-buffer messages may occupy on one dnode, in bytes; range 10485760-INT64_MAX; default: 1/10 of server memory; effective from versions 3.1.3.2/3.3.2.13 |
| rpcQueueMemoryAllowed | Maximum memory rpc messages may occupy on one dnode, in bytes; range 104857600-INT64_MAX; default: 1/10 of server memory |
| syncLogBufferMemoryAllowed | Maximum memory sync log-buffer messages may occupy on one dnode, in bytes; range 104857600-INT64_MAX; default: 1/10 of server memory; effective from versions 3.1.3.2/3.3.2.13 |

### Performance tuning

| Parameter | Description |
| :----------------: | :---------------------------------------------: |
| numOfCommitThreads | Maximum number of write threads; range 0-1024; default 4 |
| numOfCommitThreads | Maximum number of commit (disk-flush) threads; range 0-1024; default 4 |

### Logging

@ -180,6 +180,7 @@ The valid value of charset is UTF-8.
| tmrDebugFlag | Log switch for the timer module; same value range as above |
| uDebugFlag | Log switch for the shared utility module; same value range as above |
| rpcDebugFlag | Log switch for the rpc module; same value range as above |
| cDebugFlag | Log switch for the client module; same value range as above |
| jniDebugFlag | Log switch for the jni module; same value range as above |
| qDebugFlag | Log switch for the query module; same value range as above |
| dDebugFlag | Log switch for the dnode module; same value range as above; default 135 |
@ -223,16 +224,16 @@ lossyColumns float|double

| Parameter | Description |
| :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| enableCoreFile | Whether to generate a core file on crash; 0: no, 1: yes; default 1. The directory the core file goes to depends on how taosd was started: 1. started via systemctl start taosd: in the root directory <br/> 2. started manually: in the directory taosd was run from. |
| udf | Whether to start the UDF service; 0: no, 1: yes; default 0 |
| ttlChangeOnWrite | Whether the ttl expiry time changes along with table modification operations; 0: no, 1: yes; default value is |
| tmqMaxTopicNum | Maximum number of topics that can be subscribed; range 1-10000; default 20 |
| maxTsmaNum | Number of TSMAs that can be created in the cluster; range: 0-3; default: 3 |
| enableCoreFile | Whether to generate a core file on crash; 0: no, 1: yes; default 1. The directory the core file goes to depends on how taosd was started: 1. started via systemctl start taosd: in the root directory <br/> 2. started manually: in the directory taosd was run from. |
| udf | Whether to start the UDF service; 0: no, 1: yes; default 0 |
| ttlChangeOnWrite | Whether the ttl expiry time changes along with table modification operations; 0: no, 1: yes; default 0 |
| tmqMaxTopicNum | Maximum number of topics that can be subscribed; range 1-10000; default 20 |
| maxTsmaNum | Number of TSMAs that can be created in the cluster; range: 0-3; default 3 |


## taosd monitoring metrics

taosd reports monitoring metrics to taosKeeper, which writes them into a monitoring database, `log` by default; this can be changed in the taoskeeper configuration file. These metrics are described in detail below.
taosd reports monitoring metrics to taosKeeper, which writes them into a monitoring database, `log` by default; this can be changed in the taoskeeper configuration file. These metrics are described in detail below.

### The taosd\_cluster\_basic table

@ -458,4 +459,4 @@ TDengine's log files fall into two types: normal logs and slow logs.
3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the corresponding log path.
4. Slow-log files are not deleted automatically and are not compressed.
5. Slow logs use the same three parameters as normal log files: logDir, minimalLogDirGB, asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.

@ -10,7 +10,7 @@ The TDengine client driver provides all the APIs needed for application programming, and

| Parameter | Meaning |
|:-----------:|:----------------------------------------------------------:|
|firstEp | Endpoint of the first dnode in the cluster that taos actively connects to at startup; default: localhost:6030 |
|firstEp | Endpoint of the first dnode in the cluster that taos actively connects to at startup; default: hostname:6030; if the server's hostname cannot be obtained, it falls back to localhost |
|secondEp | If firstEp cannot be reached at startup, try to connect to the endpoint of the second dnode in the cluster; no default |
|numOfRpcSessions | Maximum number of connections a client may create; range: 10-50000000; default: 500000 |
|telemetryReporting | Whether to upload telemetry; 0: no, 1: yes; default: 1 |
@ -35,6 +35,7 @@ The TDengine client driver provides all the APIs needed for application programming, and
|smlAutoChildTableNameDelimiter | Delimiter placed between schemaless tags, which are joined to form the child-table name; no default |
|smlTagName | Default tag name used when the schemaless tag is empty; default "_tag_null" |
|smlTsDefaultName | Name of the time column created by schemaless auto table creation; default "_ts" |
|smlDot2Underline | Whether schemaless converts dots in supertable names to underscores |
|enableCoreFile | Whether to generate a core file on crash; 0: no, 1: yes; default: 1 |
|enableScience | Whether to display floating-point numbers in scientific notation; 0: off, 1: on; default: 1 |
|compressMsgSize | Whether to compress RPC messages; -1: compress none; 0: compress all; N (N>0): compress only messages larger than N bytes; default -1|
@ -306,7 +306,7 @@ HTTP response body:

## taosAdapter monitoring metrics

taosAdapter collects monitoring metrics for REST/Websocket requests and reports them to taosKeeper, which writes them into a monitoring database, `log` by default; this can be changed in the taoskeeper configuration file. These metrics are described in detail below.
taosAdapter collects monitoring metrics for REST/WebSocket requests and reports them to taosKeeper, which writes them into a monitoring database, `log` by default; this can be changed in the taoskeeper configuration file. These metrics are described in detail below.

#### The adapter\_requests table

@ -330,8 +330,8 @@ taosAdapter collects monitoring metrics for REST/WebSocket requests and reports
| other\_fail | INT UNSIGNED | | Number of other failed requests |
| query\_in\_process | INT UNSIGNED | | Number of query requests being processed |
| write\_in\_process | INT UNSIGNED | | Number of write requests being processed |
| endpoint | VARCHAR | | Request endpoint |
| req\_type | NCHAR | TAG | Request type: 0 for REST, 1 for Websocket |
| endpoint | VARCHAR | | Request endpoint |
| req\_type | NCHAR | TAG | Request type: 0 for REST, 1 for WebSocket |

## Limit on the number of returned rows

@ -4,11 +4,11 @@ sidebar_label: taosExplorer
toc_max_heading_level: 4
---

taosExplorer is a web service that provides a visual, interactive management tool for TDengine instances. This section mainly covers its installation and deployment. All of its features are based on a simple, easy-to-use graphical interface that you can try directly; if needed, refer to the related content in the advanced features and operations guides. For the best experience when accessing taosExplorer, use Chrome 79 or later, or Edge 79 or later.
taosExplorer is a web service that provides a visual, interactive management tool for TDengine instances; although it is not open source, it ships free with the open-source installation packages. This section mainly covers its installation and deployment. All of its features are based on a simple, easy-to-use graphical interface that you can try directly; if needed, refer to the related content in the advanced features and operations guides. For the best experience when accessing taosExplorer, use Chrome 79 or later, or Edge 79 or later.

## Installation

taosEexplorer does not need to be installed separately; starting with TDengine 3.3.0.0 it ships with the TDengine Enterprise Server installation package, and after installation the `taos-explorer` service is available.
taosExplorer does not need to be installed separately; starting with TDengine 3.3.0.0 it ships with the TDengine installation package, and after installation the `taos-explorer` service is available. Installation packages you build yourself from the TDengine source code on GitHub do not include taosExplorer.

## Configuration

@ -4,11 +4,11 @@ sidebar_label: taos
toc_max_heading_level: 4
---

The TDengine command-line program (hereinafter TDengine CLI) is the simplest and most common way to operate TDengine instances and interact with them. Using it requires the TDengine Server or TDengine Client installation package to be installed.
The TDengine command-line program (hereinafter TDengine CLI) is the simplest and most commonly used tool to operate TDengine instances and interact with them. Using it requires the TDengine Server or TDengine Client installation package to be installed.

## Launching

To enter the TDengine CLI, simply run `taos` in a terminal.
To enter the TDengine CLI, run `taos` in your terminal.

```bash
taos
@ -23,6 +23,11 @@ taos>
```

Once inside the TDengine CLI, you can execute SQL statements, including inserts, queries, and various administrative commands.
To exit the TDengine CLI, enter `q`, `quit`, or `exit` and press Enter
```shell
taos> quit
```


## Running SQL scripts

@ -66,7 +71,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
- -l PKTLEN: test packet size used for network testing
- -n NETROLE: scope of the network connectivity test; default `client`; valid values `client`, `server`
- -N PKTNUM: number of test packets used for network testing
- -r: output the time as an unsigned 64-bit integer type (uint64_t in C)
- -r: convert the time column to an unsigned 64-bit integer type for output (uint64_t in C)
- -R: connect to the server in RESTful mode
- -s COMMAND: SQL command to execute in non-interactive mode
- -t: test the server startup status; same status values as -k
@ -84,6 +89,13 @@ taos -h h1.taos.com -s "use db; show tables;"

You can also control the behavior of the TDengine CLI via parameters in the configuration file. For the available parameters see [client configuration](../../components/taosc)

## TDengine CLI TAB completion

- Pressing TAB on an empty command line lists all commands supported by the TDengine CLI
- Pressing TAB after a space shows the first keyword that can appear at this position; pressing TAB again switches to the next one
- Pressing TAB after a string searches all keywords matching that prefix and shows the first one; pressing TAB again switches to the next one
- Typing a backslash `\` + TAB autocompletes to the vertical display mode keyword `\G;`

## TDengine CLI tips

- Use the up and down arrow keys to browse previously entered commands

@ -91,7 +103,6 @@ taos -h h1.taos.com -s "use db; show tables;"
- Ctrl+C aborts a query in progress
- Running `RESET QUERY CACHE` clears the local table-schema cache
- Batch-execute SQL statements: place a series of TDengine CLI commands (each SQL statement on its own line, ending with a semicolon) in a file, then run `source <file-name>` in the TDengine CLI to execute all the SQL statements in that file automatically, as sketched below
- Entering `q`, `quit`, or `exit` and pressing Enter exits the TDengine CLI
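
For example, a batch file `/root/queries.sql` (hypothetical path and contents) could hold:

```sql
USE db;
SHOW TABLES;
SELECT COUNT(*) FROM meters;
```

Running `source /root/queries.sql` in the TDengine CLI then executes the three statements in order.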

## Exporting query results to a file

@ -364,6 +364,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)

- **max** : Maximum value for the data type of this column/tag. Generated values will be less than the maximum.

- **scalingFactor** : Floating-point precision scaling factor, effective only when the data type is float/double; valid values are integers from 1 to 1000000. It is used to increase the precision of generated floating-point numbers, especially when the min or max values are small. This property increases the precision after the decimal point by powers of 10: a scalingFactor of 10 means 1 extra decimal digit of precision, 100 means 2, and so on.

- **fun** : Fill this column using a function. Currently only the sin and cos functions are supported; the input is the timestamp converted to an angle, with the conversion: angle x = ts value of the time column % 360. Coefficient scaling and a random-fluctuation factor are also supported, in a fixed expression format such as fun="10\*sin(x)+100\*random(5)", where x is the angle, ranging over 0-360 degrees and stepping in lockstep with the time column. 10 is the multiplicative coefficient, 100 the additive coefficient, and 5 means a random fluctuation within a 5% range. Supported data types: int, bigint, float, double. Note: the expression format is fixed and its parts must not be reordered.

- **values** : Value domain for an nchar/binary column/tag; values are picked from the list at random.

@ -477,7 +479,9 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
| 13 | BOOL | bool
| 14 | NCHAR | nchar
| 15 | VARCHAR | varchar
| 15 | JSON | json
| 16 | VARBINARY | varbinary
| 17 | GEOMETRY | geometry
| 18 | JSON | json

Note: data types in the taosBenchmark configuration file must be lowercase to be recognized

@ -42,8 +42,8 @@ CREATE DATABASE db_name PRECISION 'ns';
| 14 | NCHAR | user-defined | Stores strings that include multi-byte characters, such as Chinese. Each NCHAR character occupies 4 bytes of storage. Strings are quoted with single quotes; single quotes inside a string are escaped as `\'`. A size must be specified when using NCHAR: a column of type NCHAR(10) stores at most 10 NCHAR characters. An error is raised if a string exceeds the declared length. |
| 15 | JSON | | JSON data type; only tags can be in JSON format |
| 16 | VARCHAR | user-defined | Alias for BINARY |
| 17 | GEOMETRY | user-defined | Geometry type
| 18 | VARBINARY | user-defined | Variable-length binary data|
| 17 | GEOMETRY | user-defined | Geometry type, supported since version 3.1.0.0
| 18 | VARBINARY | user-defined | Variable-length binary data, supported since version 3.1.1.0|

:::note

@ -30,6 +30,7 @@ database_option: {
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
| TABLE_SUFFIX value
| DNODES value
| TSDB_PAGESIZE value
| WAL_LEVEL {1 | 2}
| WAL_FSYNC_PERIOD value

@ -63,19 +64,20 @@ database_option: {
- MAXROWS: maximum number of rows per file block; default 4096.
- MINROWS: minimum number of rows per file block; default 100.
- KEEP: number of days data files are kept; default 3650; range [1, 365000]; must be at least 3 times the DURATION value. The database automatically deletes data that has been stored longer than KEEP. KEEP accepts a unit suffix, e.g. KEEP 100h or KEEP 10d, with the m (minutes), h (hours), and d (days) units supported. Without a unit, days are assumed, e.g. KEEP 50. The Enterprise Edition supports [multi-level storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so multiple keep times can be set (comma-separated, at most 3, satisfying keep 0 \<= keep 1 \<= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support multi-level storage (even if multiple keep values are configured they do not take effect; KEEP uses the largest value).
- STT_TRIGGER: number of flushed files that triggers a file merge. Default 1, range 1 to 16. For scenarios with few tables and high-frequency writes, the default or a smaller value is recommended; for many tables with low-frequency writes, a larger value is recommended.
- STT_TRIGGER: number of flushed files that triggers a file merge. Fixed at 1 in the open-source edition; configurable from 1 to 16 in the enterprise edition. For scenarios with few tables and high-frequency writes, the default is recommended; for many tables with low-frequency writes, a larger value is recommended.
- SINGLE_STABLE: whether only a single supertable may be created in this database, intended for supertables with very many columns.
  - 0: multiple supertables may be created.
  - 1: only one supertable may be created.
- TABLE_PREFIX: when positive, a prefix of the given length in the table name is ignored when deciding which vgroup a table is assigned to; when negative, only a prefix of the given length in the table name is used. For example, with table name "v30001", TSDB_PREFIX = 2 uses "0001" and TSDB_PREFIX = -2 uses "v3" to decide the vgroup.
- TABLE_SUFFIX: when positive, a suffix of the given length in the table name is ignored when deciding which vgroup a table is assigned to; when negative, only a suffix of the given length in the table name is used. For example, with table name "v30001", TSDB_SUFFIX = 2 uses "v300" and TSDB_SUFFIX = -2 uses "01" to decide the vgroup.
- TSDB_PAGESIZE: page size of the time-series storage engine inside one vnode, in KB; default 4 KB; range 1 to 16384 (1 KB to 16 MB).
- DNODES: list of dnodes on which the vnodes are placed, e.g. '1,2,3', comma-separated with no spaces between the characters; enterprise edition only.
- WAL_LEVEL: WAL level; default 1.
  - 1: write the WAL without running fsync.
  - 2: write the WAL and run fsync.
- WAL_FSYNC_PERIOD: when WAL_LEVEL is set to 2, sets the flush period. Default 3000, in milliseconds. Minimum 0 (flush on every write); maximum 180000 (three minutes).
- WAL_RETENTION_PERIOD: maximum extra time WAL log files are retained for data-subscription consumption, in seconds. WAL log cleanup is not affected by the consumption state of subscribing clients. Default 3600, meaning the most recent 3600 seconds of data are retained in the WAL; adjust this parameter to suit your data-subscription needs.
- WAL_RETENTION_SIZE: maximum extra cumulative size of WAL log files retained for data-subscription consumption, in KB. Default 0, meaning no limit on the cumulative size.
- WAL_RETENTION_PERIOD: maximum extra time WAL log files are retained for data-subscription consumption, in seconds. WAL log cleanup is not affected by the consumption state of subscribing clients. Default 3600, meaning the most recent 3600 seconds of data are retained in the WAL; adjust this parameter to suit your data-subscription needs.
- WAL_RETENTION_SIZE: maximum extra cumulative size of WAL log files retained for data-subscription consumption, in KB. Default 0, meaning no limit on the cumulative size.
### Database creation example

```sql
@ -42,12 +42,12 @@ table_option: {
**Usage notes**

1. For table (column) naming rules, see [Name rules](./19-limit.md#名称命名规则).
1. The maximum table-name length is 192.
1. The first column of a table must be of type TIMESTAMP; the system automatically makes it the primary key.
1. Besides the timestamp primary-key column, a second column can be designated as an additional primary-key column via the PRIMARY KEY keyword. The second column designated as a primary key must be of integer or string type (varchar).
1. A table row cannot be longer than 48 KB (64 KB starting from version 3.0.5.0); (note: each BINARY/NCHAR/GEOMETRY column takes 2 extra bytes of storage).
1. When using the BINARY/NCHAR/GEOMETRY data types, the maximum byte size must be specified, e.g. BINARY(20) means 20 bytes.
1. For the usage of `ENCODE` and `COMPRESS`, see [per-column compression](../compress)
2. The maximum table-name length is 192.
3. The first column of a table must be of type TIMESTAMP; the system automatically makes it the primary key.
4. Besides the timestamp primary-key column, a second column can be designated as an additional primary-key column via the PRIMARY KEY keyword. The second column designated as a primary key must be of integer or string type (VARCHAR); see the sketch after this list.
5. A table row cannot be longer than 48 KB (64 KB starting from version 3.0.5.0); (note: each VARCHAR/NCHAR/GEOMETRY column takes 2 extra bytes of storage).
6. When using the VARCHAR/NCHAR/GEOMETRY data types, the maximum byte size must be specified, e.g. VARCHAR(20) means 20 bytes.
7. For the usage of `ENCODE` and `COMPRESS`, see [per-column compression](../compress)
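
A sketch of note 4 above (a hypothetical table; the second primary-key column here is an integer):

```sql
CREATE TABLE readings (ts TIMESTAMP, device_id INT PRIMARY KEY, v DOUBLE);
```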

**Parameters**

@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] USING [db_name.]stb_name (field1_name [, field2_nam

**Parameters**

1. The FILE syntax means the data comes from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header. The CSV file should contain only the table name and the tag values. To insert data, see the data-ingestion chapter.
1. The FILE syntax means the data comes from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header. The CSV file should contain only the table name and the tag values. To insert data, see the 'Data Ingestion' chapter.
2. Creates child tables of the specified stb_name; the supertable must already exist.
3. The field_name list order matches the CSV column order. Duplicates are not allowed in the list, `tbname` must be included, and zero or more of the tag columns defined in the supertable may be included. Tag values not present in the list are set to NULL; see the sketch below.
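
A sketch of these rules (hypothetical file path and contents; `meters` is the supertable used elsewhere in this document):

```sql
-- /tmp/subtables.csv holds rows such as: 'd21003','California.LosAngeles',2
CREATE TABLE USING meters (tbname, location, groupId) FILE '/tmp/subtables.csv';
```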
@ -1,7 +1,7 @@
---
sidebar_label: Data Ingestion
title: Data Ingestion
description: Detailed syntax for writing data
description: Detailed syntax for writing data
---

## Insert syntax

@ -25,9 +25,9 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
### Supertable syntax
```sql
INSERT INTO
stb1_name [(field1_name, ...)]
stb1_name [(field1_name, ...)]
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
[stb2_name [(field1_name, ...)]
[stb2_name [(field1_name, ...)]
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
...];
```
@ -47,7 +47,7 @@ INSERT INTO

2. The VALUES syntax specifies one or more rows of data to insert.

3. The FILE syntax means the data comes from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header.
3. The FILE syntax means the data comes from a CSV file (comma-separated, each value enclosed in single quotes); the CSV file needs no header. To only create child tables, see the 'Tables' chapter.

4. Both the `INSERT ... VALUES` and `INSERT ... FILE` statements can insert data into multiple tables within a single INSERT statement.

@ -154,12 +154,20 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
## Supertable syntax
## Inserting into a supertable with automatic child-table creation

Tables are created automatically; the table name is given via the tbname column
Tables are created automatically; the table name is given via the tbname column
```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
```
## Inserting into a supertable from a CSV file with automatic child-table creation

Create child tables for the supertable from the CSV file contents and fill in the corresponding columns and tags

```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
FILE '/tmp/csvfile_21002.csv'
```
Some files were not shown because too many files have changed in this diff